From 33591a30d2e495b46877b76084aa2f52e5389246 Mon Sep 17 00:00:00 2001 From: Claes Redestad Date: Tue, 10 Oct 2023 17:01:57 +0000 Subject: [PATCH 01/15] 8317795: Add an ImmutableBitSetPredicate variant for bitsets <= 128 elements Reviewed-by: pminborg, rriggs --- .../util/ImmutableBitSetPredicate.java | 49 +++++++++++++++---- .../jdk/java/util/BitSet/ImmutableBitSet.java | 36 ++++++++------ 2 files changed, 61 insertions(+), 24 deletions(-) diff --git a/src/java.base/share/classes/jdk/internal/util/ImmutableBitSetPredicate.java b/src/java.base/share/classes/jdk/internal/util/ImmutableBitSetPredicate.java index 4baeb8e31e3a8..f0def80064f4d 100644 --- a/src/java.base/share/classes/jdk/internal/util/ImmutableBitSetPredicate.java +++ b/src/java.base/share/classes/jdk/internal/util/ImmutableBitSetPredicate.java @@ -48,23 +48,21 @@ private ImmutableBitSetPredicate(BitSet original) { this.words = original.toLongArray(); } + /** + * @param bitIndex the bit index to test + * @return true if the bit is in the range of the BitSet and the bit is set, otherwise false + */ @Override public boolean test(int bitIndex) { - if (bitIndex < 0) - throw new IndexOutOfBoundsException("bitIndex < 0: " + bitIndex); + if (bitIndex < 0) { + return false; + } - int wordIndex = wordIndex(bitIndex); + int wordIndex = bitIndex >> 6; return (wordIndex < words.length) && ((words[wordIndex] & (1L << bitIndex)) != 0); } - /** - * Given a bit index, return word index containing it. - */ - private static int wordIndex(int bitIndex) { - return bitIndex >> 6; - } - /** * {@return a new {@link IntPredicate } representing the {@link BitSet#get(int)} method applied * on an immutable snapshot of the current state of this BitSet}. @@ -79,7 +77,38 @@ private static int wordIndex(int bitIndex) { * @since 22 */ public static IntPredicate of(BitSet original) { + if (original.size() <= 128) { + long[] array = original.toLongArray(); + return new SmallImmutableBitSetPredicate( + array.length > 0 ? array[0] : 0L, + array.length > 1 ? array[1] : 0L); + } return new ImmutableBitSetPredicate(original); } + /** + * Specialization for small sets of 128 bits or less + * @param first - bits index 0 through 63, inclusive + * @param second - bits index 64 through 127, inclusive + */ + public record SmallImmutableBitSetPredicate(long first, long second) implements IntPredicate { + + /** + * @param bitIndex the bit index to test + * @return true if the bit is in the range of the BitSet and the bit is set, otherwise false + */ + @Override + public boolean test(int bitIndex) { + if (bitIndex < 0) { + return false; + } + + int wordIndex = bitIndex >> 6; + if (wordIndex > 1) { + return false; + } + long bits = wordIndex == 0 ? 
first : second; + return (bits & (1L << bitIndex)) != 0; + } + } } diff --git a/test/jdk/java/util/BitSet/ImmutableBitSet.java b/test/jdk/java/util/BitSet/ImmutableBitSet.java index 7dde261ff5d12..76e7f028749ed 100644 --- a/test/jdk/java/util/BitSet/ImmutableBitSet.java +++ b/test/jdk/java/util/BitSet/ImmutableBitSet.java @@ -34,6 +34,7 @@ import java.util.BitSet; import java.util.Random; import java.util.function.IntPredicate; +import java.util.stream.IntStream; import static org.junit.jupiter.api.Assertions.*; @@ -48,30 +49,37 @@ void empty() { @Test void negativeIndex() { - BitSet bs = new BitSet(); - IntPredicate ibs = ImmutableBitSetPredicate.of(bs); - assertThrows(IndexOutOfBoundsException.class, () -> { - ibs.test(-1); - }); + IntStream.of(0, 127, 128, 129, 143, 4711).forEach(k -> { + BitSet bs = new BitSet(k); + IntPredicate ibs = ImmutableBitSetPredicate.of(bs); + assertFalse(ibs.test(-1)); + assertFalse(ibs.test(Integer.MIN_VALUE)); + }); } @Test void basic() { - BitSet bs = createReference(147); + IntStream.of(0, 16, 127, 128, 129, 143, 4711).forEach(k -> basic(k)); + } + + void basic(int length) { + BitSet bs = createReference(length); IntPredicate ibs = ImmutableBitSetPredicate.of(bs); test(bs, ibs); } @Test void clearedAtTheTail() { - for (int i = Long.BYTES - 1; i < Long.BYTES + 2; i++) { - BitSet bs = createReference(i); - for (int j = bs.length() - 1; j > Long.BYTES - 1; j++) { - bs.clear(j); + IntStream.of(0, 16, 127, 128, 129, 143, 4711).forEach(k -> { + for (int i = Long.BYTES - 1; i < Long.BYTES + 2; i++) { + BitSet bs = createReference(k + i); + for (int j = bs.length() - 1; j > Long.BYTES - 1; j--) { + bs.clear(j); + } + IntPredicate ibs = ImmutableBitSetPredicate.of(bs); + test(bs, ibs); } - IntPredicate ibs = ImmutableBitSetPredicate.of(bs); - test(bs, ibs); - } + }); } static void test(BitSet expected, IntPredicate actual) { @@ -81,7 +89,7 @@ static void test(BitSet expected, IntPredicate actual) { } private static BitSet createReference(int length) { - BitSet result = new BitSet(); + BitSet result = new BitSet(length); Random random = new Random(length); for (int i = 0; i < length; i++) { result.set(i, random.nextBoolean()); From 2b8276aa5285da524e3f6514bd4954cfbdd3108c Mon Sep 17 00:00:00 2001 From: Alex Menkov Date: Tue, 10 Oct 2023 20:13:09 +0000 Subject: [PATCH 02/15] 8316691: Heap dump: separate stack traces for mounted virtual threads Reviewed-by: lmesnik, sspitsyn --- src/hotspot/share/services/heapDumper.cpp | 550 ++++++++++++------ .../vthread/HeapDump/VThreadInHeapDump.java | 300 ++++++++++ test/lib/jdk/test/lib/hprof/model/Root.java | 6 +- .../jdk/test/lib/hprof/model/Snapshot.java | 13 +- .../test/lib/hprof/model/ThreadObject.java | 56 ++ .../test/lib/hprof/parser/HprofReader.java | 38 +- 6 files changed, 756 insertions(+), 207 deletions(-) create mode 100644 test/hotspot/jtreg/serviceability/jvmti/vthread/HeapDump/VThreadInHeapDump.java create mode 100644 test/lib/jdk/test/lib/hprof/model/ThreadObject.java diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index e58d3e542c3ea..347dfa3c17b39 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -43,6 +43,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "runtime/continuationWrapper.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -1385,7 +1386,6 @@ class 
JNILocalsDumper : public OopClosure {
   void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
 };
 
-
 void JNILocalsDumper::do_oop(oop* obj_p) {
   // ignore null handles
   oop o = *obj_p;
@@ -1451,6 +1451,310 @@ class StickyClassDumper : public KlassClosure {
   }
 };
 
+// Support class used to generate HPROF_GC_ROOT_JAVA_FRAME records.
+
+class JavaStackRefDumper : public StackObj {
+private:
+  AbstractDumpWriter* _writer;
+  u4 _thread_serial_num;
+  int _frame_num;
+  AbstractDumpWriter* writer() const { return _writer; }
+public:
+  JavaStackRefDumper(AbstractDumpWriter* writer, u4 thread_serial_num)
+      : _writer(writer), _thread_serial_num(thread_serial_num), _frame_num(-1) // default - empty stack
+  {
+  }
+
+  void set_frame_number(int n) { _frame_num = n; }
+
+  void dump_java_stack_refs(StackValueCollection* values);
+};
+
+void JavaStackRefDumper::dump_java_stack_refs(StackValueCollection* values) {
+  for (int index = 0; index < values->size(); index++) {
+    if (values->at(index)->type() == T_OBJECT) {
+      oop o = values->obj_at(index)();
+      if (o != nullptr) {
+        u4 size = 1 + sizeof(address) + 4 + 4;
+        writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
+        writer()->write_objectID(o);
+        writer()->write_u4(_thread_serial_num);
+        writer()->write_u4((u4)_frame_num);
+        writer()->end_sub_record();
+      }
+    }
+  }
+}
+
+// Class to collect, store and dump thread-related data:
+// - HPROF_TRACE and HPROF_FRAME records;
+// - HPROF_GC_ROOT_THREAD_OBJ/HPROF_GC_ROOT_JAVA_FRAME/HPROF_GC_ROOT_JNI_LOCAL subrecords.
+class ThreadDumper : public CHeapObj<mtInternal> {
+public:
+  enum class ThreadType { Platform, MountedVirtual, UnmountedVirtual };
+
+private:
+  ThreadType _thread_type;
+  JavaThread* _java_thread;
+  oop _thread_oop;
+
+  GrowableArray<StackFrameInfo*>* _frames;
+  // non-null if the thread is OOM thread
+  Method* _oome_constructor;
+  int _thread_serial_num;
+  int _start_frame_serial_num;
+
+  vframe* get_top_frame() const;
+
+public:
+  static bool should_dump_pthread(JavaThread* thread) {
+    return thread->threadObj() != nullptr && !thread->is_exiting() && !thread->is_hidden_from_external_view();
+  }
+
+  static bool should_dump_vthread(oop vt) {
+    return java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::NEW
+        && java_lang_VirtualThread::state(vt) != java_lang_VirtualThread::TERMINATED;
+  }
+
+  ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop);
+
+  // affects frame_count
+  void add_oom_frame(Method* oome_constructor) {
+    assert(_start_frame_serial_num == 0, "add_oom_frame cannot be called after init_serial_nums");
+    _oome_constructor = oome_constructor;
+  }
+
+  void init_serial_nums(volatile int* thread_counter, volatile int* frame_counter) {
+    assert(_start_frame_serial_num == 0, "already initialized");
+    _thread_serial_num = Atomic::fetch_then_add(thread_counter, 1);
+    _start_frame_serial_num = Atomic::fetch_then_add(frame_counter, frame_count());
+  }
+
+  bool oom_thread() const {
+    return _oome_constructor != nullptr;
+  }
+
+  int frame_count() const {
+    return _frames->length() + (oom_thread() ? 1 : 0);
+  }
+
+  u4 thread_serial_num() const {
+    return (u4)_thread_serial_num;
+  }
+
+  u4 stack_trace_serial_num() const {
+    return (u4)(_thread_serial_num + STACK_TRACE_ID);
+  }
+
+  // writes HPROF_TRACE and HPROF_FRAME records
+  // returns number of dumped frames
+  void dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map);
+
+  // writes HPROF_GC_ROOT_THREAD_OBJ subrecord
+  void dump_thread_obj(AbstractDumpWriter* writer);
+
+  // Walk the stack of the thread.
+  // Dumps a HPROF_GC_ROOT_JAVA_FRAME subrecord for each local
+  // Dumps a HPROF_GC_ROOT_JNI_LOCAL subrecord for each JNI local
+  void dump_stack_refs(AbstractDumpWriter* writer);
+
+};
+
+ThreadDumper::ThreadDumper(ThreadType thread_type, JavaThread* java_thread, oop thread_oop)
+    : _thread_type(thread_type), _java_thread(java_thread), _thread_oop(thread_oop),
+      _oome_constructor(nullptr),
+      _thread_serial_num(0), _start_frame_serial_num(0)
+{
+  // sanity checks
+  if (_thread_type == ThreadType::UnmountedVirtual) {
+    assert(_java_thread == nullptr, "sanity");
+    assert(_thread_oop != nullptr, "sanity");
+  } else {
+    assert(_java_thread != nullptr, "sanity");
+    assert(_thread_oop != nullptr, "sanity");
+  }
+
+  _frames = new (mtServiceability) GrowableArray<StackFrameInfo*>(10, mtServiceability);
+  bool stop_at_vthread_entry = _thread_type == ThreadType::MountedVirtual;
+
+  // vframes are resource allocated
+  Thread* current_thread = Thread::current();
+  ResourceMark rm(current_thread);
+  HandleMark hm(current_thread);
+
+  for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
+    if (stop_at_vthread_entry && vf->is_vthread_entry()) {
+      break;
+    }
+    if (vf->is_java_frame()) {
+      javaVFrame* jvf = javaVFrame::cast(vf);
+      _frames->append(new StackFrameInfo(jvf, false));
+    } else {
+      // ignore non-Java frames
+    }
+  }
+}
+
+void ThreadDumper::dump_stack_traces(AbstractDumpWriter* writer, GrowableArray<Klass*>* klass_map) {
+  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_nums are not initialized");
+
+  // write HPROF_FRAME records for this thread's stack trace
+  int depth = _frames->length();
+  int frame_serial_num = _start_frame_serial_num;
+
+  if (oom_thread()) {
+    // OOM thread
+    // write fake frame that makes it look like the thread, which caused OOME,
+    // is in the OutOfMemoryError zero-parameter constructor
+    int oome_serial_num = klass_map->find(_oome_constructor->method_holder());
+    // the class serial number starts from 1
+    assert(oome_serial_num > 0, "OutOfMemoryError class not found");
+    DumperSupport::dump_stack_frame(writer, ++frame_serial_num, oome_serial_num, _oome_constructor, 0);
+    depth++;
+  }
+
+  for (int j = 0; j < _frames->length(); j++) {
+    StackFrameInfo* frame = _frames->at(j);
+    Method* m = frame->method();
+    int class_serial_num = klass_map->find(m->method_holder());
+    // the class serial number starts from 1
+    assert(class_serial_num > 0, "class not found");
+    DumperSupport::dump_stack_frame(writer, ++frame_serial_num, class_serial_num, m, frame->bci());
+  }
+
+  // write HPROF_TRACE record for the thread
+  DumperSupport::write_header(writer, HPROF_TRACE, checked_cast<u4>(3 * sizeof(u4) + depth * oopSize));
+  writer->write_u4(stack_trace_serial_num());   // stack trace serial number
+  writer->write_u4(thread_serial_num());        // thread serial number
+  writer->write_u4((u4)depth);                  // frame count (including oom frame)
+  for (int j = 1; j <= depth; j++) {
+    writer->write_id(_start_frame_serial_num + j);
+  }
+}
+
+void ThreadDumper::dump_thread_obj(AbstractDumpWriter * writer) {
+  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
+
+  u4 size = 1 + sizeof(address) + 4 + 4;
+  writer->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
+  writer->write_objectID(_thread_oop);
+  writer->write_u4(thread_serial_num());        // thread serial number
+  writer->write_u4(stack_trace_serial_num());   // stack trace serial number
+  writer->end_sub_record();
+}
+
+void ThreadDumper::dump_stack_refs(AbstractDumpWriter * writer) {
+  assert(_thread_serial_num != 0 && _start_frame_serial_num != 0, "serial_num is not initialized");
+
+  JNILocalsDumper blk(writer, thread_serial_num());
+  if (_thread_type == ThreadType::Platform) {
+    if (!_java_thread->has_last_Java_frame()) {
+      // no last java frame but there may be JNI locals
+      _java_thread->active_handles()->oops_do(&blk);
+      return;
+    }
+  }
+
+  JavaStackRefDumper java_ref_dumper(writer, thread_serial_num());
+
+  // vframes are resource allocated
+  Thread* current_thread = Thread::current();
+  ResourceMark rm(current_thread);
+  HandleMark hm(current_thread);
+
+  bool stopAtVthreadEntry = _thread_type == ThreadType::MountedVirtual;
+  frame* last_entry_frame = nullptr;
+  bool is_top_frame = true;
+  int depth = 0;
+  if (oom_thread()) {
+    depth++;
+  }
+
+  for (vframe* vf = get_top_frame(); vf != nullptr; vf = vf->sender()) {
+    if (stopAtVthreadEntry && vf->is_vthread_entry()) {
+      break;
+    }
+
+    if (vf->is_java_frame()) {
+      javaVFrame* jvf = javaVFrame::cast(vf);
+      if (!(jvf->method()->is_native())) {
+        java_ref_dumper.set_frame_number(depth);
+        java_ref_dumper.dump_java_stack_refs(jvf->locals());
+        java_ref_dumper.dump_java_stack_refs(jvf->expressions());
+      } else {
+        // native frame
+        blk.set_frame_number(depth);
+        if (is_top_frame) {
+          // JNI locals for the top frame.
+          assert(_java_thread != nullptr, "impossible for unmounted vthread");
+          _java_thread->active_handles()->oops_do(&blk);
+        } else {
+          if (last_entry_frame != nullptr) {
+            // JNI locals for the entry frame
+            assert(last_entry_frame->is_entry_frame(), "checking");
+            last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
+          }
+        }
+      }
+      last_entry_frame = nullptr;
+      // increment only for Java frames
+      depth++;
+    } else {
+      // externalVFrame - for an entry frame then we report the JNI locals
+      // when we find the corresponding javaVFrame
+      frame* fr = vf->frame_pointer();
+      assert(fr != nullptr, "sanity check");
+      if (fr->is_entry_frame()) {
+        last_entry_frame = fr;
+      }
+    }
+    is_top_frame = false;
+  }
+  assert(depth == frame_count(), "total number of Java frames not matched");
+}
+
+vframe* ThreadDumper::get_top_frame() const {
+  if (_thread_type == ThreadType::UnmountedVirtual) {
+    ContinuationWrapper cont(java_lang_VirtualThread::continuation(_thread_oop));
+    if (cont.is_empty()) {
+      return nullptr;
+    }
+    assert(!cont.is_mounted(), "sanity check");
+    stackChunkOop chunk = cont.last_nonempty_chunk();
+    if (chunk == nullptr || chunk->is_empty()) {
+      return nullptr;
+    }
+
+    RegisterMap reg_map(cont.continuation(), RegisterMap::UpdateMap::include);
+    frame fr = chunk->top_frame(&reg_map);
+    vframe* vf = vframe::new_vframe(&fr, &reg_map, nullptr); // don't need JavaThread
+    return vf;
+  }
+
+  RegisterMap reg_map(_java_thread,
+                      RegisterMap::UpdateMap::include,
+                      RegisterMap::ProcessFrames::include,
+                      RegisterMap::WalkContinuation::skip);
+  switch (_thread_type) {
+  case ThreadType::Platform:
+    if (!_java_thread->has_last_Java_frame()) {
+      return nullptr;
+    }
+    return _java_thread->is_vthread_mounted()
+        ? _java_thread->carrier_last_java_vframe(&reg_map)
+        : _java_thread->platform_thread_last_java_vframe(&reg_map);
+
+  case ThreadType::MountedVirtual:
+    return _java_thread->last_java_vframe(&reg_map);
+
+  default: // make compilers happy
+    break;
+  }
+  ShouldNotReachHere();
+  return nullptr;
+}
+
+
 class VM_HeapDumper;
 
 // Support class using when iterating over the heap.
@@ -1683,8 +1987,12 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
   Method* _oome_constructor;
   bool _gc_before_heap_dump;
   GrowableArray<Klass*>* _klass_map;
-  ThreadStackTrace** _stack_traces;
-  int _num_threads;
+
+  ThreadDumper** _thread_dumpers; // platform, carrier and mounted virtual threads
+  int _thread_dumpers_count;
+  volatile int _thread_serial_num;
+  volatile int _frame_serial_num;
+
   volatile int _dump_seq;
   // parallel heap dump support
   uint _num_dumper_threads;
@@ -1721,15 +2029,18 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
   // writes a HPROF_GC_CLASS_DUMP record for the given class
   static void do_class_dump(Klass* k);
 
-  // HPROF_GC_ROOT_THREAD_OBJ records
-  int do_thread(JavaThread* thread, u4 thread_serial_num);
-  void do_threads();
+  // HPROF_GC_ROOT_THREAD_OBJ records for platform and mounted virtual threads
+  void dump_threads();
 
   void add_class_serial_number(Klass* k, int serial_num) {
     _klass_map->at_put_grow(serial_num, k);
   }
 
-  // HPROF_TRACE and HPROF_FRAME records
+  bool is_oom_thread(JavaThread* thread) const {
+    return thread == _oome_thread && _oome_constructor != nullptr;
+  }
+
+  // HPROF_TRACE and HPROF_FRAME records for platform and mounted virtual threads
   void dump_stack_traces();
 
  public:
@@ -1742,8 +2053,12 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
     _local_writer = writer;
     _gc_before_heap_dump = gc_before_heap_dump;
     _klass_map = new (mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
-    _stack_traces = nullptr;
-    _num_threads = 0;
+
+    _thread_dumpers = nullptr;
+    _thread_dumpers_count = 0;
+    _thread_serial_num = 1;
+    _frame_serial_num = 1;
+
     _dump_seq = 0;
     _num_dumper_threads = num_dump_threads;
     _dumper_controller = nullptr;
@@ -1763,12 +2078,13 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask {
   }
 
   ~VM_HeapDumper() {
-    if (_stack_traces != nullptr) {
-      for (int i=0; i < _num_threads; i++) {
-        delete _stack_traces[i];
+    if (_thread_dumpers != nullptr) {
+      for (int i = 0; i < _thread_dumpers_count; i++) {
+        delete _thread_dumpers[i];
       }
-      FREE_C_HEAP_ARRAY(ThreadStackTrace*, _stack_traces);
+      FREE_C_HEAP_ARRAY(ThreadDumper*, _thread_dumpers);
     }
+
     if (_dumper_controller != nullptr) {
       delete _dumper_controller;
       _dumper_controller = nullptr;
@@ -1835,127 +2151,13 @@ void VM_HeapDumper::do_class_dump(Klass* k) {
   }
 }
 
-// Walk the stack of the given thread.
-// Dumps a HPROF_GC_ROOT_JAVA_FRAME record for each local
-// Dumps a HPROF_GC_ROOT_JNI_LOCAL record for each JNI local
-//
-// It returns the number of Java frames in this thread stack
-int VM_HeapDumper::do_thread(JavaThread* java_thread, u4 thread_serial_num) {
-  JNILocalsDumper blk(writer(), thread_serial_num);
-
-  oop threadObj = java_thread->threadObj();
-  assert(threadObj != nullptr, "sanity check");
-
-  int stack_depth = 0;
-  if (java_thread->has_last_Java_frame()) {
-
-    // vframes are resource allocated
-    Thread* current_thread = Thread::current();
-    ResourceMark rm(current_thread);
-    HandleMark hm(current_thread);
-
-    RegisterMap reg_map(java_thread,
-                        RegisterMap::UpdateMap::include,
-                        RegisterMap::ProcessFrames::include,
-                        RegisterMap::WalkContinuation::skip);
-    frame f = java_thread->last_frame();
-    vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
-    frame* last_entry_frame = nullptr;
-    int extra_frames = 0;
-
-    if (java_thread == _oome_thread && _oome_constructor != nullptr) {
-      extra_frames++;
-    }
-    while (vf != nullptr) {
-      blk.set_frame_number(stack_depth);
-      if (vf->is_java_frame()) {
-
-        // java frame (interpreted, compiled, ...)
-        javaVFrame *jvf = javaVFrame::cast(vf);
-        if (!(jvf->method()->is_native())) {
-          StackValueCollection* locals = jvf->locals();
-          for (int slot=0; slot<locals->size(); slot++) {
-            if (locals->at(slot)->type() == T_OBJECT) {
-              oop o = locals->obj_at(slot)();
-
-              if (o != nullptr) {
-                u4 size = 1 + sizeof(address) + 4 + 4;
-                writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
-                writer()->write_objectID(o);
-                writer()->write_u4(thread_serial_num);
-                writer()->write_u4((u4) (stack_depth + extra_frames));
-                writer()->end_sub_record();
-              }
-            }
-          }
-          StackValueCollection *exprs = jvf->expressions();
-          for(int index = 0; index < exprs->size(); index++) {
-            if (exprs->at(index)->type() == T_OBJECT) {
-              oop o = exprs->obj_at(index)();
-              if (o != nullptr) {
-                u4 size = 1 + sizeof(address) + 4 + 4;
-                writer()->start_sub_record(HPROF_GC_ROOT_JAVA_FRAME, size);
-                writer()->write_objectID(o);
-                writer()->write_u4(thread_serial_num);
-                writer()->write_u4((u4) (stack_depth + extra_frames));
-                writer()->end_sub_record();
-              }
-            }
-          }
-        } else {
-          // native frame
-          if (stack_depth == 0) {
-            // JNI locals for the top frame.
-            java_thread->active_handles()->oops_do(&blk);
-          } else {
-            if (last_entry_frame != nullptr) {
-              // JNI locals for the entry frame
-              assert(last_entry_frame->is_entry_frame(), "checking");
-              last_entry_frame->entry_frame_call_wrapper()->handles()->oops_do(&blk);
-            }
-          }
-        }
-        // increment only for Java frames
-        stack_depth++;
-        last_entry_frame = nullptr;
-
-      } else {
-        // externalVFrame - if it's an entry frame then report any JNI locals
-        // as roots when we find the corresponding native javaVFrame
-        frame* fr = vf->frame_pointer();
-        assert(fr != nullptr, "sanity check");
-        if (fr->is_entry_frame()) {
-          last_entry_frame = fr;
-        }
-      }
-      vf = vf->sender();
+// Write a HPROF_GC_ROOT_THREAD_OBJ record for platform/carrier and mounted virtual threads.
+// Then walk the stack so that locals and JNI locals are dumped.
+void VM_HeapDumper::dump_threads() {
+  for (int i = 0; i < _thread_dumpers_count; i++) {
+    _thread_dumpers[i]->dump_thread_obj(writer());
+    _thread_dumpers[i]->dump_stack_refs(writer());
     }
-  } else {
-    // no last java frame but there may be JNI locals
-    java_thread->active_handles()->oops_do(&blk);
-  }
-  return stack_depth;
-}
-
-
-// write a HPROF_GC_ROOT_THREAD_OBJ record for each java thread. Then walk
-// the stack so that locals and JNI locals are dumped.
-void VM_HeapDumper::do_threads() {
-  for (int i=0; i < _num_threads; i++) {
-    JavaThread* thread = _stack_traces[i]->thread();
-    oop threadObj = thread->threadObj();
-    u4 thread_serial_num = i+1;
-    u4 stack_serial_num = thread_serial_num + STACK_TRACE_ID;
-    u4 size = 1 + sizeof(address) + 4 + 4;
-    writer()->start_sub_record(HPROF_GC_ROOT_THREAD_OBJ, size);
-    writer()->write_objectID(threadObj);
-    writer()->write_u4(thread_serial_num);  // thread number
-    writer()->write_u4(stack_serial_num);   // stack trace serial number
-    writer()->end_sub_record();
-    int num_frames = do_thread(thread, thread_serial_num);
-    assert(num_frames == _stack_traces[i]->get_stack_depth(),
-           "total number of Java frames not matched");
-  }
 }
 
 bool VM_HeapDumper::doit_prologue() {
@@ -2100,6 +2302,8 @@ void VM_HeapDumper::work(uint worker_id) {
     // this must be called after _klass_map is built when iterating the classes above.
     dump_stack_traces();
 
+    // HPROF_HEAP_DUMP/HPROF_HEAP_DUMP_SEGMENT starts here
+
     // Writes HPROF_GC_CLASS_DUMP records
     {
       LockedClassesDo locked_dump_class(&do_class_dump);
@@ -2107,7 +2311,7 @@ void VM_HeapDumper::work(uint worker_id) {
     }
 
     // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
-    do_threads();
+    dump_threads();
 
     // HPROF_GC_ROOT_JNI_GLOBAL
     JNIGlobalsDumper jni_dumper(writer());
@@ -2163,58 +2367,44 @@ void VM_HeapDumper::work(uint worker_id) {
 
 void VM_HeapDumper::dump_stack_traces() {
   // write a HPROF_TRACE record without any frames to be referenced as object alloc sites
-  DumperSupport::write_header(writer(), HPROF_TRACE, 3*sizeof(u4));
-  writer()->write_u4((u4) STACK_TRACE_ID);
+  DumperSupport::write_header(writer(), HPROF_TRACE, 3 * sizeof(u4));
+  writer()->write_u4((u4)STACK_TRACE_ID);
   writer()->write_u4(0);                    // thread number
   writer()->write_u4(0);                    // frame count
 
-  _stack_traces = NEW_C_HEAP_ARRAY(ThreadStackTrace*, Threads::number_of_threads(), mtInternal);
-  int frame_serial_num = 0;
-  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
-    oop threadObj = thread->threadObj();
-    if (threadObj != nullptr && !thread->is_exiting() && !thread->is_hidden_from_external_view()) {
-      // dump thread stack trace
-      Thread* current_thread = Thread::current();
-      ResourceMark rm(current_thread);
-      HandleMark hm(current_thread);
-
-      ThreadStackTrace* stack_trace = new ThreadStackTrace(thread, false);
-      stack_trace->dump_stack_at_safepoint(-1, /* ObjectMonitorsHashtable is not needed here */ nullptr, true);
-      _stack_traces[_num_threads++] = stack_trace;
-
-      // write HPROF_FRAME records for this thread's stack trace
-      int depth = stack_trace->get_stack_depth();
-      int thread_frame_start = frame_serial_num;
-      int extra_frames = 0;
-      // write fake frame that makes it look like the thread, which caused OOME,
-      // is in the OutOfMemoryError zero-parameter constructor
-      if (thread == _oome_thread && _oome_constructor != nullptr) {
-        int oome_serial_num = _klass_map->find(_oome_constructor->method_holder());
-        // the class serial number starts from 1
-        assert(oome_serial_num > 0, "OutOfMemoryError class not found");
-        DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, oome_serial_num,
-                                        _oome_constructor, 0);
-        extra_frames++;
+  // max number if every platform thread is carrier with mounted virtual thread
+  _thread_dumpers = NEW_C_HEAP_ARRAY(ThreadDumper*, Threads::number_of_threads() * 2, mtInternal);
+
+  for (JavaThreadIteratorWithHandle jtiwh; JavaThread * thread = jtiwh.next(); ) {
+    if (ThreadDumper::should_dump_pthread(thread)) {
+      bool add_oom_frame = is_oom_thread(thread);
+
+      oop mounted_vt = thread->is_vthread_mounted() ? thread->vthread() : nullptr;
+      if (mounted_vt != nullptr && !ThreadDumper::should_dump_vthread(mounted_vt)) {
+        mounted_vt = nullptr;
       }
-      for (int j=0; j < depth; j++) {
-        StackFrameInfo* frame = stack_trace->stack_frame_at(j);
-        Method* m = frame->method();
-        int class_serial_num = _klass_map->find(m->method_holder());
-        // the class serial number starts from 1
-        assert(class_serial_num > 0, "class not found");
-        DumperSupport::dump_stack_frame(writer(), ++frame_serial_num, class_serial_num, m, frame->bci());
+
+      // mounted vthread (if any)
+      if (mounted_vt != nullptr) {
+        ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::MountedVirtual, thread, mounted_vt);
+        _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
+        if (add_oom_frame) {
+          thread_dumper->add_oom_frame(_oome_constructor);
+          // we add oom frame to the VT stack, don't add it to the carrier thread stack
+          add_oom_frame = false;
+        }
+        thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
+        thread_dumper->dump_stack_traces(writer(), _klass_map);
       }
-      depth += extra_frames;
-
-      // write HPROF_TRACE record for one thread
-      DumperSupport::write_header(writer(), HPROF_TRACE, checked_cast<u4>(3*sizeof(u4) + depth*oopSize));
-      int stack_serial_num = _num_threads + STACK_TRACE_ID;
-      writer()->write_u4(stack_serial_num);     // stack trace serial number
-      writer()->write_u4((u4) _num_threads);    // thread serial number
-      writer()->write_u4(depth);                // frame count
-      for (int j=1; j <= depth; j++) {
-        writer()->write_id(thread_frame_start + j);
+
+      // platform or carrier thread
+      ThreadDumper* thread_dumper = new ThreadDumper(ThreadDumper::ThreadType::Platform, thread, thread->threadObj());
+      _thread_dumpers[_thread_dumpers_count++] = thread_dumper;
+      if (add_oom_frame) {
+        thread_dumper->add_oom_frame(_oome_constructor);
       }
+      thread_dumper->init_serial_nums(&_thread_serial_num, &_frame_serial_num);
+      thread_dumper->dump_stack_traces(writer(), _klass_map);
     }
   }
 }
diff --git a/test/hotspot/jtreg/serviceability/jvmti/vthread/HeapDump/VThreadInHeapDump.java b/test/hotspot/jtreg/serviceability/jvmti/vthread/HeapDump/VThreadInHeapDump.java
new file mode 100644
index 0000000000000..ec9362bf53f5e
--- /dev/null
+++ b/test/hotspot/jtreg/serviceability/jvmti/vthread/HeapDump/VThreadInHeapDump.java
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */ + +import java.io.File; +import java.lang.ref.Reference; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import jdk.test.lib.Asserts; +import jdk.test.lib.JDKToolLauncher; +import jdk.test.lib.apps.LingeredApp; +import jdk.test.lib.process.ProcessTools; + +import jdk.test.lib.hprof.model.JavaClass; +import jdk.test.lib.hprof.model.JavaHeapObject; +import jdk.test.lib.hprof.model.Root; +import jdk.test.lib.hprof.model.Snapshot; +import jdk.test.lib.hprof.model.StackFrame; +import jdk.test.lib.hprof.model.StackTrace; +import jdk.test.lib.hprof.model.ThreadObject; +import jdk.test.lib.hprof.parser.Reader; + +/** + * @test id=default + * @requires vm.jvmti + * @requires vm.continuations + * @library /test/lib + * @run main VThreadInHeapDump + */ + +/** + * @test id=no-vmcontinuations + * @requires vm.jvmti + * @library /test/lib + * @comment pass extra VM arguments as the test arguments + * @run main VThreadInHeapDump + * -XX:+UnlockExperimentalVMOptions -XX:-VMContinuations + */ + +class VThreadInHeapDumpTarg extends LingeredApp { + + public static class VThreadUnmountedReferenced { + } + public static class VThreadMountedReferenced { + } + public static class PThreadReferenced { + } + + public class ThreadBase { + private volatile boolean threadReady = false; + + protected void ready() { + threadReady = true; + } + + public void waitReady() { + while (!threadReady) { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + } + } + } + } + + public class VthreadUnmounted extends ThreadBase implements Runnable { + public void run() { + Object referenced = new VThreadUnmountedReferenced(); + ready(); + // The thread will be unmounted in awaitToStop(). + awaitToStop(); + Reference.reachabilityFence(referenced); + } + } + + public class VthreadMounted extends ThreadBase implements Runnable { + int dummy = -1; + + public void run() { + Object referenced = new VThreadMountedReferenced(); + ready(); + // Don't give a chance for the thread to unmount. + while (!timeToStop) { + if (++dummy == 10000) { + dummy = 0; + } + } + Reference.reachabilityFence(referenced); + } + } + + public class Pthread extends ThreadBase implements Runnable { + public void run() { + Object referenced = new PThreadReferenced(); + ready(); + awaitToStop(); + Reference.reachabilityFence(referenced); + } + } + + CountDownLatch timeToStopLatch = new CountDownLatch(1); + volatile boolean timeToStop = false; + + void awaitToStop() { + try { + timeToStopLatch.await(); + } catch (InterruptedException e) { + } + } + + private void runTest(String[] args) { + try { + // Unmounted virtual thread. + VthreadUnmounted vthreadUnmounted = new VthreadUnmounted(); + Thread.ofVirtual().start(vthreadUnmounted); + vthreadUnmounted.waitReady(); + + // Mounted virtual thread. + VthreadMounted vthreadMounted = new VthreadMounted(); + Thread.ofVirtual().start(vthreadMounted); + vthreadMounted.waitReady(); + + // Platform thread. + Pthread pthread = new Pthread(); + Thread.ofPlatform().start(pthread); + pthread.waitReady(); + + // We are ready. + LingeredApp.main(args); + + } finally { + // Signal all threads to finish. 
+            timeToStop = true;
+            timeToStopLatch.countDown();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        VThreadInHeapDumpTarg test = new VThreadInHeapDumpTarg();
+        test.runTest(args);
+    }
+
+}
+
+
+public class VThreadInHeapDump {
+
+    // test arguments are extra VM options for target process
+    public static void main(String[] args) throws Exception {
+        File dumpFile = new File("Myheapdump.hprof");
+        createDump(dumpFile, args);
+        verifyDump(dumpFile);
+    }
+
+    private static void createDump(File dumpFile, String[] extraOptions) throws Exception {
+        LingeredApp theApp = null;
+        try {
+            theApp = new VThreadInHeapDumpTarg();
+
+            List<String> extraVMArgs = new ArrayList<>();
+            extraVMArgs.add("-Djdk.virtualThreadScheduler.parallelism=1");
+            extraVMArgs.addAll(Arrays.asList(extraOptions));
+            LingeredApp.startApp(theApp, extraVMArgs.toArray(new String[0]));
+
+            //jcmd <pid> GC.heap_dump <file_path>
+            JDKToolLauncher launcher = JDKToolLauncher
+                    .createUsingTestJDK("jcmd")
+                    .addToolArg(Long.toString(theApp.getPid()))
+                    .addToolArg("GC.heap_dump")
+                    .addToolArg(dumpFile.getAbsolutePath());
+            Process p = ProcessTools.startProcess("jcmd", new ProcessBuilder(launcher.getCommand()));
+            // If something goes wrong with heap dumping most likely we'll get crash of the target VM.
+            while (!p.waitFor(5, TimeUnit.SECONDS)) {
+                if (!theApp.getProcess().isAlive()) {
+                    log("ERROR: target VM died, killing jcmd...");
+                    p.destroyForcibly();
+                    throw new Exception("Target VM died");
+                }
+            }
+
+            if (p.exitValue() != 0) {
+                throw new Exception("Jcmd exited with code " + p.exitValue());
+            }
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+    }
+
+    private static void verifyDump(File dumpFile) throws Exception {
+        Asserts.assertTrue(dumpFile.exists(), "Heap dump file not found.");
+
+        log("Reading " + dumpFile + "...");
+        try (Snapshot snapshot = Reader.readFile(dumpFile.getPath(), true, 0)) {
+            log("Resolving snapshot...");
+            snapshot.resolve(true);
+            log("Snapshot resolved.");
+
+            // Log all threads with stack traces and stack references.
+            List<ThreadObject> threads = snapshot.getThreads();
+            List<Root> roots = Collections.list(snapshot.getRoots());
+            log("Threads:");
+            for (ThreadObject thread: threads) {
+                StackTrace st = thread.getStackTrace();
+                StackFrame[] frames = st.getFrames();
+                log("thread " + thread.getIdString() + ", " + frames.length + " frames");
+
+                List<Root> stackRoots = findStackRoot(roots, thread);
+                for (int i = 0; i < frames.length; i++) {
+                    log("  - [" + i + "] "
+                        + frames[i].getClassName() + "." + frames[i].getMethodName()
+                        + frames[i].getMethodSignature()
+                        + " (" + frames[i].getSourceFileName()
+                        + ":" + frames[i].getLineNumber() + ")");
+
+                    for (Root r: stackRoots) {
+                        StackFrame[] rootFrames = r.getStackTrace().getFrames();
+                        // the frame this local belongs to
+                        StackFrame frame = rootFrames[rootFrames.length - 1];
+                        if (frame == frames[i]) {
+                            JavaHeapObject obj = snapshot.findThing(r.getId());
+                            JavaClass objClass = obj.getClazz();
+                            log("      " + r.getDescription() + ": " + objClass.getName());
+                        }
+                    }
+                }
+            }
+
+            // Verify objects from thread stacks are dumped.
+            test(snapshot, VThreadInHeapDumpTarg.VThreadMountedReferenced.class);
+            test(snapshot, VThreadInHeapDumpTarg.PThreadReferenced.class);
+            // Dumping of unmounted vthreads is not implemented yet
+            //test(snapshot, VThreadInHeapDumpTarg.VThreadUnmountedReferenced.class);
+        }
+
+    }
+
+    private static List<Root> findStackRoot(List<Root> roots, ThreadObject thread) {
+        List<Root> result = new ArrayList<>();
+        for (Root root: roots) {
+            if (root.getRefererId() == thread.getId()) {
+                result.add(root);
+            }
+        }
+        return result;
+    }
+
+    private static void test(Snapshot snapshot, String className) {
+        log("Testing " + className + "...");
+        JavaClass jClass = snapshot.findClass(className);
+        if (jClass == null) {
+            throw new RuntimeException("'" + className + "' not found");
+        }
+        int instanceCount = jClass.getInstancesCount(false);
+        if (instanceCount != 1) {
+            throw new RuntimeException("Expected 1 instance, " + instanceCount + " instances found");
+        }
+        // There is the only instance.
+        JavaHeapObject heapObj = jClass.getInstances(false).nextElement();
+
+        Root root = heapObj.getRoot();
+        if (root == null) {
+            throw new RuntimeException("No root for " + className + " instance");
+        }
+        log("  root: " + root.getDescription());
+        JavaHeapObject referrer = root.getReferer();
+        log("  referrer: " + referrer);
+    }
+
+    private static void test(Snapshot snapshot, Class<?> cls) {
+        test(snapshot, cls.getName());
+    }
+
+    private static void log(Object s) {
+        System.out.println(s);
+    }
+}
diff --git a/test/lib/jdk/test/lib/hprof/model/Root.java b/test/lib/jdk/test/lib/hprof/model/Root.java
index b223c1370a5c6..f8375ed6c4c94 100644
--- a/test/lib/jdk/test/lib/hprof/model/Root.java
+++ b/test/lib/jdk/test/lib/hprof/model/Root.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -141,6 +141,10 @@ public JavaHeapObject getReferer() {
         return referer;
     }
 
+    public long getRefererId() {
+        return refererId;
+    }
+
     /**
      * @return the stack trace responsible for this root, or null if there
      * is none.
diff --git a/test/lib/jdk/test/lib/hprof/model/Snapshot.java b/test/lib/jdk/test/lib/hprof/model/Snapshot.java
index 05902d7028ea3..219f80117941c 100644
--- a/test/lib/jdk/test/lib/hprof/model/Snapshot.java
+++ b/test/lib/jdk/test/lib/hprof/model/Snapshot.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,9 @@ public class Snapshot implements AutoCloseable {
     // soft cache of finalizeable objects - lazily initialized
     private SoftReference<Vector<JavaHeapObject>> finalizablesCache;
 
+    // threads
+    private ArrayList<ThreadObject> threads = new ArrayList<>();
+
     // represents null reference
     private JavaThing nullThing;
 
@@ -175,6 +178,10 @@ public void addClass(long id, JavaClass c) {
         putInClassesMap(c);
     }
 
+    public void addThreadObject(ThreadObject thread) {
+        threads.add(thread);
+    }
+
     JavaClass addFakeInstanceClass(long classID, int instSize) {
         // Create a fake class name based on ID.
String name = "unknown-class<@" + Misc.toHex(classID) + ">";
@@ -433,6 +440,10 @@ public Root getRootAt(int i) {
         return roots.elementAt(i);
     }
 
+    public List<ThreadObject> getThreads() {
+        return Collections.unmodifiableList(threads);
+    }
+
     public ReferenceChain[] rootsetReferencesTo(JavaHeapObject target, boolean includeWeak) {
         Vector<ReferenceChain> fifo = new Vector<ReferenceChain>();
         // This is slow...  A real fifo would help
diff --git a/test/lib/jdk/test/lib/hprof/model/ThreadObject.java b/test/lib/jdk/test/lib/hprof/model/ThreadObject.java
new file mode 100644
index 0000000000000..86a3d1c5fe10f
--- /dev/null
+++ b/test/lib/jdk/test/lib/hprof/model/ThreadObject.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.test.lib.hprof.model;
+
+import jdk.test.lib.hprof.util.Misc;
+
+public class ThreadObject {
+
+    private final long id;    // ID of the JavaThing we refer to
+    private final StackTrace stackTrace;
+
+    public ThreadObject(long id, StackTrace stackTrace) {
+        this.id = id;
+        this.stackTrace = stackTrace;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public String getIdString() {
+        return Misc.toHex(id);
+    }
+
+    public StackTrace getStackTrace() {
+        return stackTrace;
+    }
+
+    void resolve(Snapshot ss) {
+        if (stackTrace != null) {
+            stackTrace.resolve(ss);
+        }
+    }
+
+}
diff --git a/test/lib/jdk/test/lib/hprof/parser/HprofReader.java b/test/lib/jdk/test/lib/hprof/parser/HprofReader.java
index ac76663677145..08deac80c0eea 100644
--- a/test/lib/jdk/test/lib/hprof/parser/HprofReader.java
+++ b/test/lib/jdk/test/lib/hprof/parser/HprofReader.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -436,8 +436,10 @@ private void readHeapDump(long bytesLeft, long posAtEnd) throws IOException {
                 int threadSeq = in.readInt();
                 int stackSeq = in.readInt();
                 bytesLeft -= identifierSize + 8;
-                threadObjects.put(threadSeq,
-                                  new ThreadObject(id, stackSeq));
+                StackTrace st = getStackTraceFromSerial(stackSeq);
+                ThreadObject threadObj = new ThreadObject(id, st);
+                threadObjects.put(threadSeq, threadObj);
+                snapshot.addThreadObject(threadObj);
                 break;
             }
             case HPROF_GC_ROOT_JNI_GLOBAL: {
@@ -453,11 +455,11 @@ private void readHeapDump(long bytesLeft, long posAtEnd) throws IOException {
                 int depth = in.readInt();
                 bytesLeft -= identifierSize + 8;
                 ThreadObject to = getThreadObjectFromSequence(threadSeq);
-                StackTrace st = getStackTraceFromSerial(to.stackSeq);
+                StackTrace st = to.getStackTrace();
                 if (st != null) {
                     st = st.traceForDepth(depth+1);
                 }
-                snapshot.addRoot(new Root(id, to.threadId,
+                snapshot.addRoot(new Root(id, to.getId(),
                                           Root.NATIVE_LOCAL, "", st));
                 break;
             }
@@ -467,11 +469,11 @@ private void readHeapDump(long bytesLeft, long posAtEnd) throws IOException {
                 int depth = in.readInt();
                 bytesLeft -= identifierSize + 8;
                 ThreadObject to = getThreadObjectFromSequence(threadSeq);
-                StackTrace st = getStackTraceFromSerial(to.stackSeq);
+                StackTrace st = to.getStackTrace();
                 if (st != null) {
                     st = st.traceForDepth(depth+1);
                 }
-                snapshot.addRoot(new Root(id, to.threadId,
+                snapshot.addRoot(new Root(id, to.getId(),
                                           Root.JAVA_LOCAL, "", st));
                 break;
             }
@@ -480,8 +482,8 @@ private void readHeapDump(long bytesLeft, long posAtEnd) throws IOException {
                 int threadSeq = in.readInt();
                 bytesLeft -= identifierSize + 4;
                 ThreadObject to = getThreadObjectFromSequence(threadSeq);
-                StackTrace st = getStackTraceFromSerial(to.stackSeq);
-                snapshot.addRoot(new Root(id, to.threadId,
+                StackTrace st = to.getStackTrace();
+                snapshot.addRoot(new Root(id, to.getId(),
                                           Root.NATIVE_STACK, "", st));
                 break;
             }
@@ -496,8 +498,8 @@ private void readHeapDump(long bytesLeft, long posAtEnd) throws IOException {
                 int threadSeq = in.readInt();
                 bytesLeft -= identifierSize + 4;
                 ThreadObject to = getThreadObjectFromSequence(threadSeq);
-                StackTrace st = getStackTraceFromSerial(to.stackSeq);
-                snapshot.addRoot(new Root(id, to.threadId,
+                StackTrace st = to.getStackTrace();
+                snapshot.addRoot(new Root(id, to.getId(),
                                           Root.THREAD_BLOCK, "", st));
                 break;
             }
@@ -913,18 +915,4 @@ private void warn(String msg) {
         System.out.println("WARNING: " + msg);
     }
 
-    //
-    // A trivial data-holder class for HPROF_GC_ROOT_THREAD_OBJ.
- // - private class ThreadObject { - - long threadId; - int stackSeq; - - ThreadObject(long threadId, int stackSeq) { - this.threadId = threadId; - this.stackSeq = stackSeq; - } - } - } From fec1d497835de2a37d056f1d6642deac09541118 Mon Sep 17 00:00:00 2001 From: Leonid Mesnik Date: Tue, 10 Oct 2023 20:14:56 +0000 Subject: [PATCH 03/15] 8316452: java/lang/instrument/modules/AppendToClassPathModuleTest.java ignores VM flags Reviewed-by: sspitsyn --- .../lang/instrument/modules/AppendToClassPathModuleTest.java | 1 + 1 file changed, 1 insertion(+) diff --git a/test/jdk/java/lang/instrument/modules/AppendToClassPathModuleTest.java b/test/jdk/java/lang/instrument/modules/AppendToClassPathModuleTest.java index 9eb3cff391b45..943abe7385150 100644 --- a/test/jdk/java/lang/instrument/modules/AppendToClassPathModuleTest.java +++ b/test/jdk/java/lang/instrument/modules/AppendToClassPathModuleTest.java @@ -24,6 +24,7 @@ /** * @test * @bug 8169909 + * @requires vm.flagless * @library src /test/lib * @build test/* * @run shell AppendToClassPathModuleTest.sh From f40ea5109e4ea8a78aebdb90ce8eec3830096a9c Mon Sep 17 00:00:00 2001 From: "lawrence.andrews" <87324768+lawrence-andrew@users.noreply.github.com> Date: Tue, 10 Oct 2023 22:52:22 +0000 Subject: [PATCH 04/15] 8317751: ProblemList ConsumeForModalDialogTest.java, MenuItemActivatedTest.java & MouseModifiersUnitTest_Standard.java for windows Reviewed-by: prr --- test/jdk/ProblemList.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt index 519d59cfa2ac1..b7e7dbdd59de0 100644 --- a/test/jdk/ProblemList.txt +++ b/test/jdk/ProblemList.txt @@ -456,6 +456,9 @@ java/awt/MenuBar/TestNoScreenMenuBar.java 8265987 macosx-all java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java 8266283 generic-all java/awt/Graphics2D/DrawString/RotTransText.java 8316878 linux-all java/awt/KeyboardFocusmanager/TypeAhead/ButtonActionKeyTest/ButtonActionKeyTest.java 8257529 windows-x64 +java/awt/KeyboardFocusmanager/ConsumeNextMnemonicKeyTypedTest/ConsumeForModalDialogTest/ConsumeForModalDialogTest.java 8302787 windows-all +java/awt/KeyboardFocusmanager/TypeAhead/MenuItemActivatedTest/MenuItemActivatedTest.java 8302787 windows-all +java/awt/Mouse/MouseModifiersUnitTest/MouseModifiersUnitTest_Standard.java 8302787 windows-all java/awt/Window/GetScreenLocation/GetScreenLocationTest.java 8225787 linux-x64 java/awt/Dialog/MakeWindowAlwaysOnTop/MakeWindowAlwaysOnTop.java 8266243 macosx-aarch64 From 54861df3d9e29a86dcfcecc4eb5072cc3f006069 Mon Sep 17 00:00:00 2001 From: Jaikiran Pai Date: Wed, 11 Oct 2023 00:54:54 +0000 Subject: [PATCH 05/15] 8317802: jmh tests fail with Unable to find the resource: /META-INF/BenchmarkList after JDK-8306819 Reviewed-by: erikj, ihse --- make/test/BuildMicrobenchmark.gmk | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/make/test/BuildMicrobenchmark.gmk b/make/test/BuildMicrobenchmark.gmk index c0484cba30f19..0a6a56f2a2e05 100644 --- a/make/test/BuildMicrobenchmark.gmk +++ b/make/test/BuildMicrobenchmark.gmk @@ -84,7 +84,9 @@ $(eval $(call SetupJavaCompilation, BUILD_INDIFY, \ #### Compile Targets # Building microbenchmark requires the jdk.unsupported and java.management modules. -# sun.security.util is required to compile Cache benchmark +# sun.security.util is required to compile Cache benchmark. +# jmh uses annotation processors to generate the benchmark jar and thus +# requires the use of -processor option during benchmark compilation. 
# Build microbenchmark suite for the current JDK $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \ @@ -106,7 +108,8 @@ $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \ --add-exports java.base/jdk.internal.org.objectweb.asm.tree=ALL-UNNAMED \ --add-exports java.base/jdk.internal.vm=ALL-UNNAMED \ --add-exports java.base/jdk.internal.event=ALL-UNNAMED \ - --enable-preview, \ + --enable-preview \ + -processor org.openjdk.jmh.generators.BenchmarkProcessor, \ JAVA_FLAGS := --add-modules jdk.unsupported --limit-modules java.management \ --add-exports java.base/jdk.internal.vm=ALL-UNNAMED \ --enable-preview, \ From 3aa4cba17520a488aa4a338a80f573af10d3e657 Mon Sep 17 00:00:00 2001 From: Jayathirth D V Date: Wed, 11 Oct 2023 03:36:17 +0000 Subject: [PATCH 06/15] 8316975: Memory leak in MTLSurfaceData Reviewed-by: prr --- .../libawt_lwawt/java2d/metal/MTLSurfaceData.m | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLSurfaceData.m b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLSurfaceData.m index 129ba99adcc6e..90b77879b2a7d 100644 --- a/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLSurfaceData.m +++ b/src/java.desktop/macosx/native/libawt_lwawt/java2d/metal/MTLSurfaceData.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -296,6 +296,11 @@ static jboolean MTLSurfaceData_initTexture(BMTLSDOps *bmtlsdo, jboolean isOpaque BMTLSDOps *bmtlsdo = (BMTLSDOps *)SurfaceData_InitOps(env, mtlsd, sizeof(BMTLSDOps)); MTLSDOps *mtlsdo = (MTLSDOps *)malloc(sizeof(MTLSDOps)); + if (mtlsdo == NULL) { + JNU_ThrowOutOfMemoryError(env, "Initialization of SurfaceData failed."); + return; + } + J2dTraceLn1(J2D_TRACE_INFO, "MTLSurfaceData_initOps p=%p", bmtlsdo); J2dTraceLn1(J2D_TRACE_INFO, " pPeerData=%p", jlong_to_ptr(pPeerData)); J2dTraceLn1(J2D_TRACE_INFO, " layerPtr=%p", jlong_to_ptr(layerPtr)); @@ -303,12 +308,7 @@ static jboolean MTLSurfaceData_initTexture(BMTLSDOps *bmtlsdo, jboolean isOpaque gc = (*env)->NewGlobalRef(env, gc); if (gc == NULL) { - JNU_ThrowOutOfMemoryError(env, "Initialization of SurfaceData failed."); - return; - } - - if (mtlsdo == NULL) { - (*env)->DeleteGlobalRef(env, gc); + free(mtlsdo); JNU_ThrowOutOfMemoryError(env, "Initialization of SurfaceData failed."); return; } From 84b7cc15c20581a14cdd2a590e0a30b1ef9acddb Mon Sep 17 00:00:00 2001 From: Ioi Lam Date: Wed, 11 Oct 2023 05:11:41 +0000 Subject: [PATCH 07/15] 8317761: Combine two versions of print_statistics() in java.cpp Reviewed-by: kvn, vlivanov --- src/hotspot/share/runtime/java.cpp | 57 +++------------------ src/hotspot/share/runtime/sharedRuntime.hpp | 3 +- 2 files changed, 9 insertions(+), 51 deletions(-) diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 10e0bfc837d78..6f22798874817 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -225,6 +225,13 @@ void print_bytecode_count() { } } +#else + +void print_method_invocation_histogram() {} +void print_bytecode_count() {} + +#endif // PRODUCT + // General statistics printing (profiling ...) 
void print_statistics() { @@ -337,56 +344,6 @@ void print_statistics() { ThreadsSMRSupport::log_statistics(); } -#else // PRODUCT MODE STATISTICS - -void print_statistics() { - - if (PrintMethodData) { - print_method_profiling_data(); - } - - if (CITime) { - CompileBroker::print_times(); - } - -#ifdef COMPILER2_OR_JVMCI - if ((LogVMOutput || LogCompilation) && UseCompiler) { - // Only print the statistics to the log file - FlagSetting fs(DisplayVMOutput, false); - Deoptimization::print_statistics(); - } -#endif /* COMPILER2 || INCLUDE_JVMCI */ - - if (PrintCodeCache) { - MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::print(); - } - - // CodeHeap State Analytics. - if (PrintCodeHeapAnalytics) { - CompileBroker::print_heapinfo(nullptr, "all", 4096); // details - } - -#ifdef COMPILER2 - if (PrintPreciseRTMLockingStatistics) { - OptoRuntime::print_named_counters(); - } -#endif - - // Native memory tracking data - if (PrintNMTStatistics) { - MemTracker::final_report(tty); - } - - if (PrintMetaspaceStatisticsAtExit) { - MetaspaceUtils::print_basic_report(tty, 0); - } - - ThreadsSMRSupport::log_statistics(); -} - -#endif - // Note: before_exit() can be executed only once, if more than one threads // are trying to shutdown the VM at the same time, only one thread // can run before_exit() and all other threads must wait. diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index b401e057e0566..b557d9cda9cc9 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -575,10 +575,11 @@ class SharedRuntime: AllStatic { static address nof_interface_calls_addr() { return (address)&_nof_interface_calls; } static address nof_inlined_interface_calls_addr() { return (address)&_nof_inlined_interface_calls; } static void print_call_statistics(uint64_t comp_total); - static void print_statistics(); static void print_ic_miss_histogram(); #endif // PRODUCT + + static void print_statistics() PRODUCT_RETURN; }; From e55c482ce151afb9fd52ae16c2c419279852c1d5 Mon Sep 17 00:00:00 2001 From: Christoph Langer Date: Wed, 11 Oct 2023 05:55:52 +0000 Subject: [PATCH 08/15] 8317790: Fix Bug entry for exclusion of runtime/jni/terminatedThread/TestTerminatedThread.java on AIX Reviewed-by: mbaesken --- test/hotspot/jtreg/ProblemList.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt index 6164068c9d11b..a76c41f8b385d 100644 --- a/test/hotspot/jtreg/ProblemList.txt +++ b/test/hotspot/jtreg/ProblemList.txt @@ -91,7 +91,7 @@ gc/stress/TestStressG1Humongous.java 8286554 windows-x64 # :hotspot_runtime -runtime/jni/terminatedThread/TestTerminatedThread.java 8219652 aix-ppc64 +runtime/jni/terminatedThread/TestTerminatedThread.java 8317789 aix-ppc64 runtime/handshake/HandshakeSuspendExitTest.java 8294313 generic-all runtime/os/TestTracePageSizes.java#no-options 8267460 linux-aarch64 runtime/os/TestTracePageSizes.java#explicit-large-page-size 8267460 linux-aarch64 From 1161e3da14dde739aa6d76bba082662babb8d2d8 Mon Sep 17 00:00:00 2001 From: Christoph Langer Date: Wed, 11 Oct 2023 05:57:28 +0000 Subject: [PATCH 09/15] 8317803: Exclude java/net/Socket/asyncClose/Race.java on AIX Reviewed-by: mbaesken --- test/jdk/ProblemList.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt index b7e7dbdd59de0..44f458195ae4a 100644 --- a/test/jdk/ProblemList.txt +++ 
b/test/jdk/ProblemList.txt @@ -557,6 +557,8 @@ java/net/MulticastSocket/Test.java 7145658,8308807 java/net/ServerSocket/AcceptInheritHandle.java 8211854 aix-ppc64 +java/net/Socket/asyncClose/Race.java 8317801 aix-ppc64 + ############################################################################ # jdk_nio From 0fd807118c9b2cb8381b5c0f5d22d4e3451e8f10 Mon Sep 17 00:00:00 2001 From: Tobias Hartmann Date: Wed, 11 Oct 2023 06:18:34 +0000 Subject: [PATCH 10/15] 8317738: CodeCacheFullCountTest failed with "VirtualMachineError: Out of space in CodeCache for method handle intrinsic" Reviewed-by: kvn --- .../jtreg/compiler/codecache/CodeCacheFullCountTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/hotspot/jtreg/compiler/codecache/CodeCacheFullCountTest.java b/test/hotspot/jtreg/compiler/codecache/CodeCacheFullCountTest.java index a9c6b34509e62..577cb03a9397c 100644 --- a/test/hotspot/jtreg/compiler/codecache/CodeCacheFullCountTest.java +++ b/test/hotspot/jtreg/compiler/codecache/CodeCacheFullCountTest.java @@ -67,7 +67,7 @@ public static void runTest() throws Throwable { "-XX:ReservedCodeCacheSize=2496k", "-XX:-UseCodeCacheFlushing", "-XX:-MethodFlushing", "CodeCacheFullCountTest", "WasteCodeCache"); OutputAnalyzer oa = ProcessTools.executeProcess(pb); // Ignore adapter creation failures - if (oa.getExitValue() != 0 && !oa.getOutput().contains("Out of space in CodeCache for adapters")) { + if (oa.getExitValue() != 0 && !oa.getOutput().contains("Out of space in CodeCache")) { oa.reportDiagnosticSummary(); throw new RuntimeException("VM finished with exit code " + oa.getExitValue()); } From a9b41da9df398ae7e2cf598b2779808d16504e14 Mon Sep 17 00:00:00 2001 From: Matthias Baesken Date: Wed, 11 Oct 2023 06:43:45 +0000 Subject: [PATCH 11/15] 8317603: Improve exception messages thrown by sun.nio.ch.Net native methods (win) Reviewed-by: vtewari, alanb, djelinski --- .../native/libnio/ch/DatagramChannelImpl.c | 12 ++++-- .../windows/native/libnio/ch/IOUtil.c | 5 +-- src/java.base/windows/native/libnio/ch/Net.c | 40 ++++++++----------- .../native/libnio/ch/UnixDomainSockets.c | 5 ++- .../windows/native/libnio/ch/nio_util.h | 3 +- 5 files changed, 31 insertions(+), 34 deletions(-) diff --git a/src/java.base/windows/native/libnio/ch/DatagramChannelImpl.c b/src/java.base/windows/native/libnio/ch/DatagramChannelImpl.c index 25c0370f66e86..54a4133a3b226 100644 --- a/src/java.base/windows/native/libnio/ch/DatagramChannelImpl.c +++ b/src/java.base/windows/native/libnio/ch/DatagramChannelImpl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,7 @@ Java_sun_nio_ch_DatagramChannelImpl_disconnect0(JNIEnv *env, jclass clazz, rv = connect((SOCKET)fd, &sa.sa, sa_len); if (rv == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "connect"); } else { /* Disable WSAECONNRESET errors as socket is no longer connected */ BOOL enable = FALSE; @@ -136,7 +136,10 @@ Java_sun_nio_ch_DatagramChannelImpl_receive0(JNIEnv *env, jclass clazz, } } else if (theErr == WSAEWOULDBLOCK) { return IOS_UNAVAILABLE; - } else return handleSocketError(env, theErr); + } else { + NET_ThrowNew(env, theErr, "recvfrom"); + return IOS_THROWN; + } } } while (retry); @@ -160,7 +163,8 @@ Java_sun_nio_ch_DatagramChannelImpl_send0(JNIEnv *env, jclass clazz, if (theErr == WSAEWOULDBLOCK) { return IOS_UNAVAILABLE; } - return handleSocketError(env, (jint)WSAGetLastError()); + NET_ThrowNew(env, (jint)WSAGetLastError(), "sendto"); + return IOS_THROWN; } return rv; } diff --git a/src/java.base/windows/native/libnio/ch/IOUtil.c b/src/java.base/windows/native/libnio/ch/IOUtil.c index 511fcdcadb260..850c237d9e908 100644 --- a/src/java.base/windows/native/libnio/ch/IOUtil.c +++ b/src/java.base/windows/native/libnio/ch/IOUtil.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -149,8 +149,7 @@ Java_sun_nio_ch_IOUtil_configureBlocking(JNIEnv *env, jclass clazz, } result = ioctlsocket(fd, FIONBIO, &argp); if (result == SOCKET_ERROR) { - int error = WSAGetLastError(); - handleSocketError(env, (jint)error); + NET_ThrowNew(env, WSAGetLastError(), "ioctlsocket"); } } diff --git a/src/java.base/windows/native/libnio/ch/Net.c b/src/java.base/windows/native/libnio/ch/Net.c index 41a08666d42cd..5cc5a2cd53f3c 100644 --- a/src/java.base/windows/native/libnio/ch/Net.c +++ b/src/java.base/windows/native/libnio/ch/Net.c @@ -77,12 +77,6 @@ static void setConnectionReset(SOCKET s, BOOL enable) { NULL, 0, &bytesReturned, NULL, NULL); } -jint handleSocketError(JNIEnv *env, int errorValue) -{ - NET_ThrowNew(env, errorValue, NULL); - return IOS_THROWN; -} - static jclass isa_class; /* java.net.InetSocketAddress */ static jmethodID isa_ctorID; /* InetSocketAddress(InetAddress, int) */ @@ -392,7 +386,7 @@ Java_sun_nio_ch_Net_getIntOption0(JNIEnv *env, jclass clazz, jobject fdo, n = getsockopt(fdval(env, fdo), level, opt, arg, &arglen); } if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "getsockopt"); return IOS_THROWN; } @@ -436,7 +430,7 @@ Java_sun_nio_ch_Net_setIntOption0(JNIEnv *env, jclass clazz, jobject fdo, n = setsockopt(fdval(env, fdo), level, opt, parg, arglen); } if (n == SOCKET_ERROR) - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsocketopt"); } JNIEXPORT jint JNICALL @@ -467,7 +461,7 @@ Java_sun_nio_ch_Net_joinOrDrop4(JNIEnv *env, jobject this, jboolean join, jobjec if (n == SOCKET_ERROR) { if (join && (WSAGetLastError() == WSAENOPROTOOPT)) return IOS_UNAVAILABLE; - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsocketopt"); } return 0; } @@ -489,7 +483,7 @@ Java_sun_nio_ch_Net_blockOrUnblock4(JNIEnv *env, jobject this, jboolean block, j if (n == 
SOCKET_ERROR) { if (block && (WSAGetLastError() == WSAENOPROTOOPT)) return IOS_UNAVAILABLE; - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsockopt"); } return 0; } @@ -542,7 +536,7 @@ Java_sun_nio_ch_Net_joinOrDrop6(JNIEnv *env, jobject this, jboolean join, jobjec } if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsockopt"); } return 0; } @@ -554,7 +548,7 @@ Java_sun_nio_ch_Net_blockOrUnblock6(JNIEnv *env, jobject this, jboolean block, j int opt = (block) ? MCAST_BLOCK_SOURCE : MCAST_UNBLOCK_SOURCE; int n = setGroupSourceReqOption(env, fdo, opt, group, index, source); if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsocketopt to block or unblock source"); } return 0; } @@ -571,7 +565,7 @@ Java_sun_nio_ch_Net_setInterface4(JNIEnv* env, jobject this, jobject fdo, jint i n = setsockopt(fdval(env, fdo), IPPROTO_IP, IP_MULTICAST_IF, (void*)&(in.s_addr), arglen); if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsockopt"); } } @@ -584,7 +578,7 @@ Java_sun_nio_ch_Net_getInterface4(JNIEnv* env, jobject this, jobject fdo) n = getsockopt(fdval(env, fdo), IPPROTO_IP, IP_MULTICAST_IF, (void*)&in, &arglen); if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "getsockopt"); return IOS_THROWN; } return ntohl(in.s_addr); @@ -600,7 +594,7 @@ Java_sun_nio_ch_Net_setInterface6(JNIEnv* env, jobject this, jobject fdo, jint i n = setsockopt(fdval(env, fdo), IPPROTO_IPV6, IPV6_MULTICAST_IF, (void*)&(index), arglen); if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "setsockopt"); } } @@ -613,7 +607,7 @@ Java_sun_nio_ch_Net_getInterface6(JNIEnv* env, jobject this, jobject fdo) n = getsockopt(fdval(env, fdo), IPPROTO_IPV6, IPV6_MULTICAST_IF, (void*)&index, &arglen); if (n == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "getsockopt"); return -1; } return (jint)index; @@ -631,12 +625,12 @@ Java_sun_nio_ch_Net_shutdown(JNIEnv *env, jclass cl, jobject fdo, jint jhow) { JNIEXPORT jint JNICALL Java_sun_nio_ch_Net_available(JNIEnv *env, jclass cl, jobject fdo) { - int count = 0; - if (NET_SocketAvailable(fdval(env, fdo), &count) != 0) { - handleSocketError(env, WSAGetLastError()); + u_long arg; + if (ioctlsocket((SOCKET) fdval(env, fdo), FIONREAD, &arg) == SOCKET_ERROR) { + NET_ThrowNew(env, WSAGetLastError(), "ioctlsocket"); return IOS_THROWN; } - return (jint) count; + return (jint) arg; } JNIEXPORT jint JNICALL @@ -667,7 +661,7 @@ Java_sun_nio_ch_Net_poll(JNIEnv* env, jclass this, jobject fdo, jint events, jlo /* save last winsock error */ if (rv == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "select"); return IOS_THROWN; } else if (rv >= 0) { rv = 0; @@ -707,7 +701,7 @@ Java_sun_nio_ch_Net_pollConnect(JNIEnv* env, jclass this, jobject fdo, jlong tim result = select(fd+1, 0, &wr, &ex, (timeout >= 0) ? 
&t : NULL); if (result == SOCKET_ERROR) { - handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "select"); return JNI_FALSE; } else if (result == 0) { return JNI_FALSE; @@ -727,7 +721,7 @@ Java_sun_nio_ch_Net_pollConnect(JNIEnv* env, jclass this, jobject fdo, jlong tim NET_ThrowNew(env, lastError, "getsockopt"); } } else if (optError != NO_ERROR) { - handleSocketError(env, optError); + NET_ThrowNew(env, optError, "getsockopt"); } return JNI_FALSE; } diff --git a/src/java.base/windows/native/libnio/ch/UnixDomainSockets.c b/src/java.base/windows/native/libnio/ch/UnixDomainSockets.c index aaf8996155cbc..209c30f3be716 100644 --- a/src/java.base/windows/native/libnio/ch/UnixDomainSockets.c +++ b/src/java.base/windows/native/libnio/ch/UnixDomainSockets.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -158,7 +158,8 @@ Java_sun_nio_ch_UnixDomainSockets_socket0(JNIEnv *env, jclass cl) { SOCKET s = WSASocketW(PF_UNIX, SOCK_STREAM, 0, &provider, 0, WSA_FLAG_OVERLAPPED); if (s == INVALID_SOCKET) { - return handleSocketError(env, WSAGetLastError()); + NET_ThrowNew(env, WSAGetLastError(), "WSASocketW"); + return IOS_THROWN; } SetHandleInformation((HANDLE)s, HANDLE_FLAG_INHERIT, 0); return (int)s; diff --git a/src/java.base/windows/native/libnio/ch/nio_util.h b/src/java.base/windows/native/libnio/ch/nio_util.h index a4506d93d217d..b90e0ac63d5c2 100644 --- a/src/java.base/windows/native/libnio/ch/nio_util.h +++ b/src/java.base/windows/native/libnio/ch/nio_util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@ jlong handleval(JNIEnv *env, jobject fdo);
 jint convertReturnVal(JNIEnv *env, jint n, jboolean r);
 jlong convertLongReturnVal(JNIEnv *env, jlong n, jboolean r);
 jboolean purgeOutstandingICMP(JNIEnv *env, jclass clazz, jint fd);
-jint handleSocketError(JNIEnv *env, int errorValue);
 
 #ifdef _WIN64
 
From ca96fd3b07958a7de6274bd945490bb9e79c2170 Mon Sep 17 00:00:00 2001
From: Christoph Langer
Date: Wed, 11 Oct 2023 06:54:20 +0000
Subject: [PATCH 12/15] 8317839: Exclude
 java/nio/channels/Channels/SocketChannelStreams.java on AIX

Reviewed-by: alanb
---
 test/jdk/ProblemList.txt | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt
index 44f458195ae4a..ddf0fcfb2d1a9 100644
--- a/test/jdk/ProblemList.txt
+++ b/test/jdk/ProblemList.txt
@@ -565,6 +565,8 @@ java/net/Socket/asyncClose/Race.java                            8317801 aix-ppc6
 
 java/nio/channels/AsynchronousSocketChannel/StressLoopback.java 8211851 aix-ppc64
 
+java/nio/channels/Channels/SocketChannelStreams.java            8317838 aix-ppc64
+
 java/nio/channels/DatagramChannel/AdaptorMulticasting.java      8308807 aix-ppc64
 java/nio/channels/DatagramChannel/AfterDisconnect.java          8308807 aix-ppc64
 java/nio/channels/DatagramChannel/ManySourcesAndTargets.java    8264385 macosx-aarch64
From 79761519f68837e265bc943e926087806b68330e Mon Sep 17 00:00:00 2001
From: Amit Kumar
Date: Wed, 11 Oct 2023 08:49:54 +0000
Subject: [PATCH 13/15] 8313438: [s390x] build broken after JDK-8301996

Reviewed-by: mdoerr, lucy
---
 src/hotspot/cpu/s390/interp_masm_s390.cpp   |  38 +-
 src/hotspot/cpu/s390/interp_masm_s390.hpp   |   1 +
 src/hotspot/cpu/s390/templateTable_s390.cpp | 628 ++++++++++++--------
 3 files changed, 402 insertions(+), 265 deletions(-)

diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index ad65cb2dbf48e..2b9deed8a177b 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -36,6 +36,7 @@
 #include "oops/markWord.hpp"
 #include "oops/methodCounters.hpp"
 #include "oops/methodData.hpp"
+#include "oops/resolvedFieldEntry.hpp"
 #include "oops/resolvedIndyEntry.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
@@ -349,16 +350,45 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
 }
 
 void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Register index) {
-  // Get index out of bytecode pointer, get_cache_entry_pointer_at_bcp
+  // Get index out of bytecode pointer.
   get_cache_index_at_bcp(index, 1, sizeof(u4));
-  // Get address of invokedynamic array
+
+  // Get the address of the ResolvedIndyEntry array
   get_constant_pool_cache(cache);
   z_lg(cache, Address(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset())));
-  // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo)
-  z_sllg(index, index, exact_log2(sizeof(ResolvedIndyEntry)));
+
+  // Scale the index to form a byte offset into the ResolvedIndyEntry array
+  size_t entry_size = sizeof(ResolvedIndyEntry);
+  if (is_power_of_2(entry_size)) {
+    z_sllg(index, index, exact_log2(entry_size));
+  } else {
+    z_mghi(index, entry_size);
+  }
+
+  // Calculate the final field address.
   z_la(cache, Array<ResolvedIndyEntry>::base_offset_in_bytes(), index, cache);
 }
 
+void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
+  // Get field index out of bytecode pointer.
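// What the lines below compute is, roughly, the following plain-C++ sketch
// (the names field_index, entries_base and entry_addr are illustrative only,
// not HotSpot identifiers):
//
//   size_t entry_size  = sizeof(ResolvedFieldEntry);
//   size_t byte_offset = (size_t)field_index * entry_size;   // a shift if entry_size is a
//                                                            // power of two, else a multiply (z_mghi)
//   entry_addr = entries_base + Array<ResolvedFieldEntry>::base_offset_in_bytes() + byte_offset;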
+  get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
+
+  // Get the address of the ResolvedFieldEntry array.
+  get_constant_pool_cache(cache);
+  z_lg(cache, Address(cache, in_bytes(ConstantPoolCache::field_entries_offset())));
+
+  // Scale the index to form a byte offset into the ResolvedFieldEntry array
+  size_t entry_size = sizeof(ResolvedFieldEntry);
+  if (is_power_of_2(entry_size)) {
+    z_sllg(index, index, exact_log2(entry_size));
+  } else {
+    z_mghi(index, entry_size);
+  }
+
+  // Calculate the final field address.
+  z_la(cache, Array<ResolvedFieldEntry>::base_offset_in_bytes(), index, cache);
+}
+
 // Kills Z_R0_scratch.
 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                         Register cpe_offset,
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.hpp b/src/hotspot/cpu/s390/interp_masm_s390.hpp
index 755861dd04498..5cc71b9d89114 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.hpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.hpp
@@ -113,6 +113,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void get_cache_and_index_at_bcp(Register cache, Register cpe_offset, int bcp_offset, size_t index_size = sizeof(u2));
 
   void load_resolved_indy_entry(Register cache, Register index);
+  void load_field_entry(Register cache, Register index, int bcp_offset = 1);
   void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register cpe_offset, Register bytecode, int byte_no, int bcp_offset,
                                                size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 12c3c2688bc14..b4742a2a006e3 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -38,6 +38,7 @@
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/resolvedFieldEntry.hpp"
 #include "oops/resolvedIndyEntry.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/methodHandles.hpp"
@@ -76,10 +77,9 @@
   __ bind(lbl);                                                          \
   { unsigned int b_off = __ offset();                                    \
     uintptr_t   b_addr = (uintptr_t)__ pc();                             \
-    __ z_larl(Z_R0, (int64_t)0);     /* Check current address alignment. */ \
-    __ z_slgr(Z_R0, br_tab);         /* Current Address must be equal    */ \
-    __ z_slgr(Z_R0, flags);          /* to calculated branch target.     */ \
-    __ z_brc(Assembler::bcondLogZero, 3); /* skip trap if ok. */         \
+    __ z_larl(br_tab_temp, (int64_t)0); /* Check current address alignment. */\
+    __ z_slgr(br_tab_temp, br_tab);     /* Current Address must be equal */\
+    __ z_brc(Assembler::bcondLogZero, 3);/* skip trap if ok. */           \
     __ z_illtrap(0x55);                                                  \
     guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
@@ -251,16 +251,25 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc,
       // additional, required work.
       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
-      __ get_cache_and_index_and_bytecode_at_bcp(Z_R1_scratch, bc_reg,
-                                                 temp_reg, byte_no, 1);
+
+      // Both registers are block-local temp regs. Their contents before and after is not used.
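// (A note on the check below: z_cli compares one byte in storage against an
// immediate, so a ResolvedFieldEntry whose get_code/put_code byte is still
// zero marks the field reference as unresolved and the bytecode is left
// unpatched for this execution.)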
+ Register index = bc_reg; + Register cache = temp_reg; + + __ load_field_entry(cache, index); __ load_const_optimized(bc_reg, bc); - __ compareU32_and_branch(temp_reg, (intptr_t)0, - Assembler::bcondZero, L_patch_done); + + if (byte_no == f1_byte) { + __ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())), 0); + } else { + __ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())), 0); + } + __ z_bre(L_patch_done); } break; default: assert(byte_no == -1, "sanity"); - // The pair bytecodes have already done the load. + // The bytecode pair may have already performed the load. if (load_bc_into_bc_reg) { __ load_const_optimized(bc_reg, bc); } @@ -268,17 +277,17 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, } if (JvmtiExport::can_post_breakpoint()) { - - Label L_fast_patch; + NearLabel L_fast_patch; // If a breakpoint is present we can't rewrite the stream directly. __ z_cli(at_bcp(0), Bytecodes::_breakpoint); __ z_brne(L_fast_patch); + __ get_method(temp_reg); // Let breakpoint table handling rewrite to quicker bytecode. __ call_VM_static(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), - temp_reg, Z_R13, bc_reg); + temp_reg, Z_bcp, bc_reg); __ z_bru(L_patch_done); __ bind(L_fast_patch); @@ -2342,42 +2351,35 @@ void TemplateTable::_return(TosState state) { } // ---------------------------------------------------------------------------- -// NOTE: Cpe_offset is already computed as byte offset, so we must not +// NOTE: index is already computed as byte offset, so we must not // shift it afterwards! void TemplateTable::resolve_cache_and_index(int byte_no, Register cache, - Register cpe_offset, + Register index, size_t index_size) { - BLOCK_COMMENT("resolve_cache_and_index {"); - NearLabel resolved, clinit_barrier_slow; - const Register bytecode_in_cpcache = Z_R1_scratch; - const int total_f1_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset()); - assert_different_registers(cache, cpe_offset, bytecode_in_cpcache); + assert_different_registers(cache, index, Z_R1_scratch); + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + + const Register bytecode_in_cpcache = Z_R1_scratch; + NearLabel resolved, clinit_barrier_slow; Bytecodes::Code code = bytecode(); - switch (code) { - case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; - case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; - default: - break; - } - { - assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); - __ get_cache_and_index_and_bytecode_at_bcp(cache, cpe_offset, bytecode_in_cpcache, byte_no, 1, index_size); - // Have we resolved this bytecode? - __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved); - } + BLOCK_COMMENT("resolve_cache_and_index {"); + + __ get_cache_and_index_and_bytecode_at_bcp(cache, index, bytecode_in_cpcache, byte_no, 1, index_size); + // Have we resolved this bytecode? + __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved); - // Resolve first time through. + // Resolve first time through via runtime call. // Class initialization barrier slow path lands here as well. __ bind(clinit_barrier_slow); address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); - __ load_const_optimized(Z_ARG2, (int) code); + __ load_const_optimized(Z_ARG2, (int)code); __ call_VM(noreg, entry, Z_ARG2); - // Update registers with resolved info. 
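// (The reload below is needed for two reasons: call_VM may clobber the
// volatile registers holding cache and index, and the runtime call has just
// filled in the constant pool cache entry that is about to be re-read.)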
- __ get_cache_and_index_at_bcp(cache, cpe_offset, 1, index_size); + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + __ bind(resolved); // Class initialization barrier for static methods @@ -2385,7 +2387,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no, const Register method = Z_R1_scratch; const Register klass = Z_R1_scratch; - __ load_resolved_method_at_index(byte_no, cache, cpe_offset, method); + __ load_resolved_method_at_index(byte_no, cache, index, method); __ load_method_holder(klass, method); __ clinit_barrier(klass, Z_thread, nullptr /*L_fast_path*/, &clinit_barrier_slow); } @@ -2393,6 +2395,69 @@ void TemplateTable::resolve_cache_and_index(int byte_no, BLOCK_COMMENT("} resolve_cache_and_index"); } +void TemplateTable::resolve_cache_and_index_for_field(int byte_no, + Register cache, + Register index) { + + assert_different_registers(cache, index); + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + + NearLabel resolved; + + Bytecodes::Code code = bytecode(); + switch (code) { + case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; + case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; + default: break; + } + + __ load_field_entry(cache, index); + if (byte_no == f1_byte) { + __ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset())), code); + } else { + __ z_cli(Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset())), code); + } + __ z_bre(resolved); + + // resolve first time through + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ load_const_optimized(Z_ARG2, (int)code); + __ call_VM(noreg, entry, Z_ARG2); + + // Update registers with resolved info. + __ load_field_entry(cache, index); + + __ bind(resolved); +} + +// The cache register (the only input reg) must be set before call. +void TemplateTable::load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register offset, + Register flags, + bool is_static = false) { + assert_different_registers(cache, tos_state, flags, offset); + + // Field offset + __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + + // Flags + __ load_sized_value(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset())), sizeof(u1), false); + + // TOS state + if (tos_state != noreg) { + __ load_sized_value(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset())), sizeof(u1), false); + } + + // Klass overwrite register + if (is_static) { + __ load_sized_value(obj, Address(cache, ResolvedFieldEntry::field_holder_offset()), sizeof(void*), false); + __ load_sized_value(obj, Address(obj, in_bytes(Klass::java_mirror_offset())), sizeof(void*), false); + __ resolve_oop_handle(obj); + } +} + // The Rcache and index registers must be set before call. // Index is already a byte offset, don't shift! void TemplateTable::load_field_cp_cache_entry(Register obj, @@ -2520,8 +2585,10 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no, BLOCK_COMMENT("} load_invoke_cp_cache_entry"); } -// The registers cache and index expected to be set before call. -// Correct values of the cache and index registers are preserved. +// The registers cache and index are set up if needed. +// However, the field entry must have been resolved before. +// If no jvmti post operation is performed, their contents remains unchanged. +// After a jvmti post operation, the registers are re-calculated by load_field_entry(). 
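// The guard emitted below behaves, in rough pseudo-C++ (illustrative names
// and parameter list, not the exact runtime signature), like:
//
//   if (*JvmtiExport::get_field_access_count_addr() != 0) {
//     InterpreterRuntime::post_field_access(thread, obj_or_null, field_entry);
//     // re-derive cache/index afterwards; the call may have clobbered them
//   }
//
// so the common case pays only one memory compare and a conditional branch.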
void TemplateTable::jvmti_post_field_access(Register cache, Register index, bool is_static, bool has_tos) { @@ -2534,34 +2601,32 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, // Check to see if a field access watch has been set before we // take the time to call into the VM. - Label exit; + Label dontPost; assert_different_registers(cache, index, Z_tos); __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_access_count_addr()); - __ load_and_test_int(Z_R0, Address(Z_tos)); - __ z_brz(exit); - - // Index is returned as byte offset, do not shift! - __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1); + __ z_chsi(0, Z_tos, 0); // avoid loading data into a scratch register + __ z_bre(dontPost); // cache entry pointer - __ add2reg_with_index(Z_ARG3, - in_bytes(ConstantPoolCache::base_offset()), - Z_ARG3, Z_R1_scratch); + // __ load_field_entry(cache, index); // not required as already set by resolve_cache_and_index_for_field() if (is_static) { __ clear_reg(Z_ARG2, true, false); // null object reference. Don't set CC. } else { - __ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it. + __ load_ptr(0, Z_ARG2); // Get object pointer without popping it. __ verify_oop(Z_ARG2); } + // Z_ARG2: object pointer or null - // Z_ARG3: cache entry pointer + // cache: cache entry pointer __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), - Z_ARG2, Z_ARG3); - __ get_cache_and_index_at_bcp(cache, index, 1); + Z_ARG2, cache); + + // restore registers after runtime call. + __ load_field_entry(cache, index); - __ bind(exit); + __ bind(dontPost); } void TemplateTable::pop_and_check_object(Register r) { @@ -2573,55 +2638,72 @@ void TemplateTable::pop_and_check_object(Register r) { void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); - const Register cache = Z_tmp_1; - const Register index = Z_tmp_2; - const Register obj = Z_tmp_1; - const Register off = Z_ARG2; - const Register flags = Z_ARG1; - const Register bc = Z_tmp_1; // Uses same reg as obj, so don't mix them. + const Register obj = Z_tmp_1; + const Register off = Z_tmp_2; + const Register cache = Z_tmp_1; + const Register index = Z_tmp_2; + const Register flags = Z_R1_scratch; // flags are not used in getfield + const Register br_tab = Z_R1_scratch; + const Register tos_state = Z_ARG4; + const Register bc_reg = Z_tmp_1; + const Register patch_tmp = Z_ARG4; + const Register oopLoad_tmp1 = Z_R1_scratch; + const Register oopLoad_tmp2 = Z_ARG5; +#ifdef ASSERT + const Register br_tab_temp = Z_R0_scratch; // for branch table verification code only +#endif + - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + // Register usage and life range + // + // cache, index : short-lived. Their life ends after load_resolved_field_entry. + // obj (overwrites cache): long-lived. Used in branch table entries. + // off (overwrites index): long-lived. Used in branch table entries. + // flags : unused in getfield. + // br_tab : short-lived. Only used to address branch table, and for verification in BTB_BEGIN macro. + // tos_state : short-lived. Only used to index the branch table entry. + // bc_reg : short-lived. Used as work register in patch_bytecode. 
+ // + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); if (!is_static) { // Obj is on the stack. pop_and_check_object(obj); } - // Displacement is 0, so any store instruction will be fine on any CPU. + // Displacement is 0. No need to care about limited displacement range. const Address field(obj, off); - Label is_Byte, is_Bool, is_Int, is_Short, is_Char, + Label is_Byte, is_Bool, is_Int, is_Short, is_Char, is_Long, is_Float, is_Object, is_Double; - Label is_badState8, is_badState9, is_badStateA, is_badStateB, - is_badStateC, is_badStateD, is_badStateE, is_badStateF, - is_badState; + Label is_badState, is_badState9, is_badStateA, is_badStateB, + is_badStateC, is_badStateD, is_badStateE, is_badStateF; Label branchTable, atosHandler, Done; - Register br_tab = Z_R1_scratch; bool do_rewrite = !is_static && (rc == may_rewrite); bool dont_rewrite = (is_static || (rc == may_not_rewrite)); assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that"); - assert(btos == 0, "change code, btos != 0"); + assert((btos == 0) && (atos == 8), "change branch table! ByteCodes may have changed"); // Calculate branch table size. Generated code size depends on ASSERT and on bytecode rewriting. #ifdef ASSERT const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4; #else + // Calculate branch table size. const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4; #endif // Calculate address of branch table entry and branch there. { const int bit_shift = exact_log2(bsize); // Size of each branch table entry. - const int r_bitpos = 63 - bit_shift; - const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1; - const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift); __ z_larl(br_tab, branchTable); - __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true); + __ z_sllg(tos_state, tos_state, bit_shift); + assert(tos_state != Z_R0_scratch, "shouldn't be"); + __ z_agr(br_tab, tos_state); + __ z_bcr(Assembler::bcondAlways, br_tab); } - __ z_bc(Assembler::bcondAlways, 0, flags, br_tab); __ align_address(bsize); BIND(branchTable); @@ -2632,7 +2714,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(btos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_bgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Byte, bsize, "getfield_or_static:is_Byte"); @@ -2644,7 +2726,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr // Rewrite bytecode to be faster. if (do_rewrite) { // Use btos rewriting, no truncating to t/f bit is needed for getfield. - patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_bgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Bool, bsize, "getfield_or_static:is_Bool"); @@ -2656,7 +2738,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(ctos); // Rewrite bytecode to be faster. 
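// (Rewriting replaces the generic bytecode in the instruction stream with a
// type-specialized variant, schematically
//      getfield #n   -->   fast_bgetfield #n
// so later executions of this site skip the resolution check and the
// branch-table dispatch entirely.)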
if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_cgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_cgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Char, bsize, "getfield_or_static:is_Char"); @@ -2667,7 +2749,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(stos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_sgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_sgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Short, bsize, "getfield_or_static:is_Short"); @@ -2678,7 +2760,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(itos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_igetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_igetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Int, bsize, "getfield_or_static:is_Int"); @@ -2689,7 +2771,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(ltos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_lgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_lgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Long, bsize, "getfield_or_static:is_Long"); @@ -2700,7 +2782,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(ftos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_fgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_fgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Float, bsize, "getfield_or_static:is_Float"); @@ -2711,7 +2793,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ push(dtos); // Rewrite bytecode to be faster. if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_dgetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_dgetfield, bc_reg, patch_tmp); } __ z_bru(Done); BTB_END(is_Double, bsize, "getfield_or_static:is_Double"); @@ -2722,38 +2804,34 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr BTB_END(is_Object, bsize, "getfield_or_static:is_Object"); // Bad state detection comes at no extra runtime cost. 
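// (The dispatch feeding these entries is a computed branch: every table entry
// is padded to bsize bytes, so the target is, schematically,
//      target = branchTable + (tos_state << log2(bsize));  goto *target;
// tos_state is a 4-bit value, hence the table has 16 slots; only btos..atos
// (0..8) are legal here, and the remaining slots trap via z_illtrap.)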
- BTB_BEGIN(is_badState8, bsize, "getfield_or_static:is_badState8"); - __ z_illtrap(); - __ z_bru(is_badState); - BTB_END( is_badState8, bsize, "getfield_or_static:is_badState8"); BTB_BEGIN(is_badState9, bsize, "getfield_or_static:is_badState9"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badState9, bsize, "getfield_or_static:is_badState9"); + BTB_END(is_badState9, bsize, "getfield_or_static:is_badState9"); BTB_BEGIN(is_badStateA, bsize, "getfield_or_static:is_badStateA"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateA, bsize, "getfield_or_static:is_badStateA"); + BTB_END(is_badStateA, bsize, "getfield_or_static:is_badStateA"); BTB_BEGIN(is_badStateB, bsize, "getfield_or_static:is_badStateB"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateB, bsize, "getfield_or_static:is_badStateB"); + BTB_END(is_badStateB, bsize, "getfield_or_static:is_badStateB"); BTB_BEGIN(is_badStateC, bsize, "getfield_or_static:is_badStateC"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateC, bsize, "getfield_or_static:is_badStateC"); + BTB_END(is_badStateC, bsize, "getfield_or_static:is_badStateC"); BTB_BEGIN(is_badStateD, bsize, "getfield_or_static:is_badStateD"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateD, bsize, "getfield_or_static:is_badStateD"); + BTB_END(is_badStateD, bsize, "getfield_or_static:is_badStateD"); BTB_BEGIN(is_badStateE, bsize, "getfield_or_static:is_badStateE"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateE, bsize, "getfield_or_static:is_badStateE"); + BTB_END(is_badStateE, bsize, "getfield_or_static:is_badStateE"); BTB_BEGIN(is_badStateF, bsize, "getfield_or_static:is_badStateF"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateF, bsize, "getfield_or_static:is_badStateF"); + BTB_END(is_badStateF, bsize, "getfield_or_static:is_badStateF"); __ align_address(64); BIND(is_badState); // Do this outside branch table. Needs a lot of space. @@ -2775,11 +2853,11 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr // to here is compensated for by the fallthru to "Done". { unsigned int b_off = __ offset(); - do_oop_load(_masm, field, Z_tos, Z_tmp_2, Z_tmp_3, IN_HEAP); + do_oop_load(_masm, field, Z_tos, oopLoad_tmp1, oopLoad_tmp2, IN_HEAP); __ verify_oop(Z_tos); __ push(atos); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_agetfield, bc, Z_ARG5); + patch_bytecode(Bytecodes::_fast_agetfield, bc_reg, patch_tmp); } unsigned int e_off = __ offset(); } @@ -2803,9 +2881,9 @@ void TemplateTable::getstatic(int byte_no) { BLOCK_COMMENT("} getstatic"); } -// The registers cache and index expected to be set before call. The -// function may destroy various registers, just not the cache and -// index registers. +// Register cache is expected to be set before the call. +// This function may destroy various registers. +// Only the contents of register cache is preserved/restored. void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { transition(vtos, vtos); @@ -2816,65 +2894,68 @@ void TemplateTable::jvmti_post_field_mod(Register cache, BLOCK_COMMENT("jvmti_post_field_mod {"); - // Check to see if a field modification watch has been set before - // we take the time to call into the VM. - Label L1; - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - assert_different_registers(cache, index, Z_tos); - + // Check to see if a field modification watch has been set + // before we take the time to call into the VM. 
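// (z_chsi compares the event counter in storage directly against an immediate
// zero, so this fast-path check consumes no extra scratch register; dontPost
// skips the whole posting sequence when no agent is watching field
// modifications.)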
+ Label dontPost; + assert_different_registers(cache, index, Z_tos, Z_ARG2, Z_ARG3, Z_ARG4); __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_modification_count_addr()); - __ load_and_test_int(Z_R0, Address(Z_tos)); - __ z_brz(L1); + __ z_chsi(0, Z_tos, 0); // avoid loading data into a scratch register + __ z_bre(dontPost); - // Index is returned as byte offset, do not shift! - __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1); + Register obj = Z_ARG2; + Register fieldEntry = Z_ARG3; + Register value = Z_ARG4; + + // Take a copy of cache entry pointer + __ z_lgr(fieldEntry, cache); if (is_static) { - // Life is simple. Null out the object pointer. - __ clear_reg(Z_ARG2, true, false); // Don't set CC. + // Life is simple. NULL the object pointer. + __ clear_reg(obj, true, false); // Don't set CC. } else { // Life is harder. The stack holds the value on top, followed by // the object. We don't know the size of the value, though. It // could be one or two words depending on its type. As a result, // we must find the type to determine where the object is. - __ mem2reg_opt(Z_ARG4, - Address(Z_ARG3, Z_R1_scratch, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()) + - (BytesPerLong - BytesPerInt)), - false); - __ z_srl(Z_ARG4, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask Z_ARG4 for tos_state after the above shift. - ConstantPoolCacheEntry::verify_tos_state_shift(); - __ mem2reg_opt(Z_ARG2, at_tos(1)); // Initially assume a one word jvalue. + __ load_sized_value(value, Address(fieldEntry, in_bytes(ResolvedFieldEntry::type_offset())), sizeof(u1), false); - NearLabel load_dtos, cont; + __ mem2reg_opt(obj, at_tos(1)); // Initially assume a one word jvalue. - __ compareU32_and_branch(Z_ARG4, (intptr_t) ltos, - Assembler::bcondNotEqual, load_dtos); - __ mem2reg_opt(Z_ARG2, at_tos(2)); // ltos (two word jvalue) - __ z_bru(cont); + if (VM_Version::has_LoadStoreConditional()) { + __ z_chi(value, ltos); + __ z_locg(obj, at_tos(2), Assembler::bcondEqual); + __ z_chi(value, dtos); + __ z_locg(obj, at_tos(2), Assembler::bcondEqual); + } else { + NearLabel load_dtos, cont; - __ bind(load_dtos); - __ compareU32_and_branch(Z_ARG4, (intptr_t)dtos, Assembler::bcondNotEqual, cont); - __ mem2reg_opt(Z_ARG2, at_tos(2)); // dtos (two word jvalue) + __ z_chi(value, ltos); + __ z_brne(load_dtos); + __ mem2reg_opt(obj, at_tos(2)); // ltos (two word jvalue) + __ z_bru(cont); - __ bind(cont); - } - // cache entry pointer + __ bind(load_dtos); + __ z_chi(value, dtos); + __ z_brne(cont); + __ mem2reg_opt(obj, at_tos(2)); // dtos (two word jvalue) - __ add2reg_with_index(Z_ARG3, in_bytes(cp_base_offset), Z_ARG3, Z_R1_scratch); + __ bind(cont); + } + } // object(tos) - __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize)); - // Z_ARG2: object pointer set up above (null if static) - // Z_ARG3: cache entry pointer - // Z_ARG4: jvalue object on the stack + __ load_address(value, Address(Z_esp, Interpreter::expr_offset_in_bytes(0))); + // obj: object pointer set up above (null if static) + // fieldEntry: field entry pointer + // value: jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), - Z_ARG2, Z_ARG3, Z_ARG4); - __ get_cache_and_index_at_bcp(cache, index, 1); + obj, fieldEntry, value); + + // Reload field entry + __ load_field_entry(cache, index); - __ bind(L1); + __ bind(dontPost); BLOCK_COMMENT("} jvmti_post_field_mod"); } @@ -2882,42 +2963,66 @@ void 
TemplateTable::jvmti_post_field_mod(Register cache, void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); - const Register cache = Z_tmp_1; - const Register index = Z_ARG5; - const Register obj = Z_tmp_1; + const Register obj = Z_ARG5; const Register off = Z_tmp_2; - const Register flags = Z_R1_scratch; - const Register br_tab = Z_ARG5; - const Register bc = Z_tmp_1; + const Register cache = Z_ARG5; + const Register index = Z_tmp_2; + const Register fieldAddr = Z_tmp_2; // contains obj and off combined. Could be any address register. + const Register flags = Z_tmp_1; // preserves flag value till the end, for volatility check + const Register br_tab = Z_R1_scratch; + const Register tos_state = Z_ARG4; + const Register bc_reg = Z_tmp_2; + const Register patch_tmp = Z_ARG4; const Register oopStore_tmp1 = Z_R1_scratch; - const Register oopStore_tmp2 = Z_ARG5; + const Register oopStore_tmp2 = Z_ARG5; // tmp2 must be non-volatile reg const Register oopStore_tmp3 = Z_R0_scratch; +#ifdef ASSERT + const Register br_tab_temp = Z_R0_scratch; // for branch table verification code only +#endif - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); +/* + * Register usage and life range + * + * cache, index : short-lived. Their life ends after load_resolved_field_entry. + * obj (overwrites cache): very short-lived, Combined with off immediately. + * off (overwrites index): long-lived, Used in branch table entries. + * flags : long-lived, Has to survive until the end to determine volatility. + * br_tab : short-lived, Only used to address branch table, and for verification in BTB_BEGIN macro. + * tos_state : short-live, Only used to index the branch table entry. + * bc_reg : short-lived, Used as work register in patch_bytecode. +*/ + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - // begin of life for: - // obj, off long life range - // flags short life range, up to branch into branch table - // end of life for: - // cache, index + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); + + const Address field(fieldAddr); + __ lgr_if_needed(fieldAddr, off); + + /* + * In the static case, we can calculate the final field address easily. + * Do so to occupy only one non-volatile register + * --------------------- + * In the non-static case, we preset fieldAddr with the field offset. + * The object address is available only later. It is popped from stack. + * see pop_and_check_object(obj); + */ + if (is_static) { + __ z_agr(fieldAddr, obj); + } - const Address field(obj, off); - Label is_Byte, is_Bool, is_Int, is_Short, is_Char, + Label is_Byte, is_Bool, is_Int, is_Short, is_Char, is_Long, is_Float, is_Object, is_Double; - Label is_badState8, is_badState9, is_badStateA, is_badStateB, - is_badStateC, is_badStateD, is_badStateE, is_badStateF, - is_badState; + Label is_badState, is_badState9, is_badStateA, is_badStateB, + is_badStateC, is_badStateD, is_badStateE, is_badStateF; Label branchTable, atosHandler, Done; bool do_rewrite = !is_static && (rc == may_rewrite); bool dont_rewrite = (is_static || (rc == may_not_rewrite)); assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that"); - - assert(btos == 0, "change code, btos != 0"); + assert((btos == 0) && (atos == 8), "change branch table! ByteCodes may have changed"); #ifdef ASSERT - const unsigned int bsize = is_static ? 
BTB_MINSIZE*1 : BTB_MINSIZE*4; + const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8; #else const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8; #endif @@ -2925,15 +3030,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr // Calculate address of branch table entry and branch there. { const int bit_shift = exact_log2(bsize); // Size of each branch table entry. - const int r_bitpos = 63 - bit_shift; - const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1; - const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift); __ z_larl(br_tab, branchTable); - __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true); - __ z_bc(Assembler::bcondAlways, 0, flags, br_tab); + __ z_sllg(tos_state, tos_state, bit_shift); + assert(tos_state != Z_R0_scratch, "shouldn't be"); + __ z_agr(br_tab, tos_state); + __ z_bcr(Assembler::bcondAlways, br_tab); } - // end of life for: - // flags, br_tab __ align_address(bsize); BIND(branchTable); @@ -2943,24 +3045,26 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr __ pop(btos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ z_stc(Z_tos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_bputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_bputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Byte, bsize, "putfield_or_static:is_Byte"); + BTB_END(is_Byte, bsize, "putfield_or_static:is_Byte"); // ztos BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool"); __ pop(ztos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ z_nilf(Z_tos, 0x1); __ z_stc(Z_tos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_zputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); BTB_END(is_Bool, bsize, "putfield_or_static:is_Bool"); @@ -2970,124 +3074,126 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr __ pop(ctos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ z_sth(Z_tos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_cputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_cputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Char, bsize, "putfield_or_static:is_Char"); + BTB_END(is_Char, bsize, "putfield_or_static:is_Char"); // stos BTB_BEGIN(is_Short, bsize, "putfield_or_static:is_Short"); __ pop(stos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ z_sth(Z_tos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_sputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_sputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Short, bsize, "putfield_or_static:is_Short"); + BTB_END(is_Short, bsize, "putfield_or_static:is_Short"); // itos BTB_BEGIN(is_Int, bsize, "putfield_or_static:is_Int"); __ pop(itos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ reg2mem_opt(Z_tos, field, false); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_iputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_iputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Int, bsize, "putfield_or_static:is_Int"); + BTB_END(is_Int, bsize, "putfield_or_static:is_Int"); // ltos BTB_BEGIN(is_Long, bsize, 
"putfield_or_static:is_Long"); __ pop(ltos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ reg2mem_opt(Z_tos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_lputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_lputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Long, bsize, "putfield_or_static:is_Long"); + BTB_END(is_Long, bsize, "putfield_or_static:is_Long"); // ftos BTB_BEGIN(is_Float, bsize, "putfield_or_static:is_Float"); __ pop(ftos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ freg2mem_opt(Z_ftos, field, false); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_fputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_fputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Float, bsize, "putfield_or_static:is_Float"); + BTB_END(is_Float, bsize, "putfield_or_static:is_Float"); // dtos BTB_BEGIN(is_Double, bsize, "putfield_or_static:is_Double"); __ pop(dtos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } __ freg2mem_opt(Z_ftos, field); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_dputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_dputfield, bc_reg, patch_tmp, true, byte_no); } __ z_bru(Done); - BTB_END( is_Double, bsize, "putfield_or_static:is_Double"); + BTB_END(is_Double, bsize, "putfield_or_static:is_Double"); // atos BTB_BEGIN(is_Object, bsize, "putfield_or_static:is_Object"); __ z_bru(atosHandler); - BTB_END( is_Object, bsize, "putfield_or_static:is_Object"); + BTB_END(is_Object, bsize, "putfield_or_static:is_Object"); // Bad state detection comes at no extra runtime cost. - BTB_BEGIN(is_badState8, bsize, "putfield_or_static:is_badState8"); - __ z_illtrap(); - __ z_bru(is_badState); - BTB_END( is_badState8, bsize, "putfield_or_static:is_badState8"); BTB_BEGIN(is_badState9, bsize, "putfield_or_static:is_badState9"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badState9, bsize, "putfield_or_static:is_badState9"); + BTB_END(is_badState9, bsize, "putfield_or_static:is_badState9"); BTB_BEGIN(is_badStateA, bsize, "putfield_or_static:is_badStateA"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateA, bsize, "putfield_or_static:is_badStateA"); + BTB_END(is_badStateA, bsize, "putfield_or_static:is_badStateA"); BTB_BEGIN(is_badStateB, bsize, "putfield_or_static:is_badStateB"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateB, bsize, "putfield_or_static:is_badStateB"); + BTB_END(is_badStateB, bsize, "putfield_or_static:is_badStateB"); BTB_BEGIN(is_badStateC, bsize, "putfield_or_static:is_badStateC"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateC, bsize, "putfield_or_static:is_badStateC"); + BTB_END(is_badStateC, bsize, "putfield_or_static:is_badStateC"); BTB_BEGIN(is_badStateD, bsize, "putfield_or_static:is_badStateD"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateD, bsize, "putfield_or_static:is_badStateD"); + BTB_END(is_badStateD, bsize, "putfield_or_static:is_badStateD"); BTB_BEGIN(is_badStateE, bsize, "putfield_or_static:is_badStateE"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateE, bsize, "putfield_or_static:is_badStateE"); + BTB_END(is_badStateE, bsize, "putfield_or_static:is_badStateE"); BTB_BEGIN(is_badStateF, bsize, "putfield_or_static:is_badStateF"); __ z_illtrap(); __ z_bru(is_badState); - BTB_END( is_badStateF, bsize, "putfield_or_static:is_badStateF"); + BTB_END(is_badStateF, 
bsize, "putfield_or_static:is_badStateF"); __ align_address(64); BIND(is_badState); // Do this outside branch table. Needs a lot of space. { unsigned int b_off = __ offset(); if (is_static) __ stop_static("Bad state in putstatic"); - else __ stop_static("Bad state in putfield"); + else __ stop_static("Bad state in putfield"); unsigned int e_off = __ offset(); } @@ -3102,12 +3208,13 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr __ pop(atos); if (!is_static) { pop_and_check_object(obj); + __ z_agr(fieldAddr, obj); } // Store into the field - do_oop_store(_masm, Address(obj, off), Z_tos, + do_oop_store(_masm, field, Z_tos, oopStore_tmp1, oopStore_tmp2, oopStore_tmp3, IN_HEAP); if (do_rewrite) { - patch_bytecode(Bytecodes::_fast_aputfield, bc, Z_ARG5, true, byte_no); + patch_bytecode(Bytecodes::_fast_aputfield, bc_reg, patch_tmp, true, byte_no); } // __ z_bru(Done); // fallthru unsigned int e_off = __ offset(); @@ -3116,10 +3223,13 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr BIND(Done); // Check for volatile store. - Label notVolatile; + // only if flags register is non-volatile + NearLabel notVolatile; - __ testbit(Z_ARG4, ConstantPoolCacheEntry::is_volatile_shift); + assert(flags.is_nonvolatile(), "flags register needs to be non-volatile"); + __ testbit(flags, ResolvedFieldEntry::is_volatile_shift); __ z_brz(notVolatile); + __ z_fence(); BIND(notVolatile); @@ -3149,22 +3259,21 @@ void TemplateTable::jvmti_post_fast_field_mod() { return; } - // Check to see if a field modification watch has been set before - // we take the time to call into the VM. - Label exit; - BLOCK_COMMENT("jvmti_post_fast_field_mod {"); - __ load_absolute_address(Z_R1_scratch, - (address) JvmtiExport::get_field_modification_count_addr()); - __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch)); - __ z_brz(exit); + // Check to see if a field modification watch has been set + // before we take the time to call into the VM. + Label dontPost; + __ load_absolute_address(Z_R1_scratch, (address)JvmtiExport::get_field_modification_count_addr()); + __ z_chsi(0, Z_R1_scratch, 0); // avoid loading data into a scratch register + __ z_bre(dontPost); - Register obj = Z_tmp_1; + Register obj = Z_ARG2; + Register fieldEntry = Z_ARG3; + Register value = Z_ARG4; - __ pop_ptr(obj); // Copy the object pointer from tos. - __ verify_oop(obj); - __ push_ptr(obj); // Put the object pointer back on tos. + __ load_ptr(0, obj); // Copy the object pointer from tos. + __ verify_oop(obj); // and verify it // Save tos values before call_VM() clobbers them. Since we have // to do it for every data type, we use the saved values as the @@ -3195,17 +3304,17 @@ void TemplateTable::jvmti_post_fast_field_mod() { } // jvalue on the stack - __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize)); + __ load_address(value, Address(Z_esp, Interpreter::expr_offset_in_bytes(0))); // Access constant pool cache entry. - __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tos, 1); + __ load_field_entry(fieldEntry, Z_tos, 1); __ verify_oop(obj); - // obj : object pointer copied above - // Z_ARG3: cache entry pointer - // Z_ARG4: jvalue object on the stack + // obj : object pointer copied above + // fieldEntry : cache entry pointer + // value : jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), - obj, Z_ARG3, Z_ARG4); + obj, fieldEntry, value); switch (bytecode()) { // Restore tos values. 
case Bytecodes::_fast_aputfield: @@ -3231,44 +3340,37 @@ void TemplateTable::jvmti_post_fast_field_mod() { break; } - __ bind(exit); + __ bind(dontPost); BLOCK_COMMENT("} jvmti_post_fast_field_mod"); } void TemplateTable::fast_storefield(TosState state) { transition(state, vtos); - ByteSize base = ConstantPoolCache::base_offset(); jvmti_post_fast_field_mod(); // Access constant pool cache. - Register cache = Z_tmp_1; - Register index = Z_tmp_2; - Register flags = Z_ARG5; + Register obj = Z_tmp_1; + Register cache = Z_tmp_1; + Register index = Z_tmp_2; + Register off = Z_tmp_2; + Register flags = Z_ARG5; // Index comes in bytes, don't shift afterwards! - __ get_cache_and_index_at_bcp(cache, index, 1); - - // Test for volatile. - assert(!flags->is_volatile(), "do_oop_store could perform leaf RT call"); - __ z_lg(flags, Address(cache, index, base + ConstantPoolCacheEntry::flags_offset())); - - // Replace index with field offset from cache entry. - Register field_offset = index; - __ z_lg(field_offset, Address(cache, index, base + ConstantPoolCacheEntry::f2_offset())); + __ load_field_entry(cache, index); + // this call is for nonstatic. obj remains unchanged. + load_resolved_field_entry(obj, cache, noreg, off, flags, false); // Get object from stack. - Register obj = cache; - pop_and_check_object(obj); // field address - const Address field(obj, field_offset); + const Address field(obj, off); // access field switch (bytecode()) { case Bytecodes::_fast_aputfield: - do_oop_store(_masm, Address(obj, field_offset), Z_tos, + do_oop_store(_masm, field, Z_tos, Z_ARG2, Z_ARG3, Z_ARG4, IN_HEAP); break; case Bytecodes::_fast_lputfield: @@ -3301,7 +3403,7 @@ void TemplateTable::fast_storefield(TosState state) { // Check for volatile store. Label notVolatile; - __ testbit(flags, ConstantPoolCacheEntry::is_volatile_shift); + __ testbit(flags, ResolvedFieldEntry::is_volatile_shift); __ z_brz(notVolatile); __ z_fence(); @@ -3311,51 +3413,53 @@ void TemplateTable::fast_storefield(TosState state) { void TemplateTable::fast_accessfield(TosState state) { transition(atos, state); - Register obj = Z_tos; + Register obj = Z_tos; // Object ptr is in TOS - // Do the JVMTI work here to avoid disturbing the register state below + // Do the JVMTI work here. There is no specific jvmti_post_fast_access() emitter. if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we - // take the time to call into the VM. - Label cont; + // Check to see if a field modification watch has been set + // before we take the time to call into the VM. + BLOCK_COMMENT("jvmti_post_fast_field_access {"); + Label dontPost; + Register cache = Z_ARG3; + Register index = Z_tmp_2; - __ load_absolute_address(Z_R1_scratch, - (address)JvmtiExport::get_field_access_count_addr()); - __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch)); - __ z_brz(cont); + __ load_absolute_address(Z_R1_scratch, (address)JvmtiExport::get_field_access_count_addr()); + __ z_chsi(0, Z_R1_scratch, 0); // avoid loading data into a scratch register + __ z_bre(dontPost); // Access constant pool cache entry. + __ load_field_entry(cache, index); - __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tmp_1, 1); __ verify_oop(obj); __ push_ptr(obj); // Save object pointer before call_VM() clobbers it. 
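// (Pushing obj parks the oop on the expression stack, which is walked as a GC
// root; a raw register copy alone would survive neither a safepoint during
// the runtime call nor the call's register clobbering.)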
    __ z_lgr(Z_ARG2, obj);
 
     // Z_ARG2: object pointer copied above
-    // Z_ARG3: cache entry pointer
+    // cache: cache entry pointer
     __ call_VM(noreg,
                CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
-               Z_ARG2, Z_ARG3);
+               Z_ARG2, cache);
     __ pop_ptr(obj); // Restore object pointer.
 
-    __ bind(cont);
+    __ bind(dontPost);
+    BLOCK_COMMENT("} jvmti_post_fast_field_access");
   }
 
   // Access constant pool cache.
-  Register cache = Z_tmp_1;
-  Register index = Z_tmp_2;
+  Register cache = Z_tmp_1;
+  Register index = Z_tmp_2;
+  Register off   = Z_tmp_2;
 
   // Index comes in bytes, don't shift afterwards!
-  __ get_cache_and_index_at_bcp(cache, index, 1);
+  __ load_field_entry(cache, index);
 
   // Replace index with field offset from cache entry.
-  __ mem2reg_opt(index,
-                 Address(cache, index,
-                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+  __ load_sized_value(off, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(jint), true);
 
   __ verify_oop(obj);
   __ null_check(obj);
 
-  Address field(obj, index);
+  Address field(obj, off);
 
   // access field
   switch (bytecode()) {
@@ -3399,28 +3503,30 @@ void TemplateTable::fast_xaccess(TosState state) {
   // Access constant pool cache.
   Register cache = Z_tmp_1;
   Register index = Z_tmp_2;
+  Register off   = Z_tmp_2;
 
   // Index comes in bytes, don't shift afterwards!
-  __ get_cache_and_index_at_bcp(cache, index, 2);
+  __ load_field_entry(cache, index, 2);
 
   // Replace index with field offset from cache entry.
-  __ mem2reg_opt(index,
-                 Address(cache, index,
-                         ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+  __ load_sized_value(off, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(jint), true);
 
   // Make sure exception is reported in correct bcp range (getfield is
   // next instruction).
   __ add2reg(Z_bcp, 1);
   __ null_check(receiver);
+
+  Address field(receiver, off);
+
   switch (state) {
     case itos:
-      __ mem2reg_opt(Z_tos, Address(receiver, index), false);
+      __ mem2reg_opt(Z_tos, field, false);
       break;
     case atos:
-      do_oop_load(_masm, Address(receiver, index), Z_tos, Z_tmp_1, Z_tmp_2, IN_HEAP);
+      do_oop_load(_masm, field, Z_tos, Z_tmp_1, Z_tmp_2, IN_HEAP);
       __ verify_oop(Z_tos);
       break;
     case ftos:
-      __ mem2freg_opt(Z_ftos, Address(receiver, index));
+      __ mem2freg_opt(Z_ftos, field);
       break;
     default:
       ShouldNotReachHere();
From 731fb4eea21ab67d90970d7c6107fb0a4fbee9ec Mon Sep 17 00:00:00 2001
From: Albert Mingkun Yang
Date: Wed, 11 Oct 2023 09:22:27 +0000
Subject: [PATCH 14/15] 8317797: G1: Remove unimplemented predict_will_fit

Reviewed-by: tschatzl
---
 src/hotspot/share/gc/g1/g1Policy.hpp | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index 7ecfc4dd2f739..c5ab4e9e1de13 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -251,14 +251,6 @@ class G1Policy: public CHeapObj<mtGC> {
   double predict_survivor_regions_evac_time() const;
   double predict_retained_regions_evac_time() const;
 
-  // Check whether a given young length (young_length) fits into the
-  // given target pause time and whether the prediction for the amount
-  // of objects to be copied for the given length will fit into the
-  // given free space (expressed by base_free_regions). It is used by
-  // calculate_young_list_target_length().
-  bool predict_will_fit(uint young_length, double base_time_ms,
-                        uint base_free_regions, double target_pause_time_ms) const;
-
 public:
 
   size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }
 
From bcafec54a52e4c0d92b075de461fcf16d6c100b4 Mon Sep 17 00:00:00 2001
From: Roman Kennke
Date: Wed, 11 Oct 2023 13:44:39 +0000
Subject: [PATCH 15/15] 8316958: Add test for unstructured locking

Reviewed-by: dholmes, shade
---
 .../locking/TestUnstructuredLocking.jasm      | 60 +++++++++++++++++++
 1 file changed, 60 insertions(+)
 create mode 100644 test/hotspot/jtreg/runtime/locking/TestUnstructuredLocking.jasm

diff --git a/test/hotspot/jtreg/runtime/locking/TestUnstructuredLocking.jasm b/test/hotspot/jtreg/runtime/locking/TestUnstructuredLocking.jasm
new file mode 100644
index 0000000000000..7e663f78ddc1a
--- /dev/null
+++ b/test/hotspot/jtreg/runtime/locking/TestUnstructuredLocking.jasm
@@ -0,0 +1,60 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test id=int
+ * @summary Check that monitorenter A; monitorenter B; monitorexit A; monitorexit B; works
+ * @compile TestUnstructuredLocking.jasm
+ * @run main/othervm -Xint TestUnstructuredLocking
+ */
+/*
+ * @test id=comp
+ * @summary Check that monitorenter A; monitorenter B; monitorexit A; monitorexit B; works, with -Xcomp
+ * @compile TestUnstructuredLocking.jasm
+ * @run main/othervm -Xcomp TestUnstructuredLocking
+ */
+
+super public class TestUnstructuredLocking version 64:0 {
+
+  public static Method main:"([Ljava/lang/String;)V" stack 2 locals 4 {
+    new class java/lang/Object;
+    dup;
+    invokespecial Method java/lang/Object."<init>":"()V";
+    astore_1;
+    new class java/lang/Object;
+    dup;
+    invokespecial Method java/lang/Object."<init>":"()V";
+    astore_2;
+    aload_1;
+    monitorenter;
+    aload_2;
+    monitorenter;
+    aload_1;
+    monitorexit;
+    aload_2;
+    monitorexit;
+    return;
+  }
+
+}
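// (Note: this enter A; enter B; exit A; exit B sequence cannot be produced
// from Java source. For nested synchronized blocks javac only emits properly
// nested pairs, i.e. enter A; enter B; exit B; exit A, plus an exception
// handler that releases the monitors on the unwind path. The swapped exits
// above are legal JVM bytecode but expressible only by hand-written assembly,
// which is why this test is a .jasm file rather than a .java file.)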