diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
index ca175fe1c47b1..89a97a4984fc8 100644
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -57,7 +57,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ mov_metadata(rscratch1, m);
   ce->store_parameter(rscratch1, 1);
   ce->store_parameter(_bci, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -66,7 +66,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     __ far_call(RuntimeAddress(a));
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -79,13 +79,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     __ mov(rscratch1, _index->as_jint());
   }
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
     assert(_array != LIR_Opr::nullOpr(), "sanity");
     __ mov(rscratch2, _array->as_pointer_register());
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
   }
   __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
   __ blr(lr);
@@ -100,7 +100,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   __ far_call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -112,7 +112,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 #ifdef ASSERT
@@ -124,14 +124,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -167,7 +167,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == r19, "length must in r19,");
   assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == r0, "result must in r0");
@@ -190,7 +190,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == r19, "length must in r19,");
   assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == r0, "result must in r0");
@@ -202,11 +202,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(), 1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
@@ -223,11 +223,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   __ adr(lr, _continuation);
   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
@@ -255,7 +255,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -265,9 +265,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
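Every hunk in this patch applies the same mechanical substitution: C1's stub identifiers move from an unscoped enum nested in Runtime1 to a standalone scoped enum, so each enumerator gains a C1StubId:: qualifier. A minimal before/after sketch of the shape of that change (illustrative only; the real declarations live in shared C1 code not shown in this excerpt, and the enumerator list here is truncated):

    // Before: unscoped enum nested in the class. Enumerators are reached as
    // Runtime1::counter_overflow_id and convert to int implicitly.
    //   class Runtime1 {
    //    public:
    //     enum StubID { counter_overflow_id, deoptimize_id, /* ... */ };
    //     static address entry_for(StubID id);
    //   };

    // After: a scoped enum declared outside the class. Every use must be
    // qualified, which is exactly the rewrite made at each call site above.
    enum class C1StubId { counter_overflow_id, deoptimize_id /* ... */ };
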
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 91430be5835b5..5e116d82761ac 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -321,19 +321,19 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 
   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     reloc_type = relocInfo::section_word_type;
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     reloc_type = relocInfo::metadata_type;
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   default: ShouldNotReachHere();
@@ -375,7 +375,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(r0);
 
   // search an exception handler (r0: exception oop, r3: throwing pc)
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -432,7 +432,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // remove the activation and dispatch to the unwind handler
   __ block_comment("remove_frame and dispatch to the unwind handler");
   __ remove_frame(initial_frame_size_in_bytes());
-  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -875,19 +875,19 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 
   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     reloc_type = relocInfo::section_word_type;
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     reloc_type = relocInfo::metadata_type;
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   default: ShouldNotReachHere();
@@ -1356,7 +1356,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     __ br(Assembler::EQ, *success_target);
 
     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
     // result is a boolean
     __ cbzw(klass_RInfo, *failure_target);
@@ -1367,7 +1367,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
     // result is a boolean
     __ cbz(k_RInfo, *failure_target);
@@ -1446,7 +1446,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
     // result is a boolean
     __ cbzw(k_RInfo, *failure_target);
@@ -2035,7 +2035,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  Runtime1::StubID unwind_id;
+  C1StubId unwind_id;
 
   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -2054,9 +2054,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(r0);
   // search an exception handler (r0: exception oop, r3: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
@@ -2337,7 +2337,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
 
     __ PUSH(src, dst);
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ POP(src, dst);
 
     __ cbnz(src, cont);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 8f1260feba3ea..4acac65ad5bab 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -1246,7 +1246,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -1277,14 +1277,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception,
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index d0d11d437e83e..8d1b3902ce42e 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -267,7 +267,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == r0, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
@@ -308,7 +308,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == r0, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index cb9eb03c580d2..0b9acc0f3a885 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -100,10 +100,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
     }
     bind(L);
   }
@@ -358,7 +358,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
 
   // incoming parameters
@@ -370,7 +370,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
@@ -390,12 +390,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
     __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
     __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     // At this point all registers except exception oop (r0) and
     // exception pc (lr) are dead.
     const int frame_size = 2 /*fp, return address*/;
@@ -453,13 +453,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ str(r0, Address(rfp, 1*BytesPerWord));
 
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id != handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     break;
   default: ShouldNotReachHere();
   }
@@ -611,7 +611,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 
   const Register exception_oop = r0;
   const Register exception_pc  = r3;
@@ -628,7 +628,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   OopMap* oop_map = nullptr;
   switch (id) {
     {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         __ leave();
@@ -636,31 +636,31 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass = r3; // Incoming
         Register obj   = r0; // Result
 
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
@@ -679,7 +679,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         Register bci = r0, method = r1;
         __ enter();
@@ -697,14 +697,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length   = r19; // Incoming
         Register klass    = r3; // Incoming
         Register obj      = r0; // Result
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -717,7 +717,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ mov(rscratch1, tag);
@@ -732,7 +732,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           __ enter();
           OopMap* map = save_live_registers(sasm);
           int call_offset;
-          if (id == new_type_array_id) {
+          if (id == C1StubId::new_type_array_id) {
             call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
           } else {
             call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -750,7 +750,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
         // r0,: klass
        // r19,: rank
@@ -770,7 +770,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -802,19 +802,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Typical calling sequence:
         // __ push(klass_RInfo);  // object klass or other subclass
@@ -857,10 +857,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -878,10 +878,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -901,7 +901,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
         OopMap* oop_map = save_live_registers(sasm);
@@ -918,13 +918,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // note: no stubframe since we are about to leave the current
         //       activation and we are calling a leaf VM function only.
@@ -932,54 +932,54 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
        // tos + 0: link
        //     + 1: return address
@@ -987,7 +987,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
 
@@ -1005,7 +1005,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // c_rarg0: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         save_live_registers(sasm);
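One spot in StubAssembler::call_RT above is not a pure rename: _stub_id appears to be stored as a plain int, and a scoped enum has no implicit conversion to or from int, so the comparison picks up an explicit cast. A compilable sketch of that constraint, with StubAssemblerSketch as a hypothetical stand-in for the real class:

    enum class C1StubId { forward_exception_id, counter_overflow_id };

    struct StubAssemblerSketch {
      int _stub_id;  // kept as int, as the hunk above suggests
      bool in_forward_exception() const {
        // The old code could write `_stub_id == Runtime1::forward_exception_id`
        // because the unscoped enumerator converted to int; with the scoped
        // enum the explicit cast seen in the patch becomes necessary.
        return _stub_id == (int)C1StubId::forward_exception_id;
      }
    };

The same cast appears in the arm port's call_RT below.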
diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
index 3d8dbc38071ed..8e85fa88a8749 100644
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_bci, 0);
   ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
-  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::counter_overflow_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 
@@ -57,7 +57,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
   if (_info->deoptimize_on_exception()) {
-    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
     debug_only(__ should_not_reach_here());
@@ -73,10 +73,10 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   }
 
   if (_throw_index_out_of_bounds_exception) {
-    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::throw_index_exception_id), relocInfo::runtime_call_type);
   } else {
     __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction?
-    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::throw_range_check_failed_id), relocInfo::runtime_call_type);
   }
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -89,7 +89,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
@@ -100,7 +100,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
+  __ call(Runtime1::entry_for(C1StubId::throw_div0_exception_id),
           relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   DEBUG_ONLY(STOP("DivByZero");)
@@ -109,14 +109,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -148,7 +148,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   assert(_klass_reg->as_register() == R1, "runtime call setup");
   assert(_length->as_register() == R2, "runtime call setup");
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_type_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -170,7 +170,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   assert(_klass_reg->as_register() == R1, "runtime call setup");
   assert(_length->as_register() == R2, "runtime call setup");
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_object_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -189,9 +189,9 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
     __ str(lock_reg, Address(SP, BytesPerWord));
   }
 
-  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
-                              Runtime1::monitorenter_id :
-                              Runtime1::monitorenter_nofpu_id;
+  C1StubId enter_id = ce->compilation()->has_fpu_code() ?
+                      C1StubId::monitorenter_id :
+                      C1StubId::monitorenter_nofpu_id;
   __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -210,9 +210,9 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   __ str(lock_reg, Address(SP));
 
   // Non-blocking leaf routine - no call info needed
-  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
-                             Runtime1::monitorexit_id :
-                             Runtime1::monitorexit_nofpu_id;
+  C1StubId exit_id = ce->compilation()->has_fpu_code() ?
+                     C1StubId::monitorexit_id :
+                     C1StubId::monitorexit_nofpu_id;
   __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
   __ b(_continuation);
 }
@@ -322,10 +322,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -351,7 +351,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ mov_slow(Rtemp, _trap_request);
   ce->verify_reserved_argument_area_size(1);
   __ str(Rtemp, Address(SP));
-  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::deoptimize_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -362,9 +362,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is
     // probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 999f8fe590472..bb6a93e6f8da7 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -213,7 +213,7 @@ int LIR_Assembler::emit_exception_handler() {
   // check that there is really an exception
   __ verify_not_null_oop(Rexception_obj);
 
-  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ should_not_reach_here();
 
   assert(code_offset() - offset <= exception_handler_size(), "overflow");
@@ -253,7 +253,7 @@ int LIR_Assembler::emit_unwind_handler() {
 
   // remove the activation and dispatch to the unwind handler
   __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
-  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
+  __ jump(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -1136,7 +1136,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ b(*failure_target, ne);
     // slow case
     assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ cbz(R0, *failure_target);
     if (op->should_profile()) {
       Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
@@ -1210,7 +1210,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
         __ cmp(Rtemp, k_RInfo, ne);
         __ b(*success_target, eq);
         assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
         __ cbz(R0, *failure_target);
       }
     } else {
@@ -1227,7 +1227,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
    }
 
@@ -1303,7 +1303,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
         }
         __ b(*success_target, eq);
         assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
         if (!op->should_profile()) {
           move_regs(R0, res);
         } else {
@@ -1334,7 +1334,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       __ b(*failure_target, ne);
       // slow case
       assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
       if (!op->should_profile()) {
         move_regs(R0, res);
       }
@@ -1981,9 +1981,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   assert(exceptionPC->as_register()  == Rexception_pc, "must match");
   info->add_register_oop(exceptionOop);
 
-  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
-                               Runtime1::handle_exception_id :
-                               Runtime1::handle_exception_nofpu_id;
+  C1StubId handle_id = compilation()->has_fpu_code() ?
+                       C1StubId::handle_exception_id :
+                       C1StubId::handle_exception_nofpu_id;
   Label return_address;
   __ adr(Rexception_pc, return_address);
   __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
@@ -2260,7 +2260,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ mov(altFP_7_11, R1);
       __ mov(R0, tmp);
       __ mov(R1, tmp2);
-      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
+      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
       __ cmp_32(R0, 0);
       __ mov(R0, R6);
       __ mov(R1, altFP_7_11);
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index f4e3812d77cff..adda0c1c290db 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1054,7 +1054,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr, reg, args, info);
 
   LIR_Opr result = rlock_result(x);
@@ -1083,7 +1083,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
@@ -1091,7 +1091,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   }
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index 335baf5f16638..b5117dedc424e 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -65,7 +65,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
   reset_last_Java_frame(Rtemp);
 
   assert(frame_size() != no_frame_size, "frame must be fixed");
-  if (_stub_id != Runtime1::forward_exception_id) {
+  if (_stub_id != (int)C1StubId::forward_exception_id) {
     ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
   }
 
@@ -81,10 +81,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
   // Check for pending exception
   // unpack_with_exception_in_tls path is taken through
   // Runtime1::exception_handler_for_pc
-  if (_stub_id != Runtime1::forward_exception_id) {
+  if (_stub_id != (int)C1StubId::forward_exception_id) {
     assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
     cmp(R3, 0);
-    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
+    jump(Runtime1::entry_for(C1StubId::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
   } else {
 #ifdef ASSERT
     // Should not have pending exception in forward_exception stub
@@ -280,7 +280,7 @@ static void restore_sp_for_method_handle(StubAssembler* sasm) {
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) {
   __ block_comment("generate_handle_exception");
 
   bool save_fpu_registers = false;
@@ -290,7 +290,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
   OopMap* oop_map = nullptr;
 
   switch (id) {
-  case forward_exception_id: {
+  case C1StubId::forward_exception_id: {
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
@@ -299,14 +299,14 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
     __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
     break;
   }
-  case handle_exception_id:
+  case C1StubId::handle_exception_id:
     save_fpu_registers = HaveVFP;
     // fall-through
-  case handle_exception_nofpu_id:
+  case C1StubId::handle_exception_nofpu_id:
     // At this point all registers MAY be live.
     oop_map = save_live_registers(sasm, save_fpu_registers);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     // At this point all registers except exception oop (R4/R19) and
     // exception pc (R5/R20) are dead.
     oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
@@ -328,13 +328,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
   // Restore the registers that were saved at the beginning, remove
   // frame and jump to the exception handler.
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     restore_live_registers(sasm, save_fpu_registers);
     // Note: the restore live registers includes the jump to LR (patched to R0)
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     restore_live_registers_without_return(sasm); // must not jump immediately to handler
     restore_sp_for_method_handle(sasm);
     __ ret();
@@ -403,7 +403,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   const bool must_gc_arguments = true;
   const bool dont_gc_arguments = false;
 
@@ -411,16 +411,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   bool save_fpu_registers = HaveVFP;
 
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         // does not return on ARM
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         const Register result = R0;
         const Register klass  = R1;
@@ -436,7 +436,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
      break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        __ ldr(R1, Address(SP, arg1_offset));
@@ -448,10 +448,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -463,7 +463,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         OopMap* map = save_live_registers(sasm);
 
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -477,7 +477,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       {
         __ set_info("new_multi_array", dont_gc_arguments);
 
@@ -500,7 +500,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -521,78 +521,78 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
      break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // (in) R0 - sub, destroyed,
         // (in) R1 - super, not changed
@@ -625,10 +625,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         __ set_info("monitorenter", dont_gc_arguments);
         const Register obj  = R1;
@@ -643,10 +643,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         __ set_info("monitorexit", dont_gc_arguments);
         const Register lock = R1;
@@ -659,7 +659,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         __ set_info("deoptimize", dont_gc_arguments);
         OopMap* oop_map = save_live_registers(sasm);
@@ -675,35 +675,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       {
         __ set_info("access_field_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       {
         __ set_info("load_klass_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
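The generate_code_for switches above keep their shape, including the deliberate fall-through from each _nofpu variant into its FPU-saving twin; only the case labels gain the C1StubId:: qualifier (a scoped enum still works in a switch, since case labels need constant expressions of the enum type rather than ints). A minimal sketch of that dispatch pattern, reduced to two ids:

    enum class C1StubId { monitorenter_nofpu_id, monitorenter_id };

    static void generate_code_for_sketch(C1StubId id) {
      bool save_fpu_registers = true;
      switch (id) {
      case C1StubId::monitorenter_nofpu_id:
        save_fpu_registers = false;
        // fall through -- same stub body, FPU saving disabled
      case C1StubId::monitorenter_id:
        // ... emit the monitorenter stub, honoring save_fpu_registers ...
        (void)save_fpu_registers;
        break;
      }
    }
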
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
index dc70c73d4b330..451f3b7e9cd6b 100644
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -68,7 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     //__ load_const_optimized(R0, a);
     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
     __ mtctr(R0);
@@ -79,8 +79,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     return;
   }
 
-  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
-                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
+  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(C1StubId::throw_index_exception_id)
+                                                      : Runtime1::entry_for(C1StubId::throw_range_check_failed_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
@@ -109,7 +109,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   //__ load_const_optimized(R0, a);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
   __ mtctr(R0);
@@ -133,7 +133,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ load_const_optimized(R0, md.value());
   __ std(R0, -8, R1_SP);
 
-  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
+  address a = Runtime1::entry_for(C1StubId::counter_overflow_id);
   //__ load_const_optimized(R0, a);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
   __ mtctr(R0);
@@ -150,7 +150,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
+  address stub = Runtime1::entry_for(C1StubId::throw_div0_exception_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
@@ -165,9 +165,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   if (ImplicitNullChecks || TrapBasedNullChecks) {
@@ -199,14 +199,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -236,7 +236,7 @@ NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr re
 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
+  address entry = Runtime1::entry_for(C1StubId::new_type_array_id);
   //__ load_const_optimized(R0, entry);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
   __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
@@ -259,7 +259,7 @@ NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Op
 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
+  address entry = Runtime1::entry_for(C1StubId::new_object_array_id);
   //__ load_const_optimized(R0, entry);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
   __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
@@ -272,7 +272,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
 
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
+  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorenter_id : C1StubId::monitorenter_nofpu_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
@@ -289,7 +289,7 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   if (_compute_lock) {
     ce->monitor_address(_monitor_ix, _lock_reg);
   }
-  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
+  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorexit_id : C1StubId::monitorexit_nofpu_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   assert(_lock_reg->as_register() == R4_ARG2, "");
@@ -403,12 +403,12 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
                            reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
                            reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
                            reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
@@ -434,7 +434,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
+  address stub = Runtime1::entry_for(C1StubId::deoptimize_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 2191b894f6e16..42934dc7c3179 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -176,7 +176,7 @@ int LIR_Assembler::emit_exception_handler() {
   }
 
   int offset = code_offset();
-  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
+  address entry_point = 
CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)); //__ load_const_optimized(R0, entry_point); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point)); __ mtctr(R0); @@ -222,7 +222,7 @@ int LIR_Assembler::emit_unwind_handler() { } // Dispatch to the unwind logic. - address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id); + address unwind_stub = Runtime1::entry_for(C1StubId::unwind_exception_id); //__ load_const_optimized(R0, unwind_stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub)); if (preserve_exception) { __ mr(Rexception, Rexception_save); } @@ -1800,8 +1800,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true); add_call_info(pc_for_athrow_offset, info); // for exception handler - address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id - : Runtime1::handle_exception_nofpu_id); + address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? C1StubId::handle_exception_id + : C1StubId::handle_exception_nofpu_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); @@ -2001,7 +2001,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2, &cont, copyfunc_addr != nullptr ? &copyfunc : &slow, nullptr); - address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id); + address slow_stc = Runtime1::entry_for(C1StubId::slow_subtype_check_id); //__ load_const_optimized(tmp, slow_stc, tmp2); __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false); __ mtctr(tmp); @@ -2452,7 +2452,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L __ b(*success); } else { // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id); + address entry = Runtime1::entry_for(C1StubId::slow_subtype_check_id); // Stub needs fixed registers (tmp1-3). Register original_k_RInfo = op->tmp1()->as_register(); Register original_klass_RInfo = op->tmp2()->as_register(); @@ -2543,7 +2543,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, &done, &failure, nullptr); // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id); + const address slow_path = Runtime1::entry_for(C1StubId::slow_subtype_check_id); //__ load_const_optimized(R0, slow_path); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path)); __ mtctr(R0); @@ -2850,8 +2850,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
- if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) || - dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) { + if (dest == Runtime1::entry_for(C1StubId::register_finalizer_id) || + dest == Runtime1::entry_for(C1StubId::new_multi_array_id )) { //__ load_const_optimized(R0, dest); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); __ mtctr(R0); diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp index 04762a22c6110..7973e9d05459e 100644 --- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp @@ -1032,7 +1032,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); const LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -1067,7 +1067,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, + stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); @@ -1075,7 +1075,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); } // Following registers are used by slow_subtype_check: LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp index 059bb2eae0c3a..c05e97a4e9aa3 100644 --- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp @@ -293,7 +293,7 @@ void C1_MacroAssembler::initialize_object( if (CURRENT_ENV->dtrace_alloc_probes()) { Unimplemented(); // assert(obj == O0, "must be"); -// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)), +// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)), // relocInfo::runtime_call_type); } @@ -369,7 +369,7 @@ void C1_MacroAssembler::allocate_array( if (CURRENT_ENV->dtrace_alloc_probes()) { Unimplemented(); //assert(obj == O0, "must be"); - //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)), + //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)), // relocInfo::runtime_call_type); } @@ -398,7 +398,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) { if (TrapBasedNullChecks) { // SIGTRAP based trap_null_check(r); } else { // explicit - //const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); + //const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); assert(Lnull != nullptr, "must have Label for explicit check"); cmpdi(CCR0, r, 0); bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull); diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp 
b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp index adddfda4ee74f..654626d66d812 100644 --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp @@ -97,12 +97,12 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, //load_const_optimized(R0, StubRoutines::forward_exception_entry()); //mtctr(R0); //bctr(); - } else if (_stub_id == Runtime1::forward_exception_id) { + } else if (_stub_id == (int)C1StubId::forward_exception_id) { should_not_reach_here(); } else { // keep stub frame for next call_RT - //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id)); - add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id))); + //load_const_optimized(R0, Runtime1::entry_for(C1StubId::forward_exception_id)); + add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(C1StubId::forward_exception_id))); mtctr(R0); bctr(); } @@ -388,7 +388,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { return oop_maps; } -OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { OopMapSet* oop_maps = nullptr; // For better readability. @@ -397,22 +397,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { // Stub code & info for the different stubs. switch (id) { - case forward_exception_id: + case C1StubId::forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); } break; - case new_instance_id: - case fast_new_instance_id: - case fast_new_instance_init_check_id: + case C1StubId::new_instance_id: + case C1StubId::fast_new_instance_id: + case C1StubId::fast_new_instance_init_check_id: { - if (id == new_instance_id) { + if (id == C1StubId::new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == fast_new_instance_id) { + } else if (id == C1StubId::fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == fast_new_instance_init_check_id, "bad StubID"); + assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -422,15 +422,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case counter_overflow_id: + case C1StubId::counter_overflow_id: // Bci and method are on stack. oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2); break; - case new_type_array_id: - case new_object_array_id: + case C1StubId::new_type_array_id: + case C1StubId::new_object_array_id: { - if (id == new_type_array_id) { + if (id == C1StubId::new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -439,7 +439,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { #ifdef ASSERT // Assert object type is really an array of the proper kind. { - int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value; + int tag = (id == C1StubId::new_type_array_id) ? 
Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value; Label ok; __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2); __ srawi(R0, R0, Klass::_lh_array_tag_shift); @@ -453,7 +453,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { // We don't support eden allocation. - if (id == new_type_array_id) { + if (id == C1StubId::new_type_array_id) { oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3); } else { oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3); @@ -461,7 +461,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case new_multi_array_id: + case C1StubId::new_multi_array_id: { // R4: klass // R5: rank @@ -471,7 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case register_finalizer_id: + case C1StubId::register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); // This code is called via rt_call. Hence, caller-save registers have been saved. @@ -501,50 +501,50 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case throw_range_check_failed_id: + case C1StubId::throw_range_check_failed_id: { __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded. oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2); } break; - case throw_index_exception_id: + case C1StubId::throw_index_exception_id: { __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded. oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case throw_div0_exception_id: + case C1StubId::throw_div0_exception_id: { __ set_info("throw_div0_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case throw_null_pointer_exception_id: + case C1StubId::throw_null_pointer_exception_id: { __ set_info("throw_null_pointer_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case handle_exception_nofpu_id: - case handle_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: { __ set_info("handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case handle_exception_from_callee_id: + case C1StubId::handle_exception_from_callee_id: { __ set_info("handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case unwind_exception_id: + case C1StubId::unwind_exception_id: { const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/, @@ -572,28 +572,28 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case throw_array_store_exception_id: + case C1StubId::throw_array_store_exception_id: { __ set_info("throw_array_store_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); } break; - case throw_class_cast_exception_id: + case C1StubId::throw_class_cast_exception_id: { __ set_info("throw_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, 
CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case throw_incompatible_class_change_error_id: + case C1StubId::throw_incompatible_class_change_error_id: { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; - case slow_subtype_check_id: + case C1StubId::slow_subtype_check_id: { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super ); const Register sub_klass = R5, super_klass = R4, @@ -605,12 +605,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case monitorenter_nofpu_id: - case monitorenter_id: + case C1StubId::monitorenter_nofpu_id: + case C1StubId::monitorenter_id: { __ set_info("monitorenter", dont_gc_arguments); - int save_fpu_registers = (id == monitorenter_id); + int save_fpu_registers = (id == C1StubId::monitorenter_id); // Make a frame and preserve the caller's caller-save registers. OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); @@ -624,15 +624,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case monitorexit_nofpu_id: - case monitorexit_id: + case C1StubId::monitorexit_nofpu_id: + case C1StubId::monitorexit_id: { // note: Really a leaf routine but must setup last java sp // => use call_RT for now (speed can be improved by // doing last java sp setup manually). __ set_info("monitorexit", dont_gc_arguments); - int save_fpu_registers = (id == monitorexit_id); + int save_fpu_registers = (id == C1StubId::monitorexit_id); // Make a frame and preserve the caller's caller-save registers. OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); @@ -646,7 +646,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case deoptimize_id: + case C1StubId::deoptimize_id: { __ set_info("deoptimize", dont_gc_arguments); __ std(R0, -8, R1_SP); // Pass trap_request on stack. 
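For orientation amid these mechanical hunks: the whole patch swaps Runtime1's nested, unscoped StubID enum for a scoped enum class. A minimal sketch of the new shape (assumed; the real declaration lives in the shared c1_Runtime1.hpp, outside this diff, and its enumerator list follows the old StubID one-for-one):

// Before: class Runtime1 { enum StubID { forward_exception_id, ..., number_of_ids }; };
// the enumerators leaked into Runtime1's scope and converted to int implicitly.
enum class C1StubId : int {   // underlying type is an assumption
  forward_exception_id,
  new_instance_id,
  // ... one enumerator per C1 runtime stub ...
  deoptimize_id,
  predicate_failed_trap_id
};

Scoped enumerators no longer convert to int implicitly, which is why StubAssembler::call_RT above now compares against the stored int with an explicit cast: _stub_id == (int)C1StubId::forward_exception_id.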
@@ -662,35 +662,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case access_field_patching_id: + case C1StubId::access_field_patching_id: { __ set_info("access_field_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - case load_klass_patching_id: + case C1StubId::load_klass_patching_id: { __ set_info("load_klass_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); } break; - case load_mirror_patching_id: + case C1StubId::load_mirror_patching_id: { __ set_info("load_mirror_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); } break; - case load_appendix_patching_id: + case C1StubId::load_appendix_patching_id: { __ set_info("load_appendix_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); } break; - case dtrace_object_alloc_id: + case C1StubId::dtrace_object_alloc_id: { // O0: object __ unimplemented("stub dtrace_object_alloc_id"); __ set_info("dtrace_object_alloc", dont_gc_arguments); @@ -710,7 +710,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case predicate_failed_trap_id: + case C1StubId::predicate_failed_trap_id: { __ set_info("predicate_failed_trap", dont_gc_arguments); OopMap* oop_map = save_live_registers(sasm); @@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } -OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) { __ block_comment("generate_handle_exception"); // Save registers, if required. @@ -764,7 +764,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/; switch (id) { - case forward_exception_id: + case C1StubId::forward_exception_id: // We're handling an exception in the context of a compiled frame. // The registers have been saved in the standard places. Perform // an exception lookup in the caller and dispatch to the handler @@ -780,12 +780,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { __ ld(Rexception_pc, _abi0(lr), Rexception_pc); __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread); break; - case handle_exception_nofpu_id: - case handle_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc); + oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Rexception_pc); break; - case handle_exception_from_callee_id: + case C1StubId::handle_exception_from_callee_id: // At this point all registers except exception oop and exception pc are dead. oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); @@ -824,13 +824,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { // Restore the registers that were saved at the beginning, remove // the frame and jump to the exception handler. 
switch (id) { - case forward_exception_id: - case handle_exception_nofpu_id: - case handle_exception_id: - restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id); + case C1StubId::forward_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: + restore_live_registers(sasm, noreg, noreg, id != C1StubId::handle_exception_nofpu_id); __ bctr(); break; - case handle_exception_from_callee_id: { + case C1StubId::handle_exception_from_callee_id: { __ pop_frame(); __ ld(Rexception_pc, _abi0(lr), R1_SP); __ mtlr(Rexception_pc); diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp index cba3dd919dafb..98ab86bf72eb6 100644 --- a/src/hotspot/cpu/riscv/assembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp @@ -1267,6 +1267,7 @@ enum VectorMask { INSN(viota_m, 0b1010111, 0b010, 0b10000, 0b010100); // Vector Single-Width Floating-Point/Integer Type-Convert Instructions + INSN(vfcvt_x_f_v, 0b1010111, 0b001, 0b00001, 0b010010); INSN(vfcvt_f_x_v, 0b1010111, 0b001, 0b00011, 0b010010); INSN(vfcvt_rtz_x_f_v, 0b1010111, 0b001, 0b00111, 0b010010); diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp index b7e1b7863efdb..fb81082072610 100644 --- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -62,7 +62,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ mov_metadata(t0, m); ce->store_parameter(t0, 1); ce->store_parameter(_bci, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ j(_continuation); @@ -71,7 +71,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -84,13 +84,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { } else { __ mv(t0, _index->as_jint()); } - Runtime1::StubID stub_id; + C1StubId stub_id; if (_throw_index_out_of_bounds_exception) { - stub_id = Runtime1::throw_index_exception_id; + stub_id = C1StubId::throw_index_exception_id; } else { assert(_array != LIR_Opr::nullOpr(), "sanity"); __ mv(t1, _array->as_pointer_register()); - stub_id = Runtime1::throw_range_check_failed_id; + stub_id = C1StubId::throw_range_check_failed_id; } // t0 and t1 are used as args in generate_exception_throw, // so use ra as the tmp register for rt_call. 
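All of these call sites funnel through Runtime1::entry_for(C1StubId), whose signature change is the visible surface of the rename. A sketch of what that lookup plausibly does (the _blobs table and its indexing are assumptions inferred from the calling pattern, not shown in this diff):

address Runtime1::entry_for(C1StubId id) {
  // One code blob is generated per stub id during VM startup; entry_for()
  // merely returns its entry point, so switching the id's type from a
  // nested enum to an enum class is a type-safety change only -- no
  // codegen difference.
  CodeBlob* blob = _blobs[(int)id];  // assumed per-id storage
  return blob->code_begin();
}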
@@ -106,7 +106,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -118,7 +118,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); #ifdef ASSERT @@ -127,14 +127,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { } // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == Runtime1::new_instance_id || - stub_id == Runtime1::fast_new_instance_id || - stub_id == Runtime1::fast_new_instance_init_check_id, + assert(stub_id == C1StubId::new_instance_id || + stub_id == C1StubId::fast_new_instance_id || + stub_id == C1StubId::fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -163,7 +163,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == x9, "length must in x9"); assert(_klass_reg->as_register() == x13, "klass_reg must in x13"); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == x10, "result must in x10"); @@ -183,7 +183,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == x9, "length must in x9"); assert(_klass_reg->as_register() == x13, "klass_reg must in x13"); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == x10, "result must in x10"); @@ -195,11 +195,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_obj_reg->as_register(), 1); ce->store_parameter(_lock_reg->as_register(), 0); - Runtime1::StubID enter_id; + C1StubId enter_id; if (ce->compilation()->has_fpu_code()) { - enter_id = Runtime1::monitorenter_id; + enter_id = C1StubId::monitorenter_id; } else { - enter_id = Runtime1::monitorenter_nofpu_id; + enter_id = C1StubId::monitorenter_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id))); ce->add_call_info_here(_info); @@ -215,11 +215,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { } ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed - Runtime1::StubID exit_id; + C1StubId exit_id; if (ce->compilation()->has_fpu_code()) { - exit_id = Runtime1::monitorexit_id; + exit_id = C1StubId::monitorexit_id; } else { - 
exit_id = Runtime1::monitorexit_nofpu_id; + exit_id = C1StubId::monitorexit_nofpu_id; } __ la(ra, _continuation); __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); @@ -244,7 +244,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_trap_request, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id))); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); } @@ -253,9 +253,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a = nullptr; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. - a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); } else { - a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); + a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp index a8f260acae8ce..012932189382c 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -223,7 +223,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); PUSH(src, dst); - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); POP(src, dst); __ bnez(dst, cont); diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index 3d146b87707aa..940706b0a7376 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -305,7 +305,7 @@ int LIR_Assembler::emit_exception_handler() { __ verify_not_null_oop(x10); // search an exception handler (x10: exception oop, x13: throwing pc) - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id))); __ should_not_reach_here(); guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); __ end_a_stub(); @@ -361,7 +361,7 @@ int LIR_Assembler::emit_unwind_handler() { // remove the activation and dispatch to the unwind handler __ block_comment("remove_frame and dispatch to the unwind handler"); __ remove_frame(initial_frame_size_in_bytes()); - __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); + __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id))); // Emit the slow path assembly if (stub != nullptr) { @@ -1088,7 +1088,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(k_RInfo, Address(sp, 0)); // sub klass __ sd(klass_RInfo, Address(sp, wordSize)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); // load result to k_RInfo __ ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo @@ -1103,7 +1103,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass __ sd(k_RInfo, Address(sp, 0)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); // load result to k_RInfo __ ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo @@ -1391,7 +1391,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit // exception object is not added to oop map by LinearScan // (LinearScan assumes that no oops are in fixed registers) info->add_register_oop(exceptionOop); - Runtime1::StubID unwind_id; + C1StubId unwind_id; // get current pc information // pc is only needed if the method has an exception handler, the unwind code does not need it. 
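The hunk below picks between the handle_exception twins; the same rule governs every *_nofpu_id pair in this patch: the nofpu variant skips saving and restoring float registers when C1 knows the compiled method contains no FPU code (compare the save_fpu_registers plumbing in the ppc generate_code_for above). Condensed into a hypothetical helper for illustration:

static C1StubId handle_exception_stub_for(Compilation* comp) {
  // Saving the FP register file is wasted work for methods that never
  // touch it, so a cheaper twin of the stub exists for that case.
  return comp->has_fpu_code() ? C1StubId::handle_exception_id
                              : C1StubId::handle_exception_nofpu_id;
}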
@@ -1414,9 +1414,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit __ verify_not_null_oop(x10); // search an exception handler (x10: exception oop, x13: throwing pc) if (compilation()->has_fpu_code()) { - unwind_id = Runtime1::handle_exception_id; + unwind_id = C1StubId::handle_exception_id; } else { - unwind_id = Runtime1::handle_exception_nofpu_id; + unwind_id = C1StubId::handle_exception_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); __ nop(); @@ -2054,16 +2054,16 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { switch (patching_id(info)) { case PatchingStub::access_field_id: - target = Runtime1::entry_for(Runtime1::access_field_patching_id); + target = Runtime1::entry_for(C1StubId::access_field_patching_id); break; case PatchingStub::load_klass_id: - target = Runtime1::entry_for(Runtime1::load_klass_patching_id); + target = Runtime1::entry_for(C1StubId::load_klass_patching_id); break; case PatchingStub::load_mirror_id: - target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); + target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); break; case PatchingStub::load_appendix_id: - target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); + target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); break; default: ShouldNotReachHere(); } @@ -2152,7 +2152,7 @@ void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass __ sd(k_RInfo, Address(sp, 0)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); // load result to k_RInfo __ ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo diff --git a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp index 409e8dc0a0d95..b328d457192ba 100644 --- a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -1030,7 +1030,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -1062,7 +1062,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub = nullptr; if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, + stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); @@ -1070,7 +1070,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 1ae64b4f283ba..1e4b66069ee23 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == x10, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); } verify_oop(obj); @@ -316,7 +316,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1 if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == x10, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); } verify_oop(obj); diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp index 824d03640517e..5e4031727c827 100644 --- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp @@ -98,10 +98,10 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres if (frame_size() == no_frame_size) { leave(); far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - } else if (_stub_id == Runtime1::forward_exception_id) { + } else if (_stub_id == (int)C1StubId::forward_exception_id) { should_not_reach_here(); } else { - far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); + far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id))); } bind(L); } @@ -376,7 +376,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe return oop_maps; } -OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { +OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) { __ block_comment("generate_handle_exception"); // incoming parameters @@ -388,7 +388,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler 
*sasm) { OopMap* oop_map = nullptr; switch (id) { - case forward_exception_id: + case C1StubId::forward_exception_id: // We're handling an exception in the context of a compiled frame. // The registers have been saved in the standard places. Perform // an exception lookup in the caller and dispatch to the handler @@ -407,12 +407,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { __ sd(zr, Address(xthread, JavaThread::vm_result_offset())); __ sd(zr, Address(xthread, JavaThread::vm_result_2_offset())); break; - case handle_exception_nofpu_id: - case handle_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id); + oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); break; - case handle_exception_from_callee_id: { + case C1StubId::handle_exception_from_callee_id: { // At this point all registers except exception oop (x10) and // exception pc (ra) are dead. const int frame_size = 2 /* fp, return address */; @@ -469,13 +469,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord)); switch (id) { - case forward_exception_id: - case handle_exception_nofpu_id: - case handle_exception_id: + case C1StubId::forward_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: // Restore the registers that were saved at the beginning. - restore_live_registers(sasm, id != handle_exception_nofpu_id); + restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); break; - case handle_exception_from_callee_id: + case C1StubId::handle_exception_from_callee_id: break; default: ShouldNotReachHere(); } @@ -621,7 +621,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { return oop_maps; } -OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // for better readability const bool dont_gc_arguments = false; @@ -632,7 +632,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { OopMapSet* oop_maps = nullptr; switch (id) { { - case forward_exception_id: + case C1StubId::forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); __ leave(); @@ -640,32 +640,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case throw_div0_exception_id: + case C1StubId::throw_div0_exception_id: { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case throw_null_pointer_exception_id: + case C1StubId::throw_null_pointer_exception_id: { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case new_instance_id: - case fast_new_instance_id: - case fast_new_instance_init_check_id: + case C1StubId::new_instance_id: + case C1StubId::fast_new_instance_id: + case C1StubId::fast_new_instance_init_check_id: { Register klass = x13; // Incoming Register obj = x10; // Result - if (id == new_instance_id) { + if (id == C1StubId::new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == 
fast_new_instance_id) { + } else if (id == C1StubId::fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == fast_new_instance_init_check_id, "bad StubID"); + assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -686,7 +686,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { break; - case counter_overflow_id: + case C1StubId::counter_overflow_id: { Register bci = x10; Register method = x11; @@ -710,14 +710,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case new_type_array_id: - case new_object_array_id: + case C1StubId::new_type_array_id: + case C1StubId::new_object_array_id: { Register length = x9; // Incoming Register klass = x13; // Incoming Register obj = x10; // Result - if (id == new_type_array_id) { + if (id == C1StubId::new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -730,7 +730,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { Register tmp = obj; __ lwu(tmp, Address(klass, Klass::layout_helper_offset())); __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift); - int tag = ((id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); + int tag = ((id == C1StubId::new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); __ mv(t0, tag); __ beq(t0, tmp, ok); __ stop("assert(is an array klass)"); @@ -743,7 +743,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { OopMap* map = save_live_registers(sasm); assert_cond(map != nullptr); int call_offset = 0; - if (id == new_type_array_id) { + if (id == C1StubId::new_type_array_id) { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); @@ -762,7 +762,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case new_multi_array_id: + case C1StubId::new_multi_array_id: { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); // x10: klass @@ -785,7 +785,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case register_finalizer_id: + case C1StubId::register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); @@ -819,14 +819,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case throw_class_cast_exception_id: + case C1StubId::throw_class_cast_exception_id: { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case throw_incompatible_class_change_error_id: + case C1StubId::throw_incompatible_class_change_error_id: { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, @@ -834,7 +834,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case slow_subtype_check_id: + case C1StubId::slow_subtype_check_id: { // Typical calling sequence: // push klass_RInfo (object klass or other subclass) @@ -874,10 +874,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case monitorenter_nofpu_id: 
+ case C1StubId::monitorenter_nofpu_id: save_fpu_registers = false; // fall through - case monitorenter_id: + case C1StubId::monitorenter_id: { StubFrame f(sasm, "monitorenter", dont_gc_arguments); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -896,10 +896,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case monitorexit_nofpu_id: + case C1StubId::monitorexit_nofpu_id: save_fpu_registers = false; // fall through - case monitorexit_id: + case C1StubId::monitorexit_id: { StubFrame f(sasm, "monitorexit", dont_gc_arguments); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -920,7 +920,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case deoptimize_id: + case C1StubId::deoptimize_id: { StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return); OopMap* oop_map = save_live_registers(sasm); @@ -939,14 +939,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case throw_range_check_failed_id: + case C1StubId::throw_range_check_failed_id: { StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); } break; - case unwind_exception_id: + case C1StubId::unwind_exception_id: { __ set_info("unwind_exception", dont_gc_arguments); // note: no stubframe since we are about to leave the current @@ -955,7 +955,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case access_field_patching_id: + case C1StubId::access_field_patching_id: { StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -963,7 +963,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case load_klass_patching_id: + case C1StubId::load_klass_patching_id: { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -971,7 +971,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case load_mirror_patching_id: + case C1StubId::load_mirror_patching_id: { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -979,7 +979,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case load_appendix_patching_id: + case C1StubId::load_appendix_patching_id: { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -987,29 +987,29 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case handle_exception_nofpu_id: - case handle_exception_id: + case C1StubId::handle_exception_nofpu_id: + case C1StubId::handle_exception_id: { StubFrame f(sasm, "handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case handle_exception_from_callee_id: + case C1StubId::handle_exception_from_callee_id: { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case throw_index_exception_id: + case C1StubId::throw_index_exception_id: { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case throw_array_store_exception_id: + case 
C1StubId::throw_array_store_exception_id: { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return); // tos + 0: link @@ -1018,7 +1018,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case predicate_failed_trap_id: + case C1StubId::predicate_failed_trap_id: { StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return); @@ -1038,7 +1038,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { } break; - case dtrace_object_alloc_id: + case C1StubId::dtrace_object_alloc_id: { // c_rarg0: object StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); save_live_registers(sasm); diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index 1e3a8bde064b3..e2c9b9dd609e0 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -2385,6 +2385,74 @@ void C2_MacroAssembler::expand_bits_l_v(Register dst, Register src, Register mas expand_bits_v(dst, src, mask, /* is_long */ true); } +// j.l.Math.round(float) +// Returns the closest int to the argument, with ties rounding to positive infinity. +// We need to handle 3 special cases defined by the java api spec: +// NaN, +// float >= Integer.MAX_VALUE, +// float <= Integer.MIN_VALUE. +void C2_MacroAssembler::java_round_float_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, + BasicType bt, uint vector_length) { + // On riscv there is no rounding mode that directly matches the behaviour defined + // in the java api spec, i.e. no single rounding mode handles all the corner cases, e.g. + // RNE is the closest one, but it ties to "even", which means 1.5/2.5 both will be converted + // to 2, instead of 2 and 3 respectively. + // RUP does not work either: although the java api requires "rounding to positive infinity", + // RUP converts both 1.3/1.8 to 2, instead of 1 and 2 respectively. + // + // The optimal solution for non-NaN cases is: + // src+0.5 => dst, with rdn rounding mode, + // convert dst from float to int, with rdn rounding mode. + // And this solution works as expected for float >= Integer.MAX_VALUE and float <= Integer.MIN_VALUE. + // + // But we still need to handle NaN explicitly with vector mask instructions. + // + // Check MacroAssembler::java_round_float and C2_MacroAssembler::vector_round_sve in aarch64 for more details. + + csrwi(CSR_FRM, C2_MacroAssembler::rdn); + vsetvli_helper(bt, vector_length); + + // don't rearrange the instruction sequence order without performance testing. + // check MacroAssembler::java_round_float in riscv64 for more details. + mv(t0, jint_cast(0.5f)); + fmv_w_x(ftmp, t0); + + // replacing vfclass with feq as a performance optimization + vmfeq_vv(v0, src, src); + // set dst = 0 in cases of NaN + vmv_v_x(dst, zr); + + // dst = (src + 0.5) rounded down towards negative infinity + vfadd_vf(dst, src, ftmp, Assembler::v0_t); + vfcvt_x_f_v(dst, dst, Assembler::v0_t); // in RoundingMode::rdn + + csrwi(CSR_FRM, C2_MacroAssembler::rne); +} + +// java.lang.Math.round(double a) +// Returns the closest long to the argument, with ties rounding to positive infinity. +void C2_MacroAssembler::java_round_double_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, + BasicType bt, uint vector_length) { + // check C2_MacroAssembler::java_round_float_v above for more details.
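// Worked scalar illustration of the trick above (an aside, not part of the
// patch; expected values per java.lang.Math.round):
//   x =  1.5f : 1.5 + 0.5 = 2.0  -> 2   (RNE alone would also give 2)
//   x =  2.5f : 2.5 + 0.5 = 3.0  -> 3   (RNE alone gives 2: ties-to-even)
//   x =  1.3f : 1.3 + 0.5 = 1.8  -> 1   (RUP alone would give 2)
//   x = -1.5f : -1.5 + 0.5 = -1.0 -> -1 (ties go toward positive infinity)
// The rdn mode on the addition keeps x + 0.5 from rounding up across an
// integer boundary (e.g. x just below 0.5f), and rdn on the conversion is
// exactly floor(). NaN lanes are masked off by vmfeq_vv, so they keep the
// zero written by vmv_v_x, matching Math.round(NaN) == 0, and the
// saturating conversion covers the Integer.MIN_VALUE/MAX_VALUE overflow
// cases.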
+ + csrwi(CSR_FRM, C2_MacroAssembler::rdn); + vsetvli_helper(bt, vector_length); + + mv(t0, julong_cast(0.5)); + fmv_d_x(ftmp, t0); + + // replacing vfclass with feq as a performance optimization + vmfeq_vv(v0, src, src); + // set dst = 0 in cases of NaN + vmv_v_x(dst, zr); + + // dst = (src + 0.5) rounded down towards negative infinity + vfadd_vf(dst, src, ftmp, Assembler::v0_t); + vfcvt_x_f_v(dst, dst, Assembler::v0_t); // in RoundingMode::rdn + + csrwi(CSR_FRM, C2_MacroAssembler::rne); +} + void C2_MacroAssembler::element_compare(Register a1, Register a2, Register result, Register cnt, Register tmp1, Register tmp2, VectorRegister vr1, VectorRegister vr2, VectorRegister vrs, bool islatin, Label &DONE, Assembler::LMUL lmul) { diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp index 4d7f756923c24..38351565cc626 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp @@ -187,6 +187,9 @@ void expand_bits_i_v(Register dst, Register src, Register mask); void expand_bits_l_v(Register dst, Register src, Register mask); + void java_round_float_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length); + void java_round_double_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length); + void float16_to_float_v(VectorRegister dst, VectorRegister src, uint vector_length); void float_to_float16_v(VectorRegister dst, VectorRegister src, VectorRegister vtmp, Register tmp, uint vector_length); diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index db010c9c6c82f..05f55fd0da7af 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -1920,6 +1920,18 @@ bool Matcher::match_rule_supported(int opcode) { case Op_EncodeISOArray: return UseRVV; + // Current tests show that it brings a performance gain when MaxVectorSize >= 32, but a + // regression when MaxVectorSize == 16. So only enable the intrinsic when MaxVectorSize >= 32. + case Op_RoundVF: + return UseRVV && MaxVectorSize >= 32; + + // For double, current tests show that even with MaxVectorSize == 32, there is still some regression. + // Although there is no hardware to verify it for now, the trend of performance data on hardware + // (with vlenb == 16 and 32 respectively) makes it promising to bring better performance rather than + // regression for double when MaxVectorSize == 64+. So only enable the intrinsic when MaxVectorSize >= 64.
 void C2_MacroAssembler::element_compare(Register a1, Register a2, Register result, Register cnt, Register tmp1, Register tmp2,
                                         VectorRegister vr1, VectorRegister vr2, VectorRegister vrs, bool islatin, Label &DONE,
                                         Assembler::LMUL lmul) {
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
index 4d7f756923c24..38351565cc626 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
@@ -187,6 +187,9 @@
   void expand_bits_i_v(Register dst, Register src, Register mask);
   void expand_bits_l_v(Register dst, Register src, Register mask);

+  void java_round_float_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);
+  void java_round_double_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);
+
   void float16_to_float_v(VectorRegister dst, VectorRegister src, uint vector_length);
   void float_to_float16_v(VectorRegister dst, VectorRegister src, VectorRegister vtmp, Register tmp, uint vector_length);
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index db010c9c6c82f..05f55fd0da7af 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1920,6 +1920,18 @@ bool Matcher::match_rule_supported(int opcode) {
     case Op_EncodeISOArray:
       return UseRVV;

+    // Current tests show a performance gain when MaxVectorSize >= 32, but a
+    // regression when MaxVectorSize == 16. So only enable the intrinsic when MaxVectorSize >= 32.
+    case Op_RoundVF:
+      return UseRVV && MaxVectorSize >= 32;
+
+    // For double, current tests show some regression even with MaxVectorSize == 32.
+    // Although there is no hardware to verify it for now, the trend of the performance
+    // data on existing hardware (vlenb == 16 and 32 respectively) suggests a gain rather
+    // than a regression for double once MaxVectorSize reaches 64. So only enable the
+    // intrinsic when MaxVectorSize >= 64.
+    case Op_RoundVD:
+      return UseRVV && MaxVectorSize >= 64;
+
     case Op_PopCountI:
     case Op_PopCountL:
       return UsePopCountInstruction;
diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad
index 1a51d7583c9dd..54947f6bf9a19 100644
--- a/src/hotspot/cpu/riscv/riscv_v.ad
+++ b/src/hotspot/cpu/riscv/riscv_v.ad
@@ -4715,6 +4715,34 @@ instruct vsignum_reg(vReg dst, vReg zero, vReg one, vRegMask_V0 v0) %{
   ins_pipe(pipe_slow);
 %}

+// ---------------- Round float/double Vector Operations ----------------
+
+instruct vround_f(vReg dst, vReg src, fRegF tmp, vRegMask_V0 v0) %{
+  match(Set dst (RoundVF src));
+  effect(TEMP_DEF dst, TEMP tmp, TEMP v0);
+  format %{ "java_round_float_v $dst, $src\t" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    uint vector_length = Matcher::vector_length(this);
+    __ java_round_float_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+                          as_FloatRegister($tmp$$reg), bt, vector_length);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vround_d(vReg dst, vReg src, fRegD tmp, vRegMask_V0 v0) %{
+  match(Set dst (RoundVD src));
+  effect(TEMP_DEF dst, TEMP tmp, TEMP v0);
+  format %{ "java_round_double_v $dst, $src\t" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    uint vector_length = Matcher::vector_length(this);
+    __ java_round_double_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
+                           as_FloatRegister($tmp$$reg), bt, vector_length);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // -------------------------------- Reverse Bytes Vector Operations ------------------------

 instruct vreverse_bytes_masked(vReg dst_src, vRegMask_V0 v0) %{
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
index b7f1d3605681a..e01e4458e38d3 100644
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -48,7 +48,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
     ce->emit_call_c(a);
     CHECK_BAILOUT();
     ce->add_call_info_here(_info);
@@ -64,11 +64,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     __ load_const_optimized(Z_R1_scratch, _index->as_jint());
   }

-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
     __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
   }
   ce->emit_call_c(Runtime1::entry_for (stub_id));
@@ -84,7 +84,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {

 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -102,7 +102,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
   ce->store_parameter(_bci, 0);
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::counter_overflow_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -114,7 +114,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::throw_div0_exception_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   debug_only(__ should_not_reach_here());
@@ -124,9 +124,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for (C1StubId::throw_null_pointer_exception_id);
   }

   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
@@ -151,14 +151,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
   debug_only(__ should_not_reach_here());
 }

-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -186,7 +186,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
   __ lgr_if_needed(Z_R13, _length->as_register());
-  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
+  address a = Runtime1::entry_for (C1StubId::new_type_array_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -206,7 +206,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
   __ lgr_if_needed(Z_R13, _length->as_register());
-  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
+  address a = Runtime1::entry_for (C1StubId::new_object_array_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -217,11 +217,11 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {

 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
   __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
@@ -242,11 +242,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
     __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
   }
   // Note: non-blocking leaf routine => no call info needed.
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   ce->emit_call_c(Runtime1::entry_for (exit_id));
   CHECK_BAILOUT();
@@ -378,10 +378,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id: target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
-    case load_klass_id: target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id: target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id: target = Runtime1::entry_for (C1StubId::access_field_patching_id); break;
+    case load_klass_id: target = Runtime1::entry_for (C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id: target = Runtime1::entry_for (C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for (C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -406,7 +406,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::deoptimize_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index a5e62169a9350..d288f4a893d0a 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -172,7 +172,7 @@ int LIR_Assembler::emit_exception_handler() {

   int offset = code_offset();

-  address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
+  address a = Runtime1::entry_for (C1StubId::handle_exception_from_callee_id);
   address call_addr = emit_call_c(a);
   CHECK_BAILOUT_(-1);
   __ should_not_reach_here();
@@ -212,7 +212,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // Perform needed unlocking.
   MonitorExitStub* stub = nullptr;
   if (method()->is_synchronized()) {
-    // Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
+    // C1StubId::monitorexit_id expects lock address in Z_R1_scratch.
     LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
     monitor_address(0, lock);
     stub = new MonitorExitStub(lock, true, 0);
@@ -241,7 +241,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // Z_EXC_PC: exception pc

   // Dispatch to the unwind logic.
-  __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
+  __ load_const_optimized(Z_R5, Runtime1::entry_for (C1StubId::unwind_exception_id));
   __ z_br(Z_R5);

   // Emit the slow path assembly.
@@ -1910,8 +1910,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // Reuse the debug info from the safepoint poll for the throw op itself.
   __ get_PC(Z_EXC_PC);
   add_call_info(__ offset(), info); // for exception handler
-  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id
-                                                                    : Runtime1::handle_exception_nofpu_id);
+  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? C1StubId::handle_exception_id
+                                                                    : C1StubId::handle_exception_nofpu_id);
   emit_call_c(stub);
 }
@@ -2116,7 +2116,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     store_parameter(src_klass, 0); // sub
     store_parameter(dst_klass, 1); // super
-    emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id));
+    emit_call_c(Runtime1::entry_for (C1StubId::slow_subtype_check_id));
     CHECK_BAILOUT2(cont, slow);
     // Sets condition code 0 for match (2 otherwise).
     __ branch_optimized(Assembler::bcondEqual, cont);
@@ -2539,7 +2539,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
                                      RegisterOrConstant(super_check_offset));
     if (need_slow_path) {
       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-      address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
+      address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id);
       store_parameter(klass_RInfo, 0); // sub
       store_parameter(k_RInfo, 1);     // super
       emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
@@ -2614,7 +2614,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     // Perform the fast part of the checking logic.
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-    address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
+    address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id);
     store_parameter(klass_RInfo, 0); // sub
     store_parameter(k_RInfo, 1);     // super
     emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
index 619f0f7174f01..f998e86256f56 100644
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
@@ -885,7 +885,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for (x->type());
-  __ call_runtime(Runtime1::entry_for (Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for (C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
@@ -916,14 +916,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception,
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp1 = new_register(objectType);
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index a9140a7925ebd..f3fa19ddb31e0 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -254,7 +254,7 @@ void C1_MacroAssembler::initialize_object(
   // Dtrace support is unimplemented.
   //  if (CURRENT_ENV->dtrace_alloc_probes()) {
   //    assert(obj == rax, "must be");
-  //    call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
+  //    call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id)));
   //  }

   verify_oop(obj, FILE_AND_LINE);
@@ -315,7 +315,7 @@ void C1_MacroAssembler::allocate_array(
   // Dtrace support is unimplemented.
   //  if (CURRENT_ENV->dtrace_alloc_probes()) {
   //    assert(obj == rax, "must be");
-  //    call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
+  //    call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id)));
   //  }

   verify_oop(obj, FILE_AND_LINE);
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index 41c57043d8234..2f629c108c956 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -98,10 +98,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     restore_return_pc();
     load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
     z_br(Z_R1);
-  } else if (_stub_id == Runtime1::forward_exception_id) {
+  } else if (_stub_id == (int)C1StubId::forward_exception_id) {
     should_not_reach_here();
   } else {
-    load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
+    load_const_optimized(Z_R1, Runtime1::entry_for (C1StubId::forward_exception_id));
     z_br(Z_R1);
   }

@@ -305,7 +305,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {

   // for better readability
   const bool must_gc_arguments = true;
@@ -318,26 +318,26 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   // Stub code and info for the different stubs.
   OopMapSet* oop_maps = nullptr;
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         // will not return
       }
       break;

-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
@@ -352,7 +352,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         // Arguments :
         //   bci    : stack param 0
@@ -371,14 +371,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ z_br(Z_R14);
       }
       break;

-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length = Z_R13; // Incoming
         Register klass  = Z_R11; // Incoming
         Register obj    = Z_R2;  // Result

-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
         }
@@ -391,7 +391,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
           __ z_sra(t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
@@ -403,7 +403,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         OopMap* map = save_live_registers_except_r2(sasm);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
         }
@@ -418,7 +418,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { __ set_info("new_multi_array", dont_gc_arguments);
         // Z_R3,: klass
         // Z_R4,: rank
@@ -436,7 +436,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
@@ -459,62 +459,62 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { __ set_info("range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;

-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       { __ set_info("index_range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;

-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { __ set_info("throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;

-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;

-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       { __ set_info("handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;

-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       { __ set_info("handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;

-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // Note: no stubframe since we are about to leave the current
         //       activation and we are calling a leaf VM function only.
         generate_unwind_exception(sasm);
       }
       break;

-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       { __ set_info("throw_array_store_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
       }
       break;

-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       { // Z_R1_scratch: object
         __ set_info("throw_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;

-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;

-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Arguments :
         //   sub  : stack param 0
@@ -580,13 +580,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ z_br(Z_R14);
       }
       break;

-    case monitorenter_nofpu_id:
-    case monitorenter_id:
+    case C1StubId::monitorenter_nofpu_id:
+    case C1StubId::monitorenter_id:
       { // Z_R1_scratch : object
         // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
         __ set_info("monitorenter", dont_gc_arguments);

-        int save_fpu_registers = (id == monitorenter_id);
+        int save_fpu_registers = (id == C1StubId::monitorenter_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
@@ -600,15 +600,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case monitorexit_nofpu_id:
-    case monitorexit_id:
+    case C1StubId::monitorexit_nofpu_id:
+    case C1StubId::monitorexit_id:
       { // Z_R1_scratch : lock address
         // Note: really a leaf routine but must setup last java sp
        //       => Use call_RT for now (speed can be improved by
        //       doing last java sp setup manually).
         __ set_info("monitorexit", dont_gc_arguments);

-        int save_fpu_registers = (id == monitorexit_id);
+        int save_fpu_registers = (id == C1StubId::monitorexit_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
@@ -622,7 +622,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       { // Args: Z_R1_scratch: trap request
         __ set_info("deoptimize", dont_gc_arguments);
         Register trap_request = Z_R1_scratch;
@@ -639,32 +639,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { __ set_info("access_field_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;

-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { __ set_info("load_klass_patching", dont_gc_arguments);
         // We should set up register map.
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;

-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       { __ set_info("load_mirror_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;

-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       { __ set_info("load_appendix_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 #if 0
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         // We can't gc here so skip the oopmap but make sure that all
@@ -679,7 +679,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;

-    case fpu2long_stub_id:
+    case C1StubId::fpu2long_stub_id:
       {
         // rax, and rdx are destroyed, but should be free since the result is returned there
         // preserve rsi,ecx
@@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       break;
 #endif // TODO

-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
@@ -775,14 +775,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

     default:
       {
-        __ should_not_reach_here(FILE_AND_LINE, id);
+        __ should_not_reach_here(FILE_AND_LINE, (int)id);
       }
       break;
   }
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");

   // incoming parameters: Z_EXC_OOP, Z_EXC_PC
@@ -793,7 +793,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   Register reg_fp = Z_R1_scratch;

   switch (id) {
-    case forward_exception_id: {
+    case C1StubId::forward_exception_id: {
       // We're handling an exception in the context of a compiled frame.
       // The registers have been saved in the standard places. Perform
       // an exception lookup in the caller and dispatch to the handler
@@ -820,13 +820,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
       __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
       break;
     }
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // At this point all registers MAY be live.
       DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
-      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
+      oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Z_EXC_PC);
       break;
-    case handle_exception_from_callee_id: {
+    case C1StubId::handle_exception_from_callee_id: {
       // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
       DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
       __ save_return_pc(Z_EXC_PC);
@@ -875,15 +875,15 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ invalidate_registers(Z_R2);

   switch(id) {
-    case forward_exception_id:
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::forward_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // Restore the registers that were saved at the beginning.
       __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
-      restore_live_registers(sasm, id != handle_exception_nofpu_id);   // Pops as well the frame.
+      restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);   // Pops as well the frame.
       __ z_br(Z_R1_scratch);
       break;
-    case handle_exception_from_callee_id: {
+    case C1StubId::handle_exception_from_callee_id: {
       __ pop_frame();
       __ z_br(Z_R2); // Jump to exception handler.
     }
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index 345b779e8094e..90c2fad1f5d57 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -1385,6 +1385,14 @@ void Assembler::addl(Address dst, int32_t imm32) {
   emit_arith_operand(0x81, rax, dst, imm32);
 }

+void Assembler::eaddl(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, rax, src, imm32);
+}
+
 void Assembler::addb(Address dst, int imm8) {
   InstructionMark im(this);
   prefix(dst);
@@ -1429,11 +1437,26 @@ void Assembler::addl(Address dst, Register src) {
   emit_operand(src, dst, 0);
 }

+void Assembler::eaddl(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x01);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::addl(Register dst, int32_t imm32) {
   prefix(dst);
   emit_arith(0x81, 0xC0, dst, imm32);
 }

+void Assembler::eaddl(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xC0, src, imm32);
+}
+
 void Assembler::addl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -1441,11 +1464,27 @@ void Assembler::addl(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+void Assembler::eaddl(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x03);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::addl(Register dst, Register src) {
   (void) prefix_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x03, 0xC0, dst, src);
 }
+void Assembler::eaddl(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x01, 0xC0, src1, src2);
+}
+
 void Assembler::addr_nop_4() {
   assert(UseAddressNop, "no CPU support");
   // 4 bytes: NOP DWORD PTR [EAX+0]
@@ -1632,11 +1671,25 @@ void Assembler::andl(Address dst, int32_t imm32) {
   emit_arith_operand(0x81, as_Register(4), dst, imm32);
 }

+void Assembler::eandl(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, rsp, src, imm32);
+}
+
 void Assembler::andl(Register dst, int32_t imm32) {
   prefix(dst);
   emit_arith(0x81, 0xE0, dst, imm32);
 }

+void Assembler::eandl(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xE0, src, imm32);
+}
+
 void Assembler::andl(Address dst, Register src) {
   InstructionMark im(this);
   prefix(dst, src);
@@ -1651,11 +1704,27 @@ void Assembler::andl(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+void Assembler::eandl(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x23);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::andl(Register dst, Register src) {
   (void) prefix_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x23, 0xC0, dst, src);
 }

+void Assembler::eandl(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x21, 0xC0, src1, src2);
+}
+
 void Assembler::andnl(Register dst, Register src1, Register src2) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -1803,6 +1872,12 @@ void Assembler::cmovl(Condition cc, Register dst, Register src) {
   emit_opcode_prefix_and_encoding(0x40 | cc, 0xC0, encode);
 }

+void Assembler::ecmovl(Condition cc, Register dst, Register src1, Register src2) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  emit_int16((0x40 | cc), (0xC0 | encode));
+}
+
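The e-prefixed emitters above are Intel APX NDD (new data destination) encodings: three-operand GPR instructions that write the destination without destroying either source, unlike the legacy two-operand forms they sit next to. A behavioural sketch in plain C++ (illustrative only; these helper names are not part of the patch):

#include <cstdint>

// Legacy two-operand form: addl(dst, src) computes dst += src, clobbering dst.
static void legacy_addl(uint32_t& dst, uint32_t src) {
  dst += src;
}

// APX NDD form: eaddl(dst, src1, src2) computes dst = src1 + src2, leaving
// both sources intact, so the register allocator can avoid a preparatory mov.
static uint32_t ndd_eaddl(uint32_t src1, uint32_t src2) {
  return src1 + src2;
}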
 void Assembler::cmovl(Condition cc, Register dst, Address src) {
   InstructionMark im(this);
   NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
@@ -1811,6 +1886,15 @@ void Assembler::cmovl(Condition cc, Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+void Assembler::ecmovl(Condition cc, Register dst, Register src1, Address src2) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  emit_int8((0x40 | cc));
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::cmpb(Address dst, Register reg) {
   assert(reg->has_byte_register(), "must have byte register");
   InstructionMark im(this);
@@ -2429,6 +2513,15 @@ void Assembler::decl(Address dst) {
   emit_operand(rcx, dst, 0);
 }

+void Assembler::edecl(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xFF);
+  emit_operand(rcx, src, 0);
+}
+
 void Assembler::divsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
@@ -2474,21 +2567,45 @@ void Assembler::idivl(Register src) {
   emit_int16((unsigned char)0xF7, (0xF8 | encode));
 }

+void Assembler::eidivl(Register src, bool no_flags) { // Signed
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF7, (0xF8 | encode));
+}
+
 void Assembler::divl(Register src) { // Unsigned
   int encode = prefix_and_encode(src->encoding());
   emit_int16((unsigned char)0xF7, (0xF0 | encode));
 }

+void Assembler::edivl(Register src, bool no_flags) { // Unsigned
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF7, (0xF0 | encode));
+}
+
 void Assembler::imull(Register src) {
   int encode = prefix_and_encode(src->encoding());
   emit_int16((unsigned char)0xF7, (0xE8 | encode));
 }

+void Assembler::eimull(Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF7, (0xE8 | encode));
+}
+
 void Assembler::imull(Register dst, Register src) {
   int encode = prefix_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode);
 }
+void Assembler::eimull(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xAF, (0xC0 | encode));
+}
+
 void Assembler::imull(Register dst, Address src, int32_t value) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -2503,6 +2620,22 @@ void Assembler::imull(Register dst, Address src, int32_t value) {
   }
 }

+void Assembler::eimull(Register dst, Address src, int32_t value, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (is8bit(value)) {
+    emit_int8((unsigned char)0x6B);
+    emit_operand(dst, src, 1);
+    emit_int8(value);
+  } else {
+    emit_int8((unsigned char)0x69);
+    emit_operand(dst, src, 4);
+    emit_int32(value);
+  }
+}
+
 void Assembler::imull(Register dst, Register src, int value) {
   int encode = prefix_and_encode(dst->encoding(), src->encoding());
   if (is8bit(value)) {
@@ -2513,6 +2646,17 @@ void Assembler::imull(Register dst, Register src, int value) {
   }
 }

+void Assembler::eimull(Register dst, Register src, int value, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (is8bit(value)) {
+    emit_int24(0x6B, (0xC0 | encode), value & 0xFF);
+  } else {
+    emit_int16(0x69, (0xC0 | encode));
+    emit_int32(value);
+  }
+}
+
 void Assembler::imull(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst, false, true /* is_map1 */);
@@ -2520,6 +2664,14 @@ void Assembler::imull(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+void Assembler::eimull(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xAF);
+  emit_operand(src1, src2, 0);
+}

 void Assembler::incl(Address dst) {
   // Don't use it directly. Use MacroAssembler::increment() instead.
@@ -2529,6 +2681,16 @@ void Assembler::incl(Address dst) {
   emit_operand(rax, dst, 0);
 }
+void Assembler::eincl(Register dst, Address src, bool no_flags) {
+  // Don't use it directly. Use MacroAssembler::increment() instead.
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xFF);
+  emit_operand(rax, src, 0);
+}
+
 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
   InstructionMark im(this);
   assert((0 <= cc) && (cc < 16), "illegal cc");
@@ -2696,6 +2858,13 @@ void Assembler::lzcntl(Register dst, Register src) {
   emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode);
 }

+void Assembler::elzcntl(Register dst, Register src, bool no_flags) {
+  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF5, (0xC0 | encode));
+}
+
 void Assembler::lzcntl(Register dst, Address src) {
   assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
   InstructionMark im(this);
@@ -2705,6 +2874,16 @@ void Assembler::lzcntl(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }

+void Assembler::elzcntl(Register dst, Address src, bool no_flags) {
+  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xF5);
+  emit_operand(dst, src, 0);
+}
+
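The no_flags parameter threaded through these emitters selects the APX NF (no flags) variant of the EVEX encoding, which performs the operation without updating RFLAGS, so an earlier compare result can stay live across it. A small behavioural model (illustrative only; RFLAGS is simplified to ZF and the helper names are hypothetical):

#include <bit>
#include <cstdint>

struct Flags { bool zf; };  // drastically simplified RFLAGS model

// Legacy POPCNT: writes the result and sets ZF when the source is zero.
static uint32_t popcnt_model(uint32_t src, Flags& flags) {
  flags.zf = (src == 0);
  return static_cast<uint32_t>(std::popcount(src));
}

// NF form (no_flags == true): same result, RFLAGS left untouched.
static uint32_t popcnt_nf_model(uint32_t src, const Flags& /*flags*/) {
  return static_cast<uint32_t>(std::popcount(src));
}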
 // Emit mfence instruction
 void Assembler::mfence() {
   NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
@@ -3855,11 +4034,26 @@ void Assembler::mull(Address src) {
   emit_operand(rsp, src, 0);
 }

+void Assembler::emull(Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xF7);
+  emit_operand(rsp, src, 0);
+}
+
 void Assembler::mull(Register src) {
   int encode = prefix_and_encode(src->encoding());
   emit_int16((unsigned char)0xF7, (0xE0 | encode));
 }

+void Assembler::emull(Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF7, (0xE0 | encode));
+}
+
 void Assembler::mulsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
@@ -3901,6 +4095,12 @@ void Assembler::negl(Register dst) {
   emit_int16((unsigned char)0xF7, (0xD8 | encode));
 }

+void Assembler::enegl(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xF7, (0xD8 | encode));
+}
+
 void Assembler::negl(Address dst) {
   InstructionMark im(this);
   prefix(dst);
@@ -3908,6 +4108,15 @@ void Assembler::negl(Address dst) {
   emit_operand(as_Register(3), dst, 0);
 }

+void Assembler::enegl(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xF7);
+  emit_operand(as_Register(3), src, 0);
+}
+
 void Assembler::nop(uint i) {
 #ifdef ASSERT
   assert(i > 0, " ");
@@ -4219,17 +4428,48 @@ void Assembler::notl(Register dst) {
   emit_int16((unsigned char)0xF7, (0xD0 | encode));
 }

+void Assembler::enotl(Register dst, Register src) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  emit_int16((unsigned char)0xF7, (0xD0 | encode));
+}
+
+void Assembler::orw(Register dst, Register src) {
+  (void)prefix_and_encode(dst->encoding(), src->encoding());
+  emit_arith(0x0B, 0xC0, dst, src);
+}
+
+void Assembler::eorw(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x0B, 0xC0, src1, src2);
+}
+
 void Assembler::orl(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefix(dst);
   emit_arith_operand(0x81, rcx, dst, imm32);
 }

+void Assembler::eorl(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, rcx, src, imm32);
+}
+
 void Assembler::orl(Register dst, int32_t imm32) {
   prefix(dst);
   emit_arith(0x81, 0xC8, dst, imm32);
 }

+void Assembler::eorl(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xC8, src, imm32);
+}
+
 void Assembler::orl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -4237,11 +4477,27 @@ void Assembler::orl(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
+void Assembler::eorl(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x0B);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::orl(Register dst, Register src) {
   (void) prefix_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x0B, 0xC0, dst, src);
 }

+void Assembler::eorl(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x09, 0xC0, src1, src2);
+}
+
 void Assembler::orl(Address dst, Register src) {
   InstructionMark im(this);
   prefix(dst, src);
@@ -4249,6 +4505,15 @@ void Assembler::orl(Address dst, Register src) {
   emit_operand(src, dst, 0);
 }

+void Assembler::eorl(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x09);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::orb(Address dst, int imm8) {
   InstructionMark im(this);
   prefix(dst);
@@ -4257,6 +4522,16 @@ void Assembler::orb(Address dst, int imm8) {
   emit_int8(imm8);
 }

+void Assembler::eorb(Register dst, Address src, int imm8, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0x80);
+  emit_operand(rcx, src, 1);
+  emit_int8(imm8);
+}
+
 void Assembler::orb(Address dst, Register src) {
   InstructionMark im(this);
   prefix(dst, src, true);
@@ -4264,6 +4539,15 @@ void Assembler::orb(Address dst, Register src) {
   emit_operand(src, dst, 0);
 }

+void Assembler::eorb(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x08);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::packsswb(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -5384,6 +5668,16 @@ void Assembler::popcntl(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
+void Assembler::epopcntl(Register dst, Address src, bool no_flags) {
+  assert(VM_Version::supports_popcnt(), "must support");
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0x88);
+  emit_operand(dst, src, 0);
+}
+
 void Assembler::popcntl(Register dst, Register src) {
   assert(VM_Version::supports_popcnt(), "must support");
   emit_int8((unsigned char)0xF3);
@@ -5391,6 +5685,13 @@ void Assembler::popcntl(Register dst, Register src) {
   emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
 }

+void Assembler::epopcntl(Register dst, Register src, bool no_flags) {
+  assert(VM_Version::supports_popcnt(), "must support");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0x88, (0xC0 | encode));
+}
+
 void Assembler::evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
   assert(VM_Version::supports_avx512_bitalg(), "must support avx512bitalg feature");
   assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
@@ -5979,6 +6280,17 @@ void Assembler::rcll(Register dst, int imm8) {
   }
 }

+void Assembler::ercll(Register dst, Register src, int imm8) {
+  assert(isShiftCount(imm8), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xD0 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
+  }
+}
+
 void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -6059,11 +6371,28 @@ void Assembler::roll(Register dst, int imm8) {
   }
 }

+void Assembler::eroll(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xC0 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xC0 | encode), imm8);
+  }
+}
+
 void Assembler::roll(Register dst) {
   int encode = prefix_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 }

+void Assembler::eroll(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xC0 | encode));
+}
+
 void Assembler::rorl(Register dst, int imm8) {
   assert(isShiftCount(imm8), "illegal shift count");
   int encode = prefix_and_encode(dst->encoding());
@@ -6074,17 +6403,40 @@ void Assembler::rorl(Register dst, int imm8) {
   }
 }

+void Assembler::erorl(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xC8 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
+  }
+}
+
 void Assembler::rorl(Register dst) {
   int encode = prefix_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 }

+void Assembler::erorl(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xC8 | encode));
+}
+
 #ifdef _LP64
 void Assembler::rorq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xC8 | encode));
 }

+void Assembler::erorq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xC8 | encode));
+}
+
 void Assembler::rorq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -6095,11 +6447,28 @@ void Assembler::rorq(Register dst, int imm8) {
   }
 }

+void Assembler::erorq(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xC8 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xC8 | encode), imm8);
+  }
+}
+
 void Assembler::rolq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xC0 | encode));
 }

+void Assembler::erolq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xC0 | encode));
+}
+
 void Assembler::rolq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -6109,6 +6478,17 @@ void Assembler::rolq(Register dst, int imm8) {
     emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8);
   }
 }
evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int16((unsigned char)0xD1, (0xC0 | encode)); + } else { + emit_int24((unsigned char)0xC1, (0xc0 | encode), imm8); + } + } #endif void Assembler::sahf() { @@ -6134,6 +6514,23 @@ void Assembler::sall(Address dst, int imm8) { } } +void Assembler::esall(Register dst, Address src, int imm8, bool no_flags) { + InstructionMark im(this); + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int8((unsigned char)0xD1); + emit_operand(as_Register(4), src, 0); + } + else { + emit_int8((unsigned char)0xC1); + emit_operand(as_Register(4), src, 1); + emit_int8(imm8); + } +} + void Assembler::sall(Address dst) { InstructionMark im(this); prefix(dst); @@ -6141,8 +6538,17 @@ void Assembler::sall(Address dst) { emit_operand(as_Register(4), dst, 0); } -void Assembler::sall(Register dst, int imm8) { - assert(isShiftCount(imm8), "illegal shift count"); +void Assembler::esall(Register dst, Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xD3); + emit_operand(as_Register(4), src, 0); +} + +void Assembler::sall(Register dst, int imm8) { + assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); if (imm8 == 1) { emit_int16((unsigned char)0xD1, (0xE0 | encode)); @@ -6151,11 +6557,28 @@ void Assembler::sall(Register dst, int imm8) { } } +void Assembler::esall(Register dst, Register src, int imm8, bool no_flags) { + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int16((unsigned char)0xD1, (0xE0 | encode)); + } else { + emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); + } +} + void Assembler::sall(Register dst) { int encode = prefix_and_encode(dst->encoding()); emit_int16((unsigned char)0xD3, (0xE0 | encode)); } +void Assembler::esall(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xD3, (0xE0 | encode)); +} + void Assembler::sarl(Address dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); InstructionMark im(this); @@ -6171,6 +6594,23 @@ void Assembler::sarl(Address dst, int imm8) { } } +void Assembler::esarl(Register dst, Address src, int imm8, bool no_flags) { + assert(isShiftCount(imm8), "illegal shift 
count"); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int8((unsigned char)0xD1); + emit_operand(as_Register(7), src, 0); + } + else { + emit_int8((unsigned char)0xC1); + emit_operand(as_Register(7), src, 1); + emit_int8(imm8); + } +} + void Assembler::sarl(Address dst) { InstructionMark im(this); prefix(dst); @@ -6178,6 +6618,15 @@ void Assembler::sarl(Address dst) { emit_operand(as_Register(7), dst, 0); } +void Assembler::esarl(Register dst, Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xD3); + emit_operand(as_Register(7), src, 0); +} + void Assembler::sarl(Register dst, int imm8) { int encode = prefix_and_encode(dst->encoding()); assert(isShiftCount(imm8), "illegal shift count"); @@ -6188,11 +6637,28 @@ void Assembler::sarl(Register dst, int imm8) { } } +void Assembler::esarl(Register dst, Register src, int imm8, bool no_flags) { + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int16((unsigned char)0xD1, (0xF8 | encode)); + } else { + emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8); + } +} + void Assembler::sarl(Register dst) { int encode = prefix_and_encode(dst->encoding()); emit_int16((unsigned char)0xD3, (0xF8 | encode)); } +void Assembler::esarl(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xD3, (0xF8 | encode)); +} + void Assembler::sbbl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst); @@ -6204,7 +6670,6 @@ void Assembler::sbbl(Register dst, int32_t imm32) { emit_arith(0x81, 0xD8, dst, imm32); } - void Assembler::sbbl(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); @@ -6297,7 +6762,6 @@ void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) { emit_int16((unsigned char)0xCD, (0xC0 | encode)); } - void Assembler::shll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); @@ -6308,11 +6772,28 @@ void Assembler::shll(Register dst, int imm8) { } } +void Assembler::eshll(Register dst, Register src, int imm8, bool no_flags) { + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), 
src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1 ) { + emit_int16((unsigned char)0xD1, (0xE0 | encode)); + } else { + emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8); + } +} + void Assembler::shll(Register dst) { int encode = prefix_and_encode(dst->encoding()); emit_int16((unsigned char)0xD3, (0xE0 | encode)); } +void Assembler::eshll(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xD3, (0xE0 | encode)); +} + void Assembler::shrl(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); @@ -6324,11 +6805,29 @@ void Assembler::shrl(Register dst, int imm8) { } } +void Assembler::eshrl(Register dst, Register src, int imm8, bool no_flags) { + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int16((unsigned char)0xD1, (0xE8 | encode)); + } + else { + emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8); + } +} + void Assembler::shrl(Register dst) { int encode = prefix_and_encode(dst->encoding()); emit_int16((unsigned char)0xD3, (0xE8 | encode)); } +void Assembler::eshrl(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xD3, (0xE8 | encode)); +} + void Assembler::shrl(Address dst) { InstructionMark im(this); prefix(dst); @@ -6336,6 +6835,15 @@ void Assembler::shrl(Address dst) { emit_operand(as_Register(5), dst, 0); } +void Assembler::eshrl(Register dst, Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xD3); + emit_operand(as_Register(5), src, 0); +} + void Assembler::shrl(Address dst, int imm8) { InstructionMark im(this); assert(isShiftCount(imm8), "illegal shift count"); @@ -6351,37 +6859,89 @@ void Assembler::shrl(Address dst, int imm8) { } } +void Assembler::eshrl(Register dst, Address src, int imm8, bool no_flags) { + InstructionMark im(this); + assert(isShiftCount(imm8), "illegal shift count"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + if (imm8 == 1) { + emit_int8((unsigned char)0xD1); + emit_operand(as_Register(5), src, 0); + } + else { + 
emit_int8((unsigned char)0xC1);
+    emit_operand(as_Register(5), src, 1);
+    emit_int8(imm8);
+  }
+}
 
 void Assembler::shldl(Register dst, Register src) {
   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xA5, 0xC0, encode);
 }
 
+void Assembler::eshldl(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16(0xA5, (0xC0 | encode));
+}
+
 void Assembler::shldl(Register dst, Register src, int8_t imm8) {
   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
 }
 
+void Assembler::eshldl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int24(0x24, (0xC0 | encode), imm8);
+}
+
 void Assembler::shrdl(Register dst, Register src) {
   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xAD, 0xC0, encode);
 }
 
+void Assembler::eshrdl(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16(0xAD, (0xC0 | encode));
+}
+
 void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
   int encode = prefix_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
 }
 
+void Assembler::eshrdl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int24(0x2C, (0xC0 | encode), imm8);
+}
+
 #ifdef _LP64
 void Assembler::shldq(Register dst, Register src, int8_t imm8) {
   int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xA4, 0xC0, encode, imm8);
 }
 
+void Assembler::eshldq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int24(0x24, (0xC0 | encode), imm8);
+}
+
 void Assembler::shrdq(Register dst, Register src, int8_t imm8) {
   int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */);
   emit_opcode_prefix_and_encoding((unsigned char)0xAC, 0xC0, encode, imm8);
 }
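Since the double-shift opcodes are easy to mix up: SHLD/SHRD shift the destination while pulling the vacated bits from a second register, and the promoted EVEX forms above keep the CL variants at 0xA5/0xAD but use 0x24/0x2C for the immediate variants (the legacy map-1 bytes 0xA4/0xAC remain in shldl/shrdl). A standalone behavioral model of the 32-bit semantics, for reference only:

    #include <cstdint>

    // SHLD: shift dst left, filling the vacated low bits from the high end of src.
    uint32_t shld32(uint32_t dst, uint32_t src, unsigned count) {
      count &= 31;
      return count == 0 ? dst : (dst << count) | (src >> (32 - count));
    }

    // SHRD: shift dst right, filling the vacated high bits from the low end of src.
    uint32_t shrd32(uint32_t dst, uint32_t src, unsigned count) {
      count &= 31;
      return count == 0 ? dst : (dst >> count) | (src << (32 - count));
    }

+
+void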
Assembler::eshrdq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int24(0x2C, (0xC0 | encode), imm8); +} #endif // copies a single word from [esi] to [edi] @@ -6472,6 +7032,14 @@ void Assembler::subl(Address dst, int32_t imm32) { emit_arith_operand(0x81, rbp, dst, imm32); } +void Assembler::esubl(Register dst, Address src, int32_t imm32, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith_operand(0x81, rbp, src, imm32); +} + void Assembler::subl(Address dst, Register src) { InstructionMark im(this); prefix(dst, src); @@ -6479,17 +7047,38 @@ void Assembler::subl(Address dst, Register src) { emit_operand(src, dst, 0); } +void Assembler::esubl(Register dst, Address src1, Register src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0x29); + emit_operand(src2, src1, 0); +} + void Assembler::subl(Register dst, int32_t imm32) { prefix(dst); emit_arith(0x81, 0xE8, dst, imm32); } +void Assembler::esubl(Register dst, Register src, int32_t imm32, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith(0x81, 0xE8, src, imm32); +} + // Force generation of a 4 byte immediate value even if it fits into 8bit void Assembler::subl_imm32(Register dst, int32_t imm32) { prefix(dst); emit_arith_imm32(0x81, 0xE8, dst, imm32); } +void Assembler::esubl_imm32(Register dst, Register src, int32_t imm32, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith_imm32(0x81, 0xE8, src, imm32); +} + void Assembler::subl(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); @@ -6497,11 +7086,27 @@ void Assembler::subl(Register dst, Address src) { emit_operand(dst, src, 0); } +void Assembler::esubl(Register dst, Register src1, Address src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + 
emit_int8(0x2B); + emit_operand(src1, src2, 0); +} + void Assembler::subl(Register dst, Register src) { (void) prefix_and_encode(dst->encoding(), src->encoding()); emit_arith(0x2B, 0xC0, dst, src); } +void Assembler::esubl(Register dst, Register src2, Register src1, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + // opcode matches gcc + emit_arith(0x29, 0xC0, src1, src2); +} + void Assembler::subsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -6605,6 +7210,13 @@ void Assembler::tzcntl(Register dst, Register src) { emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode); } +void Assembler::etzcntl(Register dst, Register src, bool no_flags) { + assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF4, (0xC0 | encode)); +} + void Assembler::tzcntl(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); InstructionMark im(this); @@ -6614,6 +7226,16 @@ void Assembler::tzcntl(Register dst, Address src) { emit_operand(dst, src, 0); } +void Assembler::etzcntl(Register dst, Address src, bool no_flags) { + assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xF4); + emit_operand(dst, src, 0); +} + void Assembler::tzcntq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); @@ -6621,6 +7243,13 @@ void Assembler::tzcntq(Register dst, Register src) { emit_opcode_prefix_and_encoding((unsigned char)0xBC, 0xC0, encode); } +void Assembler::etzcntq(Register dst, Register src, bool no_flags) { + assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF4, (0xC0 | encode)); +} + void Assembler::tzcntq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); InstructionMark im(this); @@ -6630,6 +7259,16 @@ void Assembler::tzcntq(Register dst, Address src) { emit_operand(dst, src, 0); } +void Assembler::etzcntq(Register dst, Address src, bool no_flags) { + assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); + InstructionMark im(this); + InstructionAttr 
attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xF4); + emit_operand(dst, src, 0); +} + void Assembler::ucomisd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); @@ -6749,11 +7388,25 @@ void Assembler::xorl(Address dst, int32_t imm32) { emit_arith_operand(0x81, as_Register(6), dst, imm32); } +void Assembler::exorl(Register dst, Address src, int32_t imm32, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith_operand(0x81, as_Register(6), src, imm32); +} + void Assembler::xorl(Register dst, int32_t imm32) { prefix(dst); emit_arith(0x81, 0xF0, dst, imm32); } +void Assembler::exorl(Register dst, Register src, int32_t imm32, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith(0x81, 0xF0, src, imm32); +} + void Assembler::xorl(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); @@ -6761,11 +7414,27 @@ void Assembler::xorl(Register dst, Address src) { emit_operand(dst, src, 0); } +void Assembler::exorl(Register dst, Register src1, Address src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0x33); + emit_operand(src1, src2, 0); +} + void Assembler::xorl(Register dst, Register src) { (void) prefix_and_encode(dst->encoding(), src->encoding()); emit_arith(0x33, 0xC0, dst, src); } +void Assembler::exorl(Register dst, Register src1, Register src2, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + // opcode matches gcc + emit_arith(0x31, 0xC0, src1, src2); +} + void Assembler::xorl(Address dst, Register src) { InstructionMark im(this); prefix(dst, src); @@ -6773,6 +7442,15 @@ void Assembler::xorl(Address dst, Register src) { emit_operand(src, dst, 0); } +void Assembler::exorl(Register dst, Address src1, Register src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit); + evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, 
VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x31);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::xorb(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -6780,6 +7458,15 @@ void Assembler::xorb(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
 
+void Assembler::exorb(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x32);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::xorb(Address dst, Register src) {
   InstructionMark im(this);
   prefix(dst, src, true);
@@ -6787,6 +7474,15 @@ void Assembler::xorb(Address dst, Register src) {
   emit_operand(src, dst, 0);
 }
 
+void Assembler::exorb(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_8bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x30);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::xorw(Register dst, Address src) {
   InstructionMark im(this);
   emit_int8(0x66);
@@ -6795,6 +7491,16 @@ void Assembler::xorw(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
 
+void Assembler::exorw(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  // The 0x66 operand-size byte is carried inside the EVEX prefix (VEX_SIMD_66);
+  // emitting it separately, as the legacy xorw above does, would be invalid here.
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_16bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x33);
+  emit_operand(src1, src2, 0);
+}
+
 // AVX 3-operands scalar float-point arithmetic instructions
 
 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
@@ -11378,6 +12084,12 @@ void Assembler::decl(Register dst) {
   emit_int8(0x48 | dst->encoding());
 }
 
+void Assembler::edecl(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // The EVEX-promoted DEC must use the two-byte 0xFF /1 form; the one-byte
+  // 0x48+r opcode above is only valid outside an EVEX prefix.
+  emit_int16((unsigned char)0xFF, (0xC8 | encode));
+}
+
 // 64bit doesn't use the x87
 
 void Assembler::fabs() {
@@ -11816,7 +12528,7 @@ void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexS
 // This is a 4 byte encoding
 void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool eevex_b, bool evex_v,
-                            bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
+                            bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool no_flags) {
   // EVEX 0x62 prefix
   // byte1 = EVEX_4bytes;
@@ -11842,11 +12554,17 @@ void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, boo
   // of form {66, F3, F2}
   byte3 |= pre;
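The APX-specific change lands in the payload byte assembled just below: the updated comment documents P2 as 00LXVF00 for promoted scalar instructions, so the prefix can carry ND (new data destination) in bit 4 and NF (suppress flag updates) in bit 2, slots the vector encodings use for the broadcast bit and part of the opmask field. A minimal standalone model of that layout, with invented names:

    #include <cstdint>

    // Illustrative model of EVEX payload byte P2 in the promoted-instruction
    // layout 00LXVF00 described below (X = ND, F = NF).
    uint8_t apx_p2(bool nd, bool nf) {
      uint8_t b = 0;
      if (nd) b |= 0x10;  // bit 4: new data destination carried in EVEX.vvvv
      if (nf) b |= 0x04;  // bit 2: no-flags; matches the byte4 |= 0x4 below
      return b;
    }

-  // P2: byte 4 as zL'Lbv'aaa
-  // kregs are implemented in the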
low 3 bits as aaa - int byte4 = (_attributes->is_no_reg_mask()) ? - 0 : - _attributes->get_embedded_opmask_register_specifier(); + // P2: byte 4 as zL'Lbv'aaa or 00LXVF00 where V = V4, X(extended context) = ND and F = NF (no flags) + int byte4 = 0; + if (no_flags) { + assert(_attributes->is_no_reg_mask(), "mask register not supported with no_flags"); + byte4 |= 0x4; + } else { + // kregs are implemented in the low 3 bits as aaa + byte4 = (_attributes->is_no_reg_mask()) ? + 0 : + _attributes->get_embedded_opmask_register_specifier(); + } // EVEX.v` for extending EVEX.vvvv or VIDX byte4 |= (evex_v ? 0: EVEX_V); // third EXEC.b for broadcast actions @@ -11861,11 +12579,12 @@ void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, boo emit_int32(EVEX_4bytes, byte2, byte3, byte4); } -void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { - if (adr.base_needs_rex2() || adr.index_needs_rex2()) { +void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool nds_is_ndd, bool no_flags) { + if (adr.base_needs_rex2() || adr.index_needs_rex2() || nds_is_ndd || no_flags) { assert(UseAPX, "APX features not enabled"); } - bool is_extended = adr.base_needs_rex2() || adr.index_needs_rex2() || nds_enc >= 16 || xreg_enc >= 16; + if (nds_is_ndd) attributes->set_extended_context(); + bool is_extended = adr.base_needs_rex2() || adr.index_needs_rex2() || nds_enc >= 16 || xreg_enc >= 16 || nds_is_ndd; bool vex_r = (xreg_enc & 8) == 8; bool vex_b = adr.base_needs_rex(); bool vex_x; @@ -11908,7 +12627,7 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix bool eevex_x = adr.index_needs_rex2(); bool eevex_b = adr.base_needs_rex2(); attributes->set_is_evex_instruction(); - evex_prefix(vex_r, vex_b, vex_x, evex_r, eevex_b, evex_v, eevex_x, nds_enc, pre, opc); + evex_prefix(vex_r, vex_b, vex_x, evex_r, eevex_b, evex_v, eevex_x, nds_enc, pre, opc, no_flags); } else { if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) { attributes->set_rex_vex_w(false); @@ -11917,10 +12636,21 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix } } -int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr) { - if (src_is_gpr && src_enc >= 16) { +void Assembler::evex_prefix_ndd(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) { + attributes->set_is_evex_instruction(); + vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ true, no_flags); +} + +void Assembler::evex_prefix_nf(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool no_flags) { + attributes->set_is_evex_instruction(); + vex_prefix(adr, ndd_enc, xreg_enc, pre, opc, attributes, /* nds_is_ndd */ false, no_flags); +} + +int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr, bool nds_is_ndd, bool no_flags) { + if (nds_is_ndd || no_flags || (src_is_gpr && src_enc >= 16)) { assert(UseAPX, "APX features not enabled"); } + if (nds_is_ndd) attributes->set_extended_context(); bool is_extended = dst_enc >= 16 || nds_enc >= 16 || src_enc >=16; bool vex_r = (dst_enc & 8) == 8; bool vex_b = (src_enc & 8) == 8; @@ -11962,7 
+12692,7 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
     // can use vex_x as bank extender on rm encoding
     vex_x = (src_enc >= 16) && !src_is_gpr;
     attributes->set_is_evex_instruction();
-    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_b, evex_v, false /*eevex_x*/, nds_enc, pre, opc);
+    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_b, evex_v, false /*eevex_x*/, nds_enc, pre, opc, no_flags);
   } else {
     if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
       attributes->set_rex_vex_w(false);
@@ -11974,6 +12704,18 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
   return (((dst_enc & 7) << 3) | (src_enc & 7));
 }
 
+int Assembler::evex_prefix_and_encode_ndd(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
+                                          InstructionAttr *attributes, bool no_flags) {
+  attributes->set_is_evex_instruction();
+  return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ true, no_flags);
+}
+
+int Assembler::evex_prefix_and_encode_nf(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
+                                         InstructionAttr *attributes, bool no_flags) {
+  attributes->set_is_evex_instruction();
+  return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes, /* src_is_gpr */ true, /* nds_is_ndd */ false, no_flags);
+}
+
 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                             VexOpcode opc, InstructionAttr *attributes) {
   if (UseAVX > 0) {
@@ -12818,6 +13560,12 @@ void Assembler::incl(Register dst) {
   emit_int8(0x40 | dst->encoding());
 }
 
+void Assembler::eincl(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // The EVEX-promoted INC must use the two-byte 0xFF /0 form; the one-byte
+  // 0x40+r opcode above is only valid outside an EVEX prefix.
+  emit_int16((unsigned char)0xFF, (0xC0 | encode));
+}
+
 void Assembler::lea(Register dst, Address src) {
   leal(dst, src);
 }
@@ -13442,28 +14190,67 @@ void Assembler::addq(Address dst, int32_t imm32) {
   emit_arith_operand(0x81, rax, dst, imm32);
 }
 
+void Assembler::eaddq(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, rax, src, imm32);
+}
+
 void Assembler::addq(Address dst, Register src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst, src), 0x01);
   emit_operand(src, dst, 0);
 }
 
+void Assembler::eaddq(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x01);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::addq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xC0, dst, imm32);
 }
 
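The wrapper pair added above is the NDD scheme in miniature: evex_prefix_and_encode_ndd routes the destination register through the nds slot (EVEX.vvvv), while evex_prefix_and_encode_nf leaves that slot unused and only threads no_flags through. The practical effect, sketched behaviorally below with invented names, is that a classic two-operand opcode gains a non-destructive third register:

    #include <cstdint>

    // Behavioral contrast between a destructive two-operand add and the
    // APX NDD form encoded by the eaddq overloads around this hunk.
    struct GprFile {
      uint64_t r[32];  // APX widens the GPR file to r0..r31

      void addq(int dst, int src)             { r[dst] += r[src]; }           // dst is clobbered
      void eaddq(int dst, int src1, int src2) { r[dst] = r[src1] + r[src2]; } // sources preserved
    };

+void Assembler::eaddq(Register dst, Register src, int32_t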
imm32, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith(0x81, 0xC0, src, imm32); +} + void Assembler::addq(Register dst, Address src) { InstructionMark im(this); emit_prefix_and_int8(get_prefixq(src, dst), 0x03); emit_operand(dst, src, 0); } +void Assembler::eaddq(Register dst, Register src1, Address src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0x03); + emit_operand(src1, src2, 0); +} + void Assembler::addq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x03, 0xC0, dst, src); } +void Assembler::eaddq(Register dst, Register src1, Register src2, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + // opcode matches gcc + emit_arith(0x01, 0xC0, src1, src2); +} + void Assembler::adcxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); if (needs_rex2(dst, src)) { @@ -13480,6 +14267,12 @@ void Assembler::adcxq(Register dst, Register src) { } } +void Assembler::eadcxq(Register dst, Register src1, Register src2) { + InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3C, &attributes); + emit_int16((unsigned char)0x66, (0xC0 | encode)); +} + void Assembler::adoxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); if (needs_rex2(dst, src)) { @@ -13495,34 +14288,80 @@ void Assembler::adoxq(Register dst, Register src) { (0xC0 | encode)); } } -void Assembler::andq(Address dst, int32_t imm32) { + +void Assembler::eadoxq(Register dst, Register src1, Register src2) { + InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_3C, &attributes); + emit_int16((unsigned char)0x66, (0xC0 | encode)); +} + +void Assembler::andq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst); emit_arith_operand(0x81, as_Register(4), dst, imm32); } +void Assembler::eandq(Register dst, Address src, int32_t imm32, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith_operand(0x81, as_Register(4), src, 
imm32); +} + void Assembler::andq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding()); emit_arith(0x81, 0xE0, dst, imm32); } +void Assembler::eandq(Register dst, Register src, int32_t imm32, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith(0x81, 0xE0, src, imm32); +} + void Assembler::andq(Register dst, Address src) { InstructionMark im(this); emit_prefix_and_int8(get_prefixq(src, dst), 0x23); emit_operand(dst, src, 0); } +void Assembler::eandq(Register dst, Register src1, Address src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0x23); + emit_operand(src1, src2, 0); +} + void Assembler::andq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x23, 0xC0, dst, src); } +void Assembler::eandq(Register dst, Register src1, Register src2, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + // opcode matches gcc + emit_arith(0x21, 0xC0, src1, src2); +} + void Assembler::andq(Address dst, Register src) { InstructionMark im(this); emit_prefix_and_int8(get_prefixq(dst, src), 0x21); emit_operand(src, dst, 0); } +void Assembler::eandq(Register dst, Address src1, Register src2, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0x21); + emit_operand(src2, src1, 0); +} + void Assembler::andnq(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -13656,6 +14495,12 @@ void Assembler::cmovq(Condition cc, Register dst, Register src) { emit_opcode_prefix_and_encoding((0x40 | cc), 0xC0, encode); } +void Assembler::ecmovq(Condition cc, Register dst, Register src1, Register src2) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes); + emit_int16((0x40 | cc), (0xC0 | encode)); +} + void Assembler::cmovq(Condition cc, Register dst, Address src) { InstructionMark im(this); int prefix = get_prefixq(src, dst, true /* is_map1 */); @@ -13663,6 +14508,15 @@ void Assembler::cmovq(Condition cc, Register dst, Address src) { emit_operand(dst, src, 0); } 
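Both ecmovq forms keep the legacy condition encoding: CMOVcc places its condition in the low nibble of the opcode byte, so the map-1 form 0F 40+cc and the promoted EVEX form both emit (0x40 | cc). A self-contained reminder of the condition values, which mirror the Assembler::Condition constants:

    #include <cstdint>

    // x86 condition nibble shared by Jcc/SETcc/CMOVcc.
    enum Cond : int {
      overflow = 0x0, noOverflow   = 0x1, below      = 0x2, aboveEqual = 0x3,
      zero     = 0x4, notZero      = 0x5, belowEqual = 0x6, above      = 0x7,
      negative = 0x8, positive     = 0x9, parity     = 0xA, noParity   = 0xB,
      less     = 0xC, greaterEqual = 0xD, lessEqual  = 0xE, greater    = 0xF
    };

    // Opcode byte as emitted by cmovq/ecmovq above; e.g. notZero gives 0x45.
    inline uint8_t cmovcc_opcode(Cond cc) { return (uint8_t)(0x40 | cc); }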
+void Assembler::ecmovq(Condition cc, Register dst, Register src1, Address src2) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes); + emit_int8((0x40 | cc)); + emit_operand(src1, src2, 0); +} + void Assembler::cmpq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst); @@ -13764,6 +14618,12 @@ void Assembler::decl(Register dst) { emit_int16((unsigned char)0xFF, (0xC8 | encode)); } +void Assembler::edecl(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xFF, (0xC8 | encode)); +} + void Assembler::decq(Register dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) @@ -13771,6 +14631,12 @@ void Assembler::decq(Register dst) { emit_int16((unsigned char)0xFF, 0xC8 | encode); } +void Assembler::edecq(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xFF, (0xC8 | encode)); +} + void Assembler::decq(Address dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. 
InstructionMark im(this); @@ -13778,6 +14644,15 @@ void Assembler::decq(Address dst) { emit_operand(rcx, dst, 0); } +void Assembler::edecq(Register dst, Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xFF); + emit_operand(rcx, src, 0); +} + // can't use REX2 void Assembler::fxrstor(Address src) { InstructionMark im(this); @@ -13811,21 +14686,51 @@ void Assembler::idivq(Register src) { emit_int16((unsigned char)0xF7, (0xF8 | encode)); } +void Assembler::eidivq(Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF7, (0xF8 | encode)); +} + void Assembler::divq(Register src) { int encode = prefixq_and_encode(src->encoding()); emit_int16((unsigned char)0xF7, (0xF0 | encode)); } +void Assembler::edivq(Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF7, (0xF0 | encode)); +} + void Assembler::imulq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding(), true /* is_map1 */); emit_opcode_prefix_and_encoding((unsigned char)0xAF, 0xC0, encode); } +void Assembler::eimulq(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xAF, (0xC0 | encode)); +} + +void Assembler::eimulq(Register dst, Register src1, Register src2, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xAF, (0xC0 | encode)); +} + void Assembler::imulq(Register src) { int encode = prefixq_and_encode(src->encoding()); emit_int16((unsigned char)0xF7, (0xE8 | encode)); } +void Assembler::eimulq(Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF7, (0xE8 | encode)); +} + void Assembler::imulq(Register dst, Address src, int32_t value) { InstructionMark im(this); prefixq(src, dst); @@ -13840,6 +14745,22 @@ void Assembler::imulq(Register dst, Address src, int32_t value) { } } +void Assembler::eimulq(Register dst, Address src, int32_t value, bool no_flags) { + InstructionMark im(this); + InstructionAttr 
attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (is8bit(value)) {
+    emit_int8((unsigned char)0x6B);
+    emit_operand(dst, src, 1);
+    emit_int8(value);
+  } else {
+    emit_int8((unsigned char)0x69);
+    emit_operand(dst, src, 4);
+    emit_int32(value);
+  }
+}
+
 void Assembler::imulq(Register dst, Register src, int value) {
   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
   if (is8bit(value)) {
@@ -13850,6 +14771,17 @@ void Assembler::imulq(Register dst, Register src, int value) {
   }
 }
 
+void Assembler::eimulq(Register dst, Register src, int value, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (is8bit(value)) {
+    emit_int24(0x6B, (0xC0 | encode), (value & 0xFF));
+  } else {
+    emit_int16(0x69, (0xC0 | encode));
+    emit_int32(value);
+  }
+}
+
 void Assembler::imulq(Register dst, Address src) {
   InstructionMark im(this);
   int prefix = get_prefixq(src, dst, true /* is_map1 */);
@@ -13857,6 +14789,23 @@ void Assembler::imulq(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
 
+void Assembler::eimulq(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xAF);
+  emit_operand(dst, src, 0);
+}
+
+void Assembler::eimulq(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_32bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xAF);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::incl(Register dst) {
   // Don't use it directly. Use MacroAssembler::incrementl() instead.
   // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
@@ -13864,6 +14813,15 @@ void Assembler::incl(Register dst) {
   emit_int16((unsigned char)0xFF, (0xC0 | encode));
 }
 
+void Assembler::eincl(Register dst, Register src, bool no_flags) {
+  // Don't use it directly. Use MacroAssembler::incrementl() instead.
+  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xFF, (0xC0 | encode));
+}
+
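As with the legacy imulq, the eimulq immediate forms above choose the encoding by immediate width: a value that fits in a signed byte uses opcode 0x6B with a sign-extended imm8, anything else falls back to 0x69 with a full little-endian imm32. A standalone sketch of just that tail selection, illustrative only:

    #include <cstdint>
    #include <vector>

    static bool is8bit(int32_t v) { return -128 <= v && v < 128; }

    // Opcode and immediate bytes that follow the prefix for register-direct
    // IMUL; modrm is the (0xC0 | encode) byte from the prefix helpers.
    std::vector<uint8_t> imul_tail(uint8_t modrm, int32_t value) {
      if (is8bit(value)) {
        return { 0x6B, modrm, (uint8_t)(value & 0xFF) };  // imul r, r/m, imm8
      }
      std::vector<uint8_t> out = { 0x69, modrm };         // imul r, r/m, imm32
      for (int i = 0; i < 4; i++) out.push_back((uint8_t)(value >> (8 * i)));
      return out;
    }

 void Assembler::incq(Register dst) {
   // Don't use it directly. Use MacroAssembler::incrementq() instead.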
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode) @@ -13871,6 +14829,14 @@ void Assembler::incq(Register dst) { emit_int16((unsigned char)0xFF, (0xC0 | encode)); } +void Assembler::eincq(Register dst, Register src, bool no_flags) { + // Don't use it directly. Use MacroAssembler::incrementq() instead. + // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xFF, (0xC0 | encode)); +} + void Assembler::incq(Address dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. InstructionMark im(this); @@ -13878,6 +14844,16 @@ void Assembler::incq(Address dst) { emit_operand(rax, dst, 0); } +void Assembler::eincq(Register dst, Address src, bool no_flags) { + // Don't use it directly. Use MacroAssembler::incrementq() instead. + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char) 0xFF); + emit_operand(rax, src, 0); +} + void Assembler::lea(Register dst, Address src) { leaq(dst, src); } @@ -13946,6 +14922,13 @@ void Assembler::lzcntq(Register dst, Register src) { emit_opcode_prefix_and_encoding((unsigned char)0xBD, 0xC0, encode); } +void Assembler::elzcntq(Register dst, Register src, bool no_flags) { + assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF5, (0xC0 | encode)); +} + void Assembler::lzcntq(Register dst, Address src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); InstructionMark im(this); @@ -13955,6 +14938,16 @@ void Assembler::lzcntq(Register dst, Address src) { emit_operand(dst, src, 0); } +void Assembler::elzcntq(Register dst, Address src, bool no_flags) { + assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xF5); + emit_operand(dst, src, 0); +} + void Assembler::movdq(XMMRegister dst, Register src) { // table D-1 says MMX/SSE2 NOT_LP64(assert(VM_Version::supports_sse2(), "")); @@ -14077,11 +15070,26 @@ void Assembler::mulq(Address src) { emit_operand(rsp, src, 0); } +void Assembler::emulq(Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + 
evex_prefix_nf(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8(0xF7); + emit_operand(rsp, src, 0); +} + void Assembler::mulq(Register src) { int encode = prefixq_and_encode(src->encoding()); emit_int16((unsigned char)0xF7, (0xE0 | encode)); } +void Assembler::emulq(Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_nf(0, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF7, (0xE0 | encode)); +} + void Assembler::mulxq(Register dst1, Register dst2, Register src) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); @@ -14094,17 +15102,38 @@ void Assembler::negq(Register dst) { emit_int16((unsigned char)0xF7, (0xD8 | encode)); } +void Assembler::enegq(Register dst, Register src, bool no_flags) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int16((unsigned char)0xF7, (0xD8 | encode)); +} + void Assembler::negq(Address dst) { InstructionMark im(this); emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7); emit_operand(as_Register(3), dst, 0); } +void Assembler::enegq(Register dst, Address src, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_int8((unsigned char)0xF7); + emit_operand(as_Register(3), src, 0); +} + void Assembler::notq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); emit_int16((unsigned char)0xF7, (0xD0 | encode)); } +void Assembler::enotq(Register dst, Register src) { + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes); + emit_int16((unsigned char)0xF7, (0xD0 | encode)); +} + void Assembler::btq(Register dst, Register src) { int encode = prefixq_and_encode(src->encoding(), dst->encoding(), true /* is_map1 */); emit_opcode_prefix_and_encoding((unsigned char)0xA3, 0xC0, encode); @@ -14141,33 +15170,78 @@ void Assembler::orq(Address dst, int32_t imm32) { emit_arith_operand(0x81, as_Register(1), dst, imm32); } +void Assembler::eorq(Register dst, Address src, int32_t imm32, bool no_flags) { + InstructionMark im(this); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); + attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit); + evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags); + emit_arith_operand(0x81, as_Register(1), src, imm32); +} + void Assembler::orq(Address dst, Register src) { InstructionMark im(this); emit_prefix_and_int8(get_prefixq(dst, 
 void Assembler::orq(Address dst, Register src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst, src), (unsigned char)0x09);
   emit_operand(src, dst, 0);
 }
 
+void Assembler::eorq(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x09);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::orq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xC8, dst, imm32);
 }
 
+void Assembler::eorq(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xC8, src, imm32);
+}
+
 void Assembler::orq_imm32(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith_imm32(0x81, 0xC8, dst, imm32);
 }
 
+void Assembler::eorq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_imm32(0x81, 0xC8, src, imm32);
+}
+
 void Assembler::orq(Register dst, Address src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(src, dst), 0x0B);
   emit_operand(dst, src, 0);
 }
 
+void Assembler::eorq(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x0B);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::orq(Register dst, Register src) {
   (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x0B, 0xC0, dst, src);
 }
 
+void Assembler::eorq(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x09, 0xC0, src1, src2);
+}
+
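The `// opcode matches gcc` comment refers to the two equivalent legacy encodings of register-register OR: 0x0B (`or r64, r/m64`, the load form used by the two-operand `orq` above) and 0x09 (`or r/m64, r64`, the store form that GNU binutils emits by default). Emitting the same opcode byte as gcc-compiled code presumably keeps disassembly comparisons clean; the same choice recurs in `esubq` and `exorq` further down. Roughly, with illustrative registers:

    // Both encode "or rax, rbx"; only the ModRM roles are swapped.
    //   0x0B: reg = rax, r/m = rbx   (HotSpot's legacy orq form)
    //   0x09: reg = rbx, r/m = rax   (gcc's default, used by eorq)
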
 void Assembler::popcntq(Register dst, Address src) {
   assert(VM_Version::supports_popcnt(), "must support");
   InstructionMark im(this);
@@ -14176,6 +15250,16 @@ void Assembler::popcntq(Register dst, Address src) {
   emit_operand(dst, src, 0);
 }
 
+void Assembler::epopcntq(Register dst, Address src, bool no_flags) {
+  assert(VM_Version::supports_popcnt(), "must support");
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_nf(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0x88);
+  emit_operand(dst, src, 0);
+}
+
 void Assembler::popcntq(Register dst, Register src) {
   assert(VM_Version::supports_popcnt(), "must support");
   emit_int8((unsigned char)0xF3);
@@ -14183,6 +15267,13 @@ void Assembler::popcntq(Register dst, Register src) {
   emit_opcode_prefix_and_encoding((unsigned char)0xB8, 0xC0, encode);
 }
 
+void Assembler::epopcntq(Register dst, Register src, bool no_flags) {
+  assert(VM_Version::supports_popcnt(), "must support");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_nf(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0x88, (0xC0 | encode));
+}
+
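The `no_flags` argument threaded through these emitters selects the APX "no flags" (NF) variant, which suppresses the EFLAGS update the legacy instruction would perform (legacy POPCNT, for example, clobbers all the arithmetic flags). That lets a live condition survive an intervening instruction. A hypothetical use, with illustrative registers and label:

    __ cmpq(rsi, rdi);              // sets EFLAGS
    __ epopcntq(r8, r9, true);      // no_flags = true: EFLAGS untouched
    __ jcc(Assembler::less, slow);  // still branches on the cmpq result
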
 void Assembler::popq(Address dst) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0x8F);
@@ -14401,6 +15492,17 @@ void Assembler::rclq(Register dst, int imm8) {
   }
 }
 
+void Assembler::erclq(Register dst, Register src, int imm8) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xD0 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xD0 | encode), imm8);
+  }
+}
+
 void Assembler::rcrq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -14411,6 +15513,17 @@ void Assembler::rcrq(Register dst, int imm8) {
   }
 }
 
+void Assembler::ercrq(Register dst, Register src, int imm8) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xD8 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xD8 | encode), imm8);
+  }
+}
+
 void Assembler::rorxl(Register dst, Register src, int imm8) {
   assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
   InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -14462,12 +15575,38 @@ void Assembler::salq(Address dst, int imm8) {
   }
 }
 
+void Assembler::esalq(Register dst, Address src, int imm8, bool no_flags) {
+  InstructionMark im(this);
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int8((unsigned char)0xD1);
+    emit_operand(as_Register(4), src, 0);
+  } else {
+    emit_int8((unsigned char)0xC1);
+    emit_operand(as_Register(4), src, 1);
+    emit_int8(imm8);
+  }
+}
+
 void Assembler::salq(Address dst) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
   emit_operand(as_Register(4), dst, 0);
 }
 
+void Assembler::esalq(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xD3);
+  emit_operand(as_Register(4), src, 0);
+}
+
 void Assembler::salq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -14478,11 +15617,28 @@ void Assembler::salq(Register dst, int imm8) {
   }
 }
 
+void Assembler::esalq(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xE0 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
+  }
+}
+
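As in the legacy emitters, the immediate-count rotates and shifts special-case a count of one: x86 has a dedicated shift/rotate-by-1 opcode (0xD1) that carries no immediate byte, while 0xC1 takes an explicit 8-bit count. The NDD variants keep that byte-saving choice. Ignoring the prefix bytes, the pattern being mirrored is:

    // salq(rax, 1) -> 0xD1 0xE0        (no immediate byte)
    // salq(rax, 5) -> 0xC1 0xE0 0x05   (explicit count byte)
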
 void Assembler::salq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 }
 
+void Assembler::esalq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xE0 | encode));
+}
+
 void Assembler::sarq(Address dst, int imm8) {
   InstructionMark im(this);
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
@@ -14497,12 +15653,38 @@ void Assembler::sarq(Address dst, int imm8) {
   }
 }
 
+void Assembler::esarq(Register dst, Address src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int8((unsigned char)0xD1);
+    emit_operand(as_Register(7), src, 0);
+  } else {
+    emit_int8((unsigned char)0xC1);
+    emit_operand(as_Register(7), src, 1);
+    emit_int8(imm8);
+  }
+}
+
 void Assembler::sarq(Address dst) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
   emit_operand(as_Register(7), dst, 0);
 }
 
+void Assembler::esarq(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xD3);
+  emit_operand(as_Register(7), src, 0);
+}
+
 void Assembler::sarq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -14513,10 +15695,26 @@ void Assembler::sarq(Register dst, int imm8) {
   }
 }
 
+void Assembler::esarq(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xF8 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xF8 | encode), imm8);
+  }
+}
+
 void Assembler::sarq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xF8 | encode));
 }
+
+void Assembler::esarq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xF8 | encode));
+}
 #endif
 
 void Assembler::sbbq(Address dst, int32_t imm32) {
@@ -14551,11 +15749,28 @@ void Assembler::shlq(Register dst, int imm8) {
   }
 }
 
+void Assembler::eshlq(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xE0 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xE0 | encode), imm8);
+  }
+}
+
 void Assembler::shlq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, (0xE0 | encode));
 }
 
+void Assembler::eshlq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xE0 | encode));
+}
+
 void Assembler::shrq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
@@ -14567,17 +15782,44 @@ void Assembler::shrq(Register dst, int imm8) {
   }
 }
 
+void Assembler::eshrq(Register dst, Register src, int imm8, bool no_flags) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int16((unsigned char)0xD1, (0xE8 | encode));
+  } else {
+    emit_int24((unsigned char)0xC1, (0xE8 | encode), imm8);
+  }
+}
+
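A note on the recurring `assert(isShiftCount(imm8 >> 1), ...)`: it is the existing 64-bit idiom. `isShiftCount` only accepts values in 0..31, so checking the count shifted right by one widens the accepted range to the 0..63 that a 64-bit shift allows, without needing a second predicate:

    // imm8 = 63 -> 63 >> 1 == 31 -> accepted (legal 64-bit count)
    // imm8 = 64 -> 64 >> 1 == 32 -> rejected

(The register-register `esarq` above originally lacked this assert; it has been added here for consistency with its siblings.)
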
 void Assembler::shrq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int16((unsigned char)0xD3, 0xE8 | encode);
 }
 
+void Assembler::eshrq(Register dst, Register src, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  int encode = evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int16((unsigned char)0xD3, (0xE8 | encode));
+}
+
 void Assembler::shrq(Address dst) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xD3);
   emit_operand(as_Register(5), dst, 0);
 }
 
+void Assembler::eshrq(Register dst, Address src, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8((unsigned char)0xD3);
+  emit_operand(as_Register(5), src, 0);
+}
+
 void Assembler::shrq(Address dst, int imm8) {
   InstructionMark im(this);
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
@@ -14592,40 +15834,102 @@ void Assembler::shrq(Address dst, int imm8) {
   }
 }
 
+void Assembler::eshrq(Register dst, Address src, int imm8, bool no_flags) {
+  InstructionMark im(this);
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  if (imm8 == 1) {
+    emit_int8((unsigned char)0xD1);
+    emit_operand(as_Register(5), src, 0);
+  } else {
+    emit_int8((unsigned char)0xC1);
+    emit_operand(as_Register(5), src, 1);
+    emit_int8(imm8);
+  }
+}
+
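Note the operand-direction change in the memory forms: legacy `shrq(Address)` read-modify-writes the memory location, whereas under the APX NDD scheme the memory operand becomes a pure source and the result lands in the destination register carried by the EVEX prefix. A sketch, with an illustrative address and registers not taken from the patch:

    __ shrq(Address(rbp, 16));               // shifts the memory word in place
    __ eshrq(rax, Address(rbp, 16), false);  // rax = mem >> cl; memory unchanged
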
 void Assembler::subq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
   emit_arith_operand(0x81, rbp, dst, imm32);
 }
 
+void Assembler::esubq(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, rbp, src, imm32);
+}
+
 void Assembler::subq(Address dst, Register src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst, src), 0x29);
   emit_operand(src, dst, 0);
 }
 
+void Assembler::esubq(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x29);
+  emit_operand(src2, src1, 0);
+}
+
 void Assembler::subq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xE8, dst, imm32);
 }
 
+void Assembler::esubq(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xE8, src, imm32);
+}
+
 // Force generation of a 4 byte immediate value even if it fits into 8bit
 void Assembler::subq_imm32(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith_imm32(0x81, 0xE8, dst, imm32);
 }
 
+void Assembler::esubq_imm32(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_imm32(0x81, 0xE8, src, imm32);
+}
+
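`subq_imm32` and its new `esubq_imm32` counterpart exist for call sites that need a fixed-length encoding, for instance when the immediate may be patched later: `emit_arith_imm32` always encodes the full four-byte immediate, whereas `emit_arith` would shrink a small constant to the sign-extended 8-bit form and change the instruction length. Illustratively, ignoring prefix bytes:

    // emit_arith:       sub rax, 16 -> 0x83 /5 ib   (one immediate byte)
    // emit_arith_imm32: sub rax, 16 -> 0x81 /5 id   (four bytes, patchable)
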
 void Assembler::subq(Register dst, Address src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(src, dst), 0x2B);
   emit_operand(dst, src, 0);
 }
 
+void Assembler::esubq(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x2B);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::subq(Register dst, Register src) {
   (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x2B, 0xC0, dst, src);
 }
 
+void Assembler::esubq(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x29, 0xC0, src1, src2);
+}
+
 void Assembler::testq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst), (unsigned char)0xF7);
@@ -14683,29 +15987,68 @@ void Assembler::xorq(Register dst, Register src) {
   emit_arith(0x33, 0xC0, dst, src);
 }
 
+void Assembler::exorq(Register dst, Register src1, Register src2, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  (void) evex_prefix_and_encode_ndd(src1->encoding(), dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  // opcode matches gcc
+  emit_arith(0x31, 0xC0, src1, src2);
+}
+
 void Assembler::xorq(Register dst, Address src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(src, dst), 0x33);
   emit_operand(dst, src, 0);
 }
 
+void Assembler::exorq(Register dst, Register src1, Address src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src2, dst->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x33);
+  emit_operand(src1, src2, 0);
+}
+
 void Assembler::xorq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xF0, dst, imm32);
 }
 
+void Assembler::exorq(Register dst, Register src, int32_t imm32, bool no_flags) {
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  evex_prefix_and_encode_ndd(0, dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith(0x81, 0xF0, src, imm32);
+}
+
 void Assembler::xorq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
   emit_arith_operand(0x81, as_Register(6), dst, imm32);
 }
 
+void Assembler::exorq(Register dst, Address src, int32_t imm32, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src, dst->encoding(), 0, VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_arith_operand(0x81, as_Register(6), src, imm32);
+}
+
 void Assembler::xorq(Address dst, Register src) {
   InstructionMark im(this);
   emit_prefix_and_int8(get_prefixq(dst, src), 0x31);
   emit_operand(src, dst, 0);
 }
 
+void Assembler::exorq(Register dst, Address src1, Register src2, bool no_flags) {
+  InstructionMark im(this);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  attributes.set_address_attributes(/* tuple_type */ EVEX_NOSCALE, /* input_size_in_bits */ EVEX_64bit);
+  evex_prefix_ndd(src1, dst->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3C, &attributes, no_flags);
+  emit_int8(0x31);
+  emit_operand(src2, src1, 0);
+}
+
 #endif // !LP64
 
 void InstructionAttr::set_address_attributes(int tuple_type, int input_size_in_bits) {
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index 9d1a12ca8e5c8..9e7126417b06f 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -789,14 +789,26 @@ class Assembler : public AbstractAssembler {
   void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);
   void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_v, bool evex_r, bool evex_b,
-                   bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);
+                   bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool no_flags = false);
+
+  void evex_prefix_ndd(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc,
+                       InstructionAttr *attributes, bool no_flags = false);
+
+  void evex_prefix_nf(Address adr, int ndd_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc,
+                      InstructionAttr *attributes, bool no_flags = false);
 
   void vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc,
-                  InstructionAttr *attributes);
+                  InstructionAttr *attributes, bool nds_is_ndd = false, bool no_flags = false);
 
   int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
-                            InstructionAttr *attributes, bool src_is_gpr = false);
+                            InstructionAttr *attributes, bool src_is_gpr = false, bool nds_is_ndd = false, bool no_flags = false);
+
+  int evex_prefix_and_encode_ndd(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
+                                 InstructionAttr *attributes, bool no_flags = false);
+
+  int evex_prefix_and_encode_nf(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
+                                InstructionAttr *attributes, bool no_flags = false);
 
   void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc,
                    InstructionAttr *attributes);
@@ -941,13 +953,20 @@ class Assembler : public AbstractAssembler {
   // the product flag UseIncDec value.
 
   void decl(Register dst);
+  void edecl(Register dst, Register src, bool no_flags);
   void decl(Address dst);
+  void edecl(Register dst, Address src, bool no_flags);
   void decq(Address dst);
+  void edecq(Register dst, Address src, bool no_flags);
 
   void incl(Register dst);
+  void eincl(Register dst, Register src, bool no_flags);
   void incl(Address dst);
+  void eincl(Register dst, Address src, bool no_flags);
   void incq(Register dst);
+  void eincq(Register dst, Register src, bool no_flags);
   void incq(Address dst);
+  void eincq(Register dst, Address src, bool no_flags);
 
   // New cpus require use of movsd and movss to avoid partial register stall
   // when loading from memory. But for old Opteron use movlpd instead of movsd.
@@ -1031,6 +1050,7 @@ class Assembler : public AbstractAssembler {
 #endif
   void vzeroupper_uncached();
   void decq(Register dst);
+  void edecq(Register dst, Register src, bool no_flags);
 
   void pusha();
   void popa();
@@ -1072,23 +1092,35 @@ class Assembler : public AbstractAssembler {
   void addw(Address dst, Register src);
 
   void addl(Address dst, int32_t imm32);
+  void eaddl(Register dst, Address src, int32_t imm32, bool no_flags);
   void addl(Address dst, Register src);
+  void eaddl(Register dst, Address src1, Register src2, bool no_flags);
   void addl(Register dst, int32_t imm32);
+  void eaddl(Register dst, Register src, int32_t imm32, bool no_flags);
   void addl(Register dst, Address src);
+  void eaddl(Register dst, Register src1, Address src2, bool no_flags);
   void addl(Register dst, Register src);
+  void eaddl(Register dst, Register src1, Register src2, bool no_flags);
 
   void addq(Address dst, int32_t imm32);
+  void eaddq(Register dst, Address src, int32_t imm32, bool no_flags);
   void addq(Address dst, Register src);
+  void eaddq(Register dst, Address src1, Register src2, bool no_flags);
   void addq(Register dst, int32_t imm32);
+  void eaddq(Register dst, Register src, int32_t imm32, bool no_flags);
   void addq(Register dst, Address src);
+  void eaddq(Register dst, Register src1, Address src2, bool no_flags);
   void addq(Register dst, Register src);
+  void eaddq(Register dst, Register src1, Register src2, bool no_flags);
 
 #ifdef _LP64
  //Add Unsigned Integers with Carry Flag
   void adcxq(Register dst, Register src);
+  void eadcxq(Register dst, Register src1, Register src2);
 
  //Add Unsigned Integers with Overflow Flag
   void adoxq(Register dst, Register src);
+  void eadoxq(Register dst, Register src1, Register src2);
 #endif
 
   void addr_nop_4();
@@ -1122,16 +1154,25 @@ class Assembler : public AbstractAssembler {
   void andb(Address dst, Register src);
 
   void andl(Address dst, int32_t imm32);
+  void eandl(Register dst, Address src, int32_t imm32, bool no_flags);
   void andl(Register dst, int32_t imm32);
+  void eandl(Register dst, Register src, int32_t imm32, bool no_flags);
   void andl(Register dst, Address src);
+  void eandl(Register dst, Register src1, Address src2, bool no_flags);
   void andl(Register dst, Register src);
+  void eandl(Register dst, Register src1, Register src2, bool no_flags);
   void andl(Address dst, Register src);
 
   void andq(Address dst, int32_t imm32);
+  void eandq(Register dst, Address src, int32_t imm32, bool no_flags);
   void andq(Register dst, int32_t imm32);
+  void eandq(Register dst, Register src, int32_t imm32, bool no_flags);
   void andq(Register dst, Address src);
+  void eandq(Register dst, Register src1, Address src2, bool no_flags);
   void andq(Register dst, Register src);
+  void eandq(Register dst, Register src1, Register src2, bool no_flags);
   void andq(Address dst, Register src);
+  void eandq(Register dst, Address src1, Register src2, bool no_flags);
 
   // BMI instructions
   void andnl(Register dst, Register src1, Register src2);
@@ -1182,10 +1223,14 @@ class Assembler : public AbstractAssembler {
   void clwb(Address adr);
 
   void cmovl(Condition cc, Register dst, Register src);
+  void ecmovl(Condition cc, Register dst, Register src1, Register src2);
   void cmovl(Condition cc, Register dst, Address src);
+  void ecmovl(Condition cc, Register dst, Register src1, Address src2);
 
   void cmovq(Condition cc, Register dst, Register src);
+  void ecmovq(Condition cc, Register dst, Register src1, Register src2);
   void cmovq(Condition cc, Register dst, Address src);
+  void ecmovq(Condition cc, Register dst, Register src1, Address src2);
 
   void cmpb(Address dst, int imm8);
@@ -1488,25 +1533,41 @@ class Assembler : public AbstractAssembler {
   void hlt();
 
   void idivl(Register src);
+  void eidivl(Register src, bool no_flags);
   void divl(Register src); // Unsigned division
+  void edivl(Register src, bool no_flags); // Unsigned division
 
 #ifdef _LP64
   void idivq(Register src);
+  void eidivq(Register src, bool no_flags);
   void divq(Register src); // Unsigned division
+  void edivq(Register src, bool no_flags); // Unsigned division
 #endif
 
   void imull(Register src);
+  void eimull(Register src, bool no_flags);
   void imull(Register dst, Register src);
+  void eimull(Register dst, Register src1, Register src2, bool no_flags);
   void imull(Register dst, Register src, int value);
+  void eimull(Register dst, Register src, int value, bool no_flags);
   void imull(Register dst, Address src, int value);
+  void eimull(Register dst, Address src, int value, bool no_flags);
   void imull(Register dst, Address src);
+  void eimull(Register dst, Register src1, Address src2, bool no_flags);
 
 #ifdef _LP64
   void imulq(Register dst, Register src);
+  void eimulq(Register dst, Register src, bool no_flags);
+  void eimulq(Register dst, Register src1, Register src2, bool no_flags);
   void imulq(Register dst, Register src, int value);
+  void eimulq(Register dst, Register src, int value, bool no_flags);
   void imulq(Register dst, Address src, int value);
+  void eimulq(Register dst, Address src, int value, bool no_flags);
   void imulq(Register dst, Address src);
+  void eimulq(Register dst, Address src, bool no_flags);
+  void eimulq(Register dst, Register src1, Address src2, bool no_flags);
   void imulq(Register dst);
+  void eimulq(Register dst, bool no_flags);
 #endif
 
   // jcc is the generic conditional branch generator to run-
@@ -1565,11 +1626,15 @@ class Assembler : public AbstractAssembler {
   void size_prefix();
 
   void lzcntl(Register dst, Register src);
+  void elzcntl(Register dst, Register src, bool no_flags);
   void lzcntl(Register dst, Address src);
+  void elzcntl(Register dst, Address src, bool no_flags);
 
 #ifdef _LP64
   void lzcntq(Register dst, Register src);
+  void elzcntq(Register dst, Register src, bool no_flags);
   void lzcntq(Register dst, Address src);
+  void elzcntq(Register dst, Address src, bool no_flags);
 #endif
 
   enum Membar_mask_bits {
@@ -1785,11 +1850,15 @@ class Assembler : public AbstractAssembler {
 
   // Unsigned multiply with RAX destination register
   void mull(Address src);
+  void emull(Address src, bool no_flags);
   void mull(Register src);
+  void emull(Register src, bool no_flags);
 
 #ifdef _LP64
   void mulq(Address src);
+  void emulq(Address src, bool no_flags);
   void mulq(Register src);
+  void emulq(Register src, bool no_flags);
   void mulxq(Register dst1, Register dst2, Register src);
 #endif
@@ -1802,19 +1871,25 @@ class Assembler : public AbstractAssembler {
   void mulss(XMMRegister dst, XMMRegister src);
 
   void negl(Register dst);
+  void enegl(Register dst, Register src, bool no_flags);
   void negl(Address dst);
+  void enegl(Register dst, Address src, bool no_flags);
 
 #ifdef _LP64
   void negq(Register dst);
+  void enegq(Register dst, Register src, bool no_flags);
   void negq(Address dst);
+  void enegq(Register dst, Address src, bool no_flags);
 #endif
 
   void nop(uint i = 1);
 
   void notl(Register dst);
+  void enotl(Register dst, Register src);
 
 #ifdef _LP64
   void notq(Register dst);
+  void enotq(Register dst, Register src);
 
   void btsq(Address dst, int imm8);
   void btrq(Address dst, int imm8);
@@ -1822,21 +1897,37 @@ class Assembler : public AbstractAssembler {
 #endif
 
   void btq(Register dst, Register src);
 
+  void orw(Register dst, Register src);
+  void eorw(Register dst, Register src1, Register src2, bool no_flags);
+
   void orl(Address dst, int32_t imm32);
+  void eorl(Register dst, Address src, int32_t imm32, bool no_flags);
   void orl(Register dst, int32_t imm32);
+  void eorl(Register dst, Register src, int32_t imm32, bool no_flags);
   void orl(Register dst, Address src);
+  void eorl(Register dst, Register src1, Address src2, bool no_flags);
   void orl(Register dst, Register src);
+  void eorl(Register dst, Register src1, Register src2, bool no_flags);
   void orl(Address dst, Register src);
+  void eorl(Register dst, Address src1, Register src2, bool no_flags);
 
   void orb(Address dst, int imm8);
+  void eorb(Register dst, Address src, int imm8, bool no_flags);
   void orb(Address dst, Register src);
+  void eorb(Register dst, Address src1, Register src2, bool no_flags);
 
   void orq(Address dst, int32_t imm32);
+  void eorq(Register dst, Address src, int32_t imm32, bool no_flags);
   void orq(Address dst, Register src);
+  void eorq(Register dst, Address src1, Register src2, bool no_flags);
   void orq(Register dst, int32_t imm32);
+  void eorq(Register dst, Register src, int32_t imm32, bool no_flags);
   void orq_imm32(Register dst, int32_t imm32);
+  void eorq_imm32(Register dst, Register src, int32_t imm32, bool no_flags);
   void orq(Register dst, Address src);
+  void eorq(Register dst, Register src1, Address src2, bool no_flags);
   void orq(Register dst, Register src);
+  void eorq(Register dst, Register src1, Register src2, bool no_flags);
 
   // Pack with signed saturation
   void packsswb(XMMRegister dst, XMMRegister src);
@@ -2022,7 +2113,9 @@ class Assembler : public AbstractAssembler {
 #endif
 
   void popcntl(Register dst, Address src);
+  void epopcntl(Register dst, Address src, bool no_flags);
   void popcntl(Register dst, Register src);
+  void epopcntl(Register dst, Register src, bool no_flags);
 
   void evpopcntb(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
   void evpopcntw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len);
@@ -2031,7 +2124,9 @@ class Assembler : public AbstractAssembler {
 
 #ifdef _LP64
   void popcntq(Register dst, Address src);
+  void epopcntq(Register dst, Address src, bool no_flags);
   void popcntq(Register dst, Register src);
+  void epopcntq(Register dst, Register src, bool no_flags);
 #endif
 
   // Prefetches (SSE, SSE2, 3DNOW only)
@@ -2131,10 +2226,13 @@ class Assembler : public AbstractAssembler {
   void pushq(Address src);
 
   void rcll(Register dst, int imm8);
+  void ercll(Register dst, Register src, int imm8);
 
   void rclq(Register dst, int imm8);
+  void erclq(Register dst, Register src, int imm8);
 
   void rcrq(Register dst, int imm8);
+  void ercrq(Register dst, Register src, int imm8);
 
   void rcpps(XMMRegister dst, XMMRegister src);
 
@@ -2145,18 +2243,26 @@ class Assembler : public AbstractAssembler {
   void ret(int imm16);
 
   void roll(Register dst);
+  void eroll(Register dst, Register src, bool no_flags);
   void roll(Register dst, int imm8);
+  void eroll(Register dst, Register src, int imm8, bool no_flags);
 
   void rorl(Register dst);
+  void erorl(Register dst, Register src, bool no_flags);
   void rorl(Register dst, int imm8);
+  void erorl(Register dst, Register src, int imm8, bool no_flags);
 
 #ifdef _LP64
   void rolq(Register dst);
+  void erolq(Register dst, Register src, bool no_flags);
   void rolq(Register dst, int imm8);
+  void erolq(Register dst, Register src, int imm8, bool no_flags);
   void rorq(Register dst);
+  void erorq(Register dst, Register src, bool no_flags);
   void rorq(Register dst, int imm8);
+  void erorq(Register dst, Register src, int imm8, bool no_flags);
   void rorxl(Register dst, Register src, int imm8);
   void rorxl(Register dst, Address src, int imm8);
   void rorxq(Register dst, Register src, int imm8);
@@ -2166,25 +2272,41 @@ class Assembler : public AbstractAssembler {
   void sahf();
 
   void sall(Register dst, int imm8);
+  void esall(Register dst, Register src, int imm8, bool no_flags);
   void sall(Register dst);
+  void esall(Register dst, Register src, bool no_flags);
   void sall(Address dst, int imm8);
+  void esall(Register dst, Address src, int imm8, bool no_flags);
   void sall(Address dst);
+  void esall(Register dst, Address src, bool no_flags);
 
   void sarl(Address dst, int imm8);
+  void esarl(Register dst, Address src, int imm8, bool no_flags);
   void sarl(Address dst);
+  void esarl(Register dst, Address src, bool no_flags);
   void sarl(Register dst, int imm8);
+  void esarl(Register dst, Register src, int imm8, bool no_flags);
   void sarl(Register dst);
+  void esarl(Register dst, Register src, bool no_flags);
 
 #ifdef _LP64
   void salq(Register dst, int imm8);
+  void esalq(Register dst, Register src, int imm8, bool no_flags);
   void salq(Register dst);
+  void esalq(Register dst, Register src, bool no_flags);
   void salq(Address dst, int imm8);
+  void esalq(Register dst, Address src, int imm8, bool no_flags);
   void salq(Address dst);
+  void esalq(Register dst, Address src, bool no_flags);
 
   void sarq(Address dst, int imm8);
+  void esarq(Register dst, Address src, int imm8, bool no_flags);
   void sarq(Address dst);
+  void esarq(Register dst, Address src, bool no_flags);
   void sarq(Register dst, int imm8);
+  void esarq(Register dst, Register src, int imm8, bool no_flags);
   void sarq(Register dst);
+  void esarq(Register dst, Register src, bool no_flags);
 #endif
 
   void sbbl(Address dst, int32_t imm32);
@@ -2216,29 +2338,47 @@ class Assembler : public AbstractAssembler {
   void sha256msg2(XMMRegister dst, XMMRegister src);
 
   void shldl(Register dst, Register src);
+  void eshldl(Register dst, Register src1, Register src2, bool no_flags);
   void shldl(Register dst, Register src, int8_t imm8);
+  void eshldl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags);
   void shrdl(Register dst, Register src);
+  void eshrdl(Register dst, Register src1, Register src2, bool no_flags);
   void shrdl(Register dst, Register src, int8_t imm8);
+  void eshrdl(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags);
 
 #ifdef _LP64
   void shldq(Register dst, Register src, int8_t imm8);
+  void eshldq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags);
   void shrdq(Register dst, Register src, int8_t imm8);
+  void eshrdq(Register dst, Register src1, Register src2, int8_t imm8, bool no_flags);
 #endif
 
   void shll(Register dst, int imm8);
+  void eshll(Register dst, Register src, int imm8, bool no_flags);
   void shll(Register dst);
+  void eshll(Register dst, Register src, bool no_flags);
 
   void shlq(Register dst, int imm8);
+  void eshlq(Register dst, Register src, int imm8, bool no_flags);
   void shlq(Register dst);
+  void eshlq(Register dst, Register src, bool no_flags);
 
   void shrl(Register dst, int imm8);
+  void eshrl(Register dst, Register src, int imm8, bool no_flags);
   void shrl(Register dst);
+  void eshrl(Register dst, Register src, bool no_flags);
   void shrl(Address dst);
+  void eshrl(Register dst, Address src, bool no_flags);
   void shrl(Address dst, int imm8);
+  void eshrl(Register dst, Address src, int imm8, bool no_flags);
 
   void shrq(Register dst, int imm8);
+  void eshrq(Register dst, Register src, int imm8, bool no_flags);
   void shrq(Register dst);
+  void eshrq(Register dst, Register src, bool no_flags);
   void shrq(Address dst);
+  void eshrq(Register dst, Address src, bool no_flags);
   void shrq(Address dst, int imm8);
+  void eshrq(Register dst, Address src, int imm8, bool no_flags);
 
   void smovl(); // QQQ generic?
@@ -2258,20 +2398,32 @@ class Assembler : public AbstractAssembler {
   void stmxcsr( Address dst );
 
   void subl(Address dst, int32_t imm32);
+  void esubl(Register dst, Address src, int32_t imm32, bool no_flags);
   void subl(Address dst, Register src);
+  void esubl(Register dst, Address src1, Register src2, bool no_flags);
   void subl(Register dst, int32_t imm32);
+  void esubl(Register dst, Register src, int32_t imm32, bool no_flags);
   void subl(Register dst, Address src);
+  void esubl(Register dst, Register src1, Address src2, bool no_flags);
   void subl(Register dst, Register src);
+  void esubl(Register dst, Register src1, Register src2, bool no_flags);
 
   void subq(Address dst, int32_t imm32);
+  void esubq(Register dst, Address src, int32_t imm32, bool no_flags);
   void subq(Address dst, Register src);
+  void esubq(Register dst, Address src1, Register src2, bool no_flags);
   void subq(Register dst, int32_t imm32);
+  void esubq(Register dst, Register src, int32_t imm32, bool no_flags);
   void subq(Register dst, Address src);
+  void esubq(Register dst, Register src1, Address src2, bool no_flags);
   void subq(Register dst, Register src);
+  void esubq(Register dst, Register src1, Register src2, bool no_flags);
 
   // Force generation of a 4 byte immediate value even if it fits into 8bit
   void subl_imm32(Register dst, int32_t imm32);
+  void esubl_imm32(Register dst, Register src, int32_t imm32, bool no_flags);
   void subq_imm32(Register dst, int32_t imm32);
+  void esubq_imm32(Register dst, Register src, int32_t imm32, bool no_flags);
 
   // Subtract Scalar Double-Precision Floating-Point Values
   void subsd(XMMRegister dst, Address src);
@@ -2296,9 +2448,13 @@ class Assembler : public AbstractAssembler {
 
   // BMI - count trailing zeros
   void tzcntl(Register dst, Register src);
+  void etzcntl(Register dst, Register src, bool no_flags);
   void tzcntl(Register dst, Address src);
+  void etzcntl(Register dst, Address src, bool no_flags);
   void tzcntq(Register dst, Register src);
+  void etzcntq(Register dst, Register src, bool no_flags);
   void tzcntq(Register dst, Address src);
+  void etzcntq(Register dst, Address src, bool no_flags);
 
   // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
   void ucomisd(XMMRegister dst, Address src);
@@ -2331,20 +2487,33 @@ class Assembler : public AbstractAssembler {
   void xgetbv();
 
   void xorl(Register dst, int32_t imm32);
+  void exorl(Register dst, Register src, int32_t imm32, bool no_flags);
   void xorl(Address dst, int32_t imm32);
+  void exorl(Register dst, Address src, int32_t imm32, bool no_flags);
   void xorl(Register dst, Address src);
+  void exorl(Register dst, Register src1, Address src2, bool no_flags);
   void xorl(Register dst, Register src);
+  void exorl(Register dst, Register src1, Register src2, bool no_flags);
   void xorl(Address dst, Register src);
+  void exorl(Register dst, Address src1, Register src2, bool no_flags);
 
   void xorb(Address dst, Register src);
+  void exorb(Register dst, Address src1, Register src2, bool no_flags);
   void xorb(Register dst, Address src);
+  void exorb(Register dst, Register src1, Address src2, bool no_flags);
 
   void xorw(Register dst, Address src);
+  void exorw(Register dst, Register src1, Address src2, bool no_flags);
 
   void xorq(Register dst, Address src);
+  void exorq(Register dst, Register src1, Address src2, bool no_flags);
   void xorq(Address dst, int32_t imm32);
+  void exorq(Register dst, Address src, int32_t imm32, bool no_flags);
   void xorq(Register dst, Register src);
+  void exorq(Register dst, Register src1, Register src2, bool no_flags);
   void xorq(Register dst, int32_t imm32);
+  void exorq(Register dst, Register src, int32_t imm32, bool no_flags);
   void xorq(Address dst, Register src);
+  void exorq(Register dst, Address src1, Register src2, bool no_flags);
 
   // AVX 3-operands scalar instructions (encoded with VEX prefix)
 
diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
index 7d89b148ba22f..71ca9351f86c9 100644
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -110,7 +110,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { Metadata *m = _method->as_constant_ptr()->as_metadata(); ce->store_parameter(m, 1); ce->store_parameter(_bci, 0); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ jmp(_continuation); @@ -119,7 +119,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); __ call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -133,11 +133,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { } else { ce->store_parameter(_index->as_jint(), 0); } - Runtime1::StubID stub_id; + C1StubId stub_id; if (_throw_index_out_of_bounds_exception) { - stub_id = Runtime1::throw_index_exception_id; + stub_id = C1StubId::throw_index_exception_id; } else { - stub_id = Runtime1::throw_range_check_failed_id; + stub_id = C1StubId::throw_range_check_failed_id; ce->store_parameter(_array->as_pointer_register(), 1); } __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); @@ -152,7 +152,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); __ call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -164,7 +164,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id))); ce->add_call_info_here(_info); debug_only(__ should_not_reach_here()); } @@ -172,14 +172,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == Runtime1::new_instance_id || - stub_id == Runtime1::fast_new_instance_id || - stub_id == Runtime1::fast_new_instance_init_check_id, + assert(stub_id == C1StubId::new_instance_id || + stub_id == C1StubId::fast_new_instance_id || + stub_id == C1StubId::fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -212,7 +212,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == rbx, "length must in rbx,"); assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == rax, "result must 
in rax,"); @@ -235,7 +235,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == rbx, "length must in rbx,"); assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx"); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == rax, "result must in rax,"); @@ -247,11 +247,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_obj_reg->as_register(), 1); ce->store_parameter(_lock_reg->as_register(), 0); - Runtime1::StubID enter_id; + C1StubId enter_id; if (ce->compilation()->has_fpu_code()) { - enter_id = Runtime1::monitorenter_id; + enter_id = C1StubId::monitorenter_id; } else { - enter_id = Runtime1::monitorenter_nofpu_id; + enter_id = C1StubId::monitorenter_nofpu_id; } __ call(RuntimeAddress(Runtime1::entry_for(enter_id))); ce->add_call_info_here(_info); @@ -268,11 +268,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { } ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed - Runtime1::StubID exit_id; + C1StubId exit_id; if (ce->compilation()->has_fpu_code()) { - exit_id = Runtime1::monitorexit_id; + exit_id = C1StubId::monitorexit_id; } else { - exit_id = Runtime1::monitorexit_nofpu_id; + exit_id = C1StubId::monitorexit_nofpu_id; } __ call(RuntimeAddress(Runtime1::entry_for(exit_id))); __ jmp(_continuation); @@ -407,10 +407,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (_id) { - case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break; - case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; - case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; - case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; + case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break; + case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; + case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; + case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } __ bind(call_patch); @@ -440,7 +440,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_trap_request, 0); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id))); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); } @@ -450,9 +450,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. 
- a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id); + a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); } else { - a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id); + a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index e2fde10b98d86..c3444d5a5abce 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -399,7 +399,7 @@ int LIR_Assembler::emit_exception_handler() { __ verify_not_null_oop(rax); // search an exception handler (rax: exception oop, rdx: throwing pc) - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id))); __ should_not_reach_here(); guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); __ end_a_stub(); @@ -463,7 +463,7 @@ int LIR_Assembler::emit_unwind_handler() { // remove the activation and dispatch to the unwind handler __ remove_frame(initial_frame_size_in_bytes()); - __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); + __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id))); // Emit the slow path assembly if (stub != nullptr) { @@ -1566,7 +1566,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { // instruction sequence too long to inline it here { - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::fpu2long_stub_id))); } break; #endif // _LP64 @@ -1781,7 +1781,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L #else __ pushklass(k->constant_encoding(), noreg); #endif // _LP64 - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); __ pop(klass_RInfo); __ pop(klass_RInfo); // result is a boolean @@ -1795,7 +1795,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ push(klass_RInfo); __ push(k_RInfo); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); __ pop(klass_RInfo); __ pop(k_RInfo); // result is a boolean @@ -1874,7 +1874,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ push(klass_RInfo); __ push(k_RInfo); - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); __ pop(klass_RInfo); __ pop(k_RInfo); // result is a boolean @@ -2893,7 +2893,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit // exception object is not added to oop map by LinearScan // (LinearScan assumes that no oops are in fixed registers) info->add_register_oop(exceptionOop); - Runtime1::StubID unwind_id; + C1StubId unwind_id; // get current pc information // pc is only needed if the method has an exception handler, the unwind code does not need it. 
@@ -2905,9 +2905,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(rax);
   // search an exception handler (rax: exception oop, rdx: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
@@ -3262,7 +3262,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ push(src);
     __ push(dst);
-    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ pop(dst);
     __ pop(src);
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index d3add6975b4f2..ff237d16d2216 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1430,7 +1430,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
@@ -1463,12 +1463,12 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 4dcacd00a6339..bf5b90db5fcb0 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -271,7 +271,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
   verify_oop(obj);
@@ -309,7 +309,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
   verify_oop(obj);
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 11b39ce15eb1a..1ccb06df48937 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -60,7 +60,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
 #ifdef _LP64
   // At a method handle call, the stack may not be properly aligned
   // when returning with an exception.
-  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
+  align_stack = (stub_id() == (int)C1StubId::handle_exception_from_callee_id);
 #endif
 #ifdef _LP64
@@ -124,10 +124,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
     }
     bind(L);
   }
@@ -671,7 +671,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
 }
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
   // incoming parameters
@@ -684,7 +684,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
@@ -703,12 +703,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
     __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
     // exception pc (RDX) are dead.
     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
@@ -775,13 +775,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ movptr(Address(rbp, 1*BytesPerWord), rax);
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id != handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
     // since we do a leave anyway.
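A note on the casts above: C1StubId is a scoped enum (enum class), so it no longer converts implicitly to the plain int that StubAssembler stores as its stub id. That is why comparisons against stub_id() and _stub_id gain an explicit (int) cast, while calls that take a C1StubId, such as Runtime1::entry_for, do not. A minimal sketch of the rule, with invented names:

    // Sketch only: shows why the (int) casts are needed; the enumerator
    // and function names here are invented for illustration.
    enum class DemoStubId : int { forward_exception_id, NUM_STUBIDS };

    static bool is_forward_exception(int stored_id) {
      return stored_id == (int)DemoStubId::forward_exception_id;  // OK
      // return stored_id == DemoStubId::forward_exception_id;    // would not compile
    }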
@@ -935,7 +935,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   __ testptr(rax, rax);  // have we deoptimized?
   __ jump_cc(Assembler::equal,
-             RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+             RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
   // the deopt blob expects exceptions in the special fields of
   // JavaThread, so copy and clear pending exception.
@@ -1007,7 +1007,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   // for better readability
   const bool must_gc_arguments = true;
@@ -1019,7 +1019,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   // stub code & info for the different stubs
   OopMapSet* oop_maps = nullptr;
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         __ leave();
@@ -1027,19 +1027,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass = rdx; // Incoming
         Register obj   = rax; // Result
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
@@ -1058,7 +1058,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       break;
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         Register bci = rax, method = rbx;
         __ enter();
@@ -1076,14 +1076,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length = rbx; // Incoming
         Register klass  = rdx; // Incoming
         Register obj    = rax; // Result
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
         }
@@ -1096,7 +1096,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
           __ sarl(t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ cmpl(t0, tag);
@@ -1110,7 +1110,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ enter();
         OopMap* map = save_live_registers(sasm, 3);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -1128,7 +1128,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
         // rax,: klass
         // rbx,: rank
@@ -1145,7 +1145,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
@@ -1185,44 +1185,44 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // note: no stubframe since we are about to leave the current
         //       activation and we are calling a leaf VM function only.
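As the handle_exception cases above (and the monitorenter/monitorexit cases in the next hunks) show, each *_nofpu id keeps sharing a single generator with its sibling: the _nofpu case only flips a flag before falling through to the common code. A self-contained sketch of that shape, with types and ids invented for illustration:

    #include <cstdio>

    enum class DemoStubId : int { monitorenter_id, monitorenter_nofpu_id };

    static void generate_monitorenter_demo(DemoStubId id) {
      bool save_fpu_registers = true;
      switch (id) {
      case DemoStubId::monitorenter_nofpu_id:
        save_fpu_registers = false;
        // fall through
      case DemoStubId::monitorenter_id:
        // in the real stub this flag guards save_live_registers(...)
        printf("emit monitorenter stub, save_fpu=%d\n", (int)save_fpu_registers);
        break;
      }
    }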
@@ -1230,7 +1230,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
         // tos + 0: link
         //     + 1: return address
@@ -1238,19 +1238,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Typical calling sequence:
         // __ push(klass_RInfo);  // object klass or other subclass
@@ -1303,10 +1303,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
@@ -1324,10 +1324,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
@@ -1347,7 +1347,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         StubFrame f(sasm, "deoptimize", dont_gc_arguments);
         const int num_rt_args = 2;  // thread, trap_request
@@ -1364,35 +1364,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         // we can't gc here so skip the oopmap but make sure that all
@@ -1407,7 +1407,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case fpu2long_stub_id:
+    case C1StubId::fpu2long_stub_id:
       {
 #ifdef _LP64
         Label done;
@@ -1496,7 +1496,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp
index 04e379842e152..9abfa45785bda 100644
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -264,10 +264,10 @@ class NewInstanceStub: public CodeStub {
   LIR_Opr _klass_reg;
   LIR_Opr _result;
   CodeEmitInfo* _info;
-  Runtime1::StubID _stub_id;
+  C1StubId _stub_id;
  public:
-  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id);
+  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id);
   virtual void emit_code(LIR_Assembler* e);
   virtual CodeEmitInfo* info() const { return _info; }
   virtual void visit(LIR_OpVisitState* visitor) {
@@ -515,11 +515,11 @@ class DeoptimizeStub : public CodeStub {
 class SimpleExceptionStub: public CodeStub {
  private:
   LIR_Opr _obj;
-  Runtime1::StubID _stub;
+  C1StubId _stub;
   CodeEmitInfo* _info;
  public:
-  SimpleExceptionStub(Runtime1::StubID stub, LIR_Opr obj, CodeEmitInfo* info):
+  SimpleExceptionStub(C1StubId stub, LIR_Opr obj, CodeEmitInfo* info):
     _obj(obj), _stub(stub), _info(info) {
     FrameMap* f = Compilation::current()->frame_map();
     f->update_reserved_argument_area_size(2 * BytesPerWord);
@@ -546,7 +546,7 @@ class SimpleExceptionStub: public CodeStub {
 class ArrayStoreExceptionStub: public SimpleExceptionStub {
  public:
-  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(Runtime1::throw_array_store_exception_id, obj, info) {}
+  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(C1StubId::throw_array_store_exception_id, obj, info) {}
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const { out->print("ArrayStoreExceptionStub"); }
 #endif // PRODUCT
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 7b519804bfecd..4e63736503fe0 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -659,7 +659,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
   if (UseFastNewInstance && klass->is_loaded()
       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
-    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
+    C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
@@ -670,7 +670,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4, oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
   } else {
-    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
+    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
     __ branch(lir_cond_always, slow_path);
     __ branch_destination(slow_path->continuation());
   }
@@ -1479,7 +1479,7 @@ void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
   args->append(receiver.result());
   CodeEmitInfo* info = state_for(x, x->state());
   call_runtime(&signature, args,
-               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
+               CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::register_finalizer_id)),
                voidType, info);
   set_no_result(x);
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index 8524f37177b45..5b44d5c0f1983 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,10 +109,13 @@ void StubAssembler::set_num_rt_args(int args) {
 // Implementation of Runtime1
-CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
+CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
+
+#define C1_BLOB_NAME_DEFINE(name) "C1 Runtime " # name "_blob",
 const char *Runtime1::_blob_names[] = {
-  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
+  C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
 };
+#undef C1_BLOB_NAME_DEFINE
 #ifndef PRODUCT
 // statistics
@@ -190,17 +193,17 @@ static void deopt_caller(JavaThread* current) {
   }
 }
-class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+class C1StubIdStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
  private:
-  Runtime1::StubID _id;
+  C1StubId _id;
  public:
-  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
+  C1StubIdStubAssemblerCodeGenClosure(C1StubId id) : _id(id) {}
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     return Runtime1::generate_code_for(_id, sasm);
   }
 };
-CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
+CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
   ResourceMark rm;
   // create code buffer for code storage
   CodeBuffer code(buffer_blob);
@@ -212,7 +215,7 @@ CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const ch
   Compilation::setup_code_buffer(&code, 0);
   // create assembler for code generation
-  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
+  StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
   // generate code for runtime stub
   oop_maps = cl->generate_code(sasm);
   assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
@@ -237,40 +240,41 @@ CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const ch
   return blob;
 }
-void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
   bool expect_oop_map = true;
 #ifdef ASSERT
   // Make sure that stubs that need oopmaps have them
   switch (id) {
     // These stubs don't need to have an oopmap
-    case dtrace_object_alloc_id:
-    case slow_subtype_check_id:
-    case fpu2long_stub_id:
-    case unwind_exception_id:
-    case counter_overflow_id:
+    case C1StubId::dtrace_object_alloc_id:
+    case C1StubId::slow_subtype_check_id:
+    case C1StubId::fpu2long_stub_id:
+    case C1StubId::unwind_exception_id:
+    case C1StubId::counter_overflow_id:
       expect_oop_map = false;
       break;
     default:
       break;
   }
 #endif
-  StubIDStubAssemblerCodeGenClosure cl(id);
+  C1StubIdStubAssemblerCodeGenClosure cl(id);
   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
   // install blob
-  _blobs[id] = blob;
+  _blobs[(int)id] = blob;
 }
 void Runtime1::initialize(BufferBlob* blob) {
   // platform-dependent initialization
   initialize_pd();
   // generate stubs
-  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+  int limit = (int)C1StubId::NUM_STUBIDS;
+  for (int id = 0; id < limit; id++) generate_blob_for(blob, (C1StubId)id);
   // printing
 #ifndef PRODUCT
   if (PrintSimpleStubs) {
     ResourceMark rm;
-    for (int id = 0; id < number_of_ids; id++) {
+    for (int id = 0; id < limit; id++) {
       _blobs[id]->print();
       if (_blobs[id]->oop_maps() != nullptr) {
         _blobs[id]->oop_maps()->print();
@@ -282,20 +286,22 @@ void Runtime1::initialize(BufferBlob* blob) {
   bs->generate_c1_runtime_stubs(blob);
 }
-CodeBlob* Runtime1::blob_for(StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
-  return _blobs[id];
+CodeBlob* Runtime1::blob_for(C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
+  return _blobs[(int)id];
 }
-const char* Runtime1::name_for(StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
-  return _blob_names[id];
+const char* Runtime1::name_for(C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
+  return _blob_names[(int)id];
 }
 const char* Runtime1::name_for_address(address entry) {
-  for (int id = 0; id < number_of_ids; id++) {
-    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
+  int limit = (int)C1StubId::NUM_STUBIDS;
+  for (int i = 0; i < limit; i++) {
+    C1StubId id = (C1StubId)i;
+    if (entry == entry_for(id)) return name_for(id);
   }
 #define FUNCTION_CASE(a, f) \
@@ -425,8 +431,8 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int
 JRT_END
-JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
-  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
+JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
+  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 JRT_END
@@ -525,8 +531,8 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
   // This function is called when we are about to throw an exception. Therefore,
   // we have to poll the stack watermark barrier to make sure that not yet safe
   // stack frames are made safe before returning into them.
-  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
-    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
+  if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) {
+    // The C1StubId::handle_exception_from_callee_id handler is invoked after the
     // frame has been unwound. It instead builds its own stub frame, to call the
     // runtime. But the throwing frame has already been unwound here.
     StackWatermarkSet::after_unwind(current);
@@ -922,7 +928,7 @@ static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TR
 // Therefore, if there is any chance of a race condition, we try to
 // patch only naturally aligned words, as single, full-word writes.
-JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id ))
+JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
 #ifndef PRODUCT
   if (PrintC1Statistics) {
     _patch_code_slowcase_cnt++;
@@ -959,9 +965,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
   Handle appendix(current, nullptr); // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
-    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
+    (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
-  if (stub_id == Runtime1::access_field_patching_id) {
+  if (stub_id == C1StubId::access_field_patching_id) {
     Bytecode_field field_access(caller_method, bci);
     fieldDescriptor result; // initialize class if needed
@@ -1044,7 +1050,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
       default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     load_klass = k;
-  } else if (stub_id == load_appendix_patching_id) {
+  } else if (stub_id == C1StubId::load_appendix_patching_id) {
     Bytecode_invoke bytecode(caller_method, bci);
     Bytecodes::Code bc = bytecode.invoke_code();
@@ -1128,7 +1134,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
     if (TracePatching) {
       ttyLocker ttyl;
       tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci,
-                    p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
+                    p2i(instr_pc), (stub_id == C1StubId::access_field_patching_id) ? "field" : "klass");
       nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
       assert(caller_code != nullptr, "nmethod not found");
@@ -1144,7 +1150,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
     }
     // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
     bool do_patch = true;
-    if (stub_id == Runtime1::access_field_patching_id) {
+    if (stub_id == C1StubId::access_field_patching_id) {
       // The offset may not be correct if the class was not loaded at code generation time.
      // Set it now.
       NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
@@ -1170,7 +1176,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
       assert(n_copy->data() == 0 ||
              n_copy->data() == (intptr_t)Universe::non_oop_word(),
              "illegal init value");
-      if (stub_id == Runtime1::load_klass_patching_id) {
+      if (stub_id == C1StubId::load_klass_patching_id) {
         assert(load_klass != nullptr, "klass not set");
         n_copy->set_data((intx) (load_klass));
       } else {
@@ -1182,7 +1188,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
         Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }
     }
-    } else if (stub_id == Runtime1::load_appendix_patching_id) {
+    } else if (stub_id == C1StubId::load_appendix_patching_id) {
       NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
       assert(n_copy->data() == 0 ||
              n_copy->data() == (intptr_t)Universe::non_oop_word(),
@@ -1201,7 +1207,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
       // first replace the tail, then the call
 #ifdef ARM
       if((load_klass_or_mirror_patch_id ||
-          stub_id == Runtime1::load_appendix_patching_id) &&
+          stub_id == C1StubId::load_appendix_patching_id) &&
           nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
         nmethod* nm = CodeCache::find_nmethod(instr_pc);
         address addr = nullptr;
@@ -1209,13 +1215,13 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
         RelocIterator mds(nm, copy_buff, copy_buff + 1);
         while (mds.next()) {
           if (mds.type() == relocInfo::oop_type) {
-            assert(stub_id == Runtime1::load_mirror_patching_id ||
-                   stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+            assert(stub_id == C1StubId::load_mirror_patching_id ||
+                   stub_id == C1StubId::load_appendix_patching_id, "wrong stub id");
             oop_Relocation* r = mds.oop_reloc();
             addr = (address)r->oop_addr();
             break;
           } else if (mds.type() == relocInfo::metadata_type) {
-            assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+            assert(stub_id == C1StubId::load_klass_patching_id, "wrong stub id");
             metadata_Relocation* r = mds.metadata_reloc();
             addr = (address)r->metadata_addr();
             break;
@@ -1238,9 +1244,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
       NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
       if (load_klass_or_mirror_patch_id ||
-          stub_id == Runtime1::load_appendix_patching_id) {
+          stub_id == C1StubId::load_appendix_patching_id) {
         relocInfo::relocType rtype =
-          (stub_id == Runtime1::load_klass_patching_id) ?
+          (stub_id == C1StubId::load_klass_patching_id) ?
                                      relocInfo::metadata_type : relocInfo::oop_type;
         // update relocInfo to metadata
@@ -1278,9 +1284,9 @@ JRT_END
 #else // DEOPTIMIZE_WHEN_PATCHING
-static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
-  if (stub_id == Runtime1::load_klass_patching_id ||
-      stub_id == Runtime1::load_mirror_patching_id) {
+static bool is_patching_needed(JavaThread* current, C1StubId stub_id) {
+  if (stub_id == C1StubId::load_klass_patching_id ||
+      stub_id == C1StubId::load_mirror_patching_id) {
     // last java frame on stack
     vframeStream vfst(current, true);
     assert(!vfst.at_end(), "Java frame must exist");
@@ -1309,7 +1315,7 @@ static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
   return true;
 }
-void Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id) {
+void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
 #ifndef PRODUCT
   if (PrintC1Statistics) {
     _patch_code_slowcase_cnt++;
@@ -1364,7 +1370,7 @@ int Runtime1::move_klass_patching(JavaThread* current) {
 {
   // Enter VM mode
   ResetNoHandleMark rnhm;
-  patch_code(current, load_klass_patching_id);
+  patch_code(current, C1StubId::load_klass_patching_id);
 }
 // Back in JAVA, use no oops DON'T safepoint
@@ -1381,7 +1387,7 @@ int Runtime1::move_mirror_patching(JavaThread* current) {
 {
   // Enter VM mode
   ResetNoHandleMark rnhm;
-  patch_code(current, load_mirror_patching_id);
+  patch_code(current, C1StubId::load_mirror_patching_id);
 }
 // Back in JAVA, use no oops DON'T safepoint
@@ -1398,7 +1404,7 @@ int Runtime1::move_appendix_patching(JavaThread* current) {
 {
   // Enter VM mode
   ResetNoHandleMark rnhm;
-  patch_code(current, load_appendix_patching_id);
+  patch_code(current, C1StubId::load_appendix_patching_id);
 }
 // Back in JAVA, use no oops DON'T safepoint
@@ -1425,7 +1431,7 @@ int Runtime1::access_field_patching(JavaThread* current) {
 {
   // Enter VM mode
   ResetNoHandleMark rnhm;
-  patch_code(current, access_field_patching_id);
+  patch_code(current, C1StubId::access_field_patching_id);
 }
 // Back in JAVA, use no oops DON'T safepoint
diff --git a/src/hotspot/share/c1/c1_Runtime1.hpp b/src/hotspot/share/c1/c1_Runtime1.hpp
index 2e4c9f8a73379..330c40675041b 100644
--- a/src/hotspot/share/c1/c1_Runtime1.hpp
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/allStatic.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/stubDeclarations.hpp"
 class StubAssembler;
@@ -37,61 +38,26 @@ class StubAssembler;
 // runtime routines needed by code code generated
 // by the Compiler1.
-#define RUNTIME1_STUBS(stub, last_entry) \
-  stub(dtrace_object_alloc) \
-  stub(unwind_exception) \
-  stub(forward_exception) \
-  stub(throw_range_check_failed) /* throws ArrayIndexOutOfBoundsException */ \
-  stub(throw_index_exception) /* throws IndexOutOfBoundsException */ \
-  stub(throw_div0_exception) \
-  stub(throw_null_pointer_exception) \
-  stub(register_finalizer) \
-  stub(new_instance) \
-  stub(fast_new_instance) \
-  stub(fast_new_instance_init_check) \
-  stub(new_type_array) \
-  stub(new_object_array) \
-  stub(new_multi_array) \
-  stub(handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \
-  stub(handle_exception) \
-  stub(handle_exception_from_callee) \
-  stub(throw_array_store_exception) \
-  stub(throw_class_cast_exception) \
-  stub(throw_incompatible_class_change_error) \
-  stub(slow_subtype_check) \
-  stub(monitorenter) \
-  stub(monitorenter_nofpu) /* optimized version that does not preserve fpu registers */ \
-  stub(monitorexit) \
-  stub(monitorexit_nofpu) /* optimized version that does not preserve fpu registers */ \
-  stub(deoptimize) \
-  stub(access_field_patching) \
-  stub(load_klass_patching) \
-  stub(load_mirror_patching) \
-  stub(load_appendix_patching) \
-  stub(fpu2long_stub) \
-  stub(counter_overflow) \
-  stub(predicate_failed_trap) \
-  last_entry(number_of_ids)
-
-#define DECLARE_STUB_ID(x) x ## _id ,
-#define DECLARE_LAST_STUB_ID(x) x
-#define STUB_NAME(x) #x " Runtime1 stub",
-#define LAST_STUB_NAME(x) #x " Runtime1 stub"
-
 class StubAssemblerCodeGenClosure: public Closure {
  public:
   virtual OopMapSet* generate_code(StubAssembler* sasm) = 0;
 };
+// define C1StubId enum tags: unwind_exception_id etc
+
+#define C1_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name),
+enum class C1StubId :int {
+  NO_STUBID = -1,
+  C1_STUBS_DO(C1_STUB_ID_ENUM_DECLARE)
+  NUM_STUBIDS
+};
+#undef C1_STUB_ID_ENUM_DECLARE
+
 class Runtime1: public AllStatic {
   friend class VMStructs;
   friend class ArrayCopyStub;
- public:
-  enum StubID {
-    RUNTIME1_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
-  };
-
+public:
   // statistics
 #ifndef PRODUCT
   static uint _generic_arraycopystub_cnt;
@@ -115,17 +81,17 @@ class Runtime1: public AllStatic {
 #endif
  private:
-  static CodeBlob* _blobs[number_of_ids];
+  static CodeBlob* _blobs[(int)C1StubId::NUM_STUBIDS];
   static const char* _blob_names[];
   // stub generation
  public:
-  static CodeBlob* generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
-  static void generate_blob_for(BufferBlob* blob, StubID id);
-  static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
+  static CodeBlob* generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
+  static void generate_blob_for(BufferBlob* blob, C1StubId id);
+  static OopMapSet* generate_code_for(C1StubId id, StubAssembler* sasm);
  private:
   static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
-  static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
+  static OopMapSet* generate_handle_exception(C1StubId id, StubAssembler* sasm);
   static void generate_unwind_exception(StubAssembler *sasm);
   static OopMapSet* generate_patching(StubAssembler* sasm, address target);
@@ -140,7 +106,7 @@ class Runtime1: public AllStatic {
   static address counter_overflow(JavaThread* current, int bci, Method* method);
-  static void unimplemented_entry(JavaThread* current, StubID id);
+  static void unimplemented_entry(JavaThread* current, C1StubId id);
   static address exception_handler_for_pc(JavaThread* current);
@@ -162,7 +128,7 @@ class Runtime1: public AllStatic {
   static int move_mirror_patching(JavaThread* current);
   static int move_appendix_patching(JavaThread* current);
-  static void patch_code(JavaThread* current, StubID stub_id);
+  static void patch_code(JavaThread* current, C1StubId stub_id);
  public:
   // initialization
@@ -170,9 +136,9 @@ class Runtime1: public AllStatic {
   static void initialize_pd();
   // stubs
-  static CodeBlob* blob_for (StubID id);
-  static address entry_for(StubID id) { return blob_for(id)->code_begin(); }
-  static const char* name_for (StubID id);
+  static CodeBlob* blob_for (C1StubId id);
+  static address entry_for(C1StubId id) { return blob_for(id)->code_begin(); }
+  static const char* name_for (C1StubId id);
   static const char* name_for_address(address entry);
   // platform might add runtime names.
diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
index f82b5cfcc556a..449ff2e4acf8b 100644
--- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,8 +219,8 @@ class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
   C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
   C1G1PostBarrierCodeGenClosure post_code_gen_cl;
-  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_pre_barrier_slow",
+  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                               false, &pre_code_gen_cl);
-  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_post_barrier_slow",
+  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow",
                                                                false, &post_code_gen_cl);
 }
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index 6b104e7e19935..0e8b02d247e9b 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,27 +263,27 @@ class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeG
 void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
   C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
-  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                               "shenandoah_pre_barrier_slow",
                                                               false, &pre_code_gen_cl);
   if (ShenandoahLoadRefBarrier) {
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
-    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                           "shenandoah_load_reference_barrier_strong_slow",
                                                                           false, &lrb_strong_code_gen_cl);
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
-    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                                  "shenandoah_load_reference_barrier_strong_native_slow",
                                                                                  false, &lrb_strong_native_code_gen_cl);
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
-    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                         "shenandoah_load_reference_barrier_weak_slow",
                                                                         false, &lrb_weak_code_gen_cl);
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
-    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                            "shenandoah_load_reference_barrier_phantom_slow",
                                                                            false, &lrb_phantom_code_gen_cl);
   }
diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
index caf7eb2e514e9..6f64392cefced 100644
--- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,7 @@ class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure
 static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
   XLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
index 68e215e9ebca0..9c16714b26e3f 100644
--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -507,7 +507,7 @@ class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure
 static address generate_c1_load_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
   ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
@@ -527,7 +527,7 @@ class ZStoreBarrierRuntimeStubCodeGenClosur
 static address generate_c1_store_runtime_stub(BufferBlob* blob, bool self_healing, const char* name) {
   ZStoreBarrierRuntimeStubCodeGenClosure cl(self_healing);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp
index eb6887f11baf6..6ab28eaa6eeba 100644
--- a/src/hotspot/share/opto/escape.cpp
+++ b/src/hotspot/share/opto/escape.cpp
@@ -2043,7 +2043,7 @@ void ConnectionGraph::add_call_node(CallNode* call) {
       ciMethod* meth = call->as_CallJava()->method();
       if (meth == nullptr) {
         const char* name = call->as_CallStaticJava()->_name;
-        assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
+        assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
         // Returns a newly allocated non-escaped object.
         add_java_object(call, PointsToNode::NoEscape);
         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
@@ -2738,7 +2738,7 @@ int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
 #ifdef ASSERT
     if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
       const char* name = alloc->as_CallStaticJava()->_name;
-      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
+      assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
     }
 #endif
     // Non-escaped allocation returned from Java or runtime call have unknown values in fields.
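The two escape.cpp asserts above track the renamed C2 stubs: each stub name is now generated with a "C2 Runtime " prefix (see the C2_*_NAME_DEFINE macros in opto/runtime.cpp below), and the new comparison length 25 is exactly strlen("C2 Runtime multianewarray"). A quick sanity sketch, using a hypothetical generated name:

    #include <cassert>
    #include <cstring>

    int main() {
      // "C2 Runtime multianewarray2" is a hypothetical name of the kind
      // the C2_STUB_NAME_DEFINE macro would produce for multianewarray2.
      const char* name = "C2 Runtime multianewarray2";
      assert(strlen("C2 Runtime multianewarray") == 25);
      assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0);
      return 0;
    }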
diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp
index 54408146d0c26..3dd94f619fd1a 100644
--- a/src/hotspot/share/opto/runtime.cpp
+++ b/src/hotspot/share/opto/runtime.cpp
@@ -88,34 +88,27 @@
 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
-
-
-// Compiled code entry points
-address OptoRuntime::_new_instance_Java = nullptr;
-address OptoRuntime::_new_array_Java = nullptr;
-address OptoRuntime::_new_array_nozero_Java = nullptr;
-address OptoRuntime::_multianewarray2_Java = nullptr;
-address OptoRuntime::_multianewarray3_Java = nullptr;
-address OptoRuntime::_multianewarray4_Java = nullptr;
-address OptoRuntime::_multianewarray5_Java = nullptr;
-address OptoRuntime::_multianewarrayN_Java = nullptr;
-address OptoRuntime::_vtable_must_compile_Java = nullptr;
-address OptoRuntime::_complete_monitor_locking_Java = nullptr;
-address OptoRuntime::_monitor_notify_Java = nullptr;
-address OptoRuntime::_monitor_notifyAll_Java = nullptr;
-address OptoRuntime::_rethrow_Java = nullptr;
-
-address OptoRuntime::_slow_arraycopy_Java = nullptr;
-address OptoRuntime::_register_finalizer_Java = nullptr;
-#if INCLUDE_JVMTI
-address OptoRuntime::_notify_jvmti_vthread_start = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_end = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_mount = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_unmount = nullptr;
-#endif
-
-UncommonTrapBlob* OptoRuntime::_uncommon_trap_blob;
-ExceptionBlob* OptoRuntime::_exception_blob;
+#define C2_BLOB_FIELD_DEFINE(name, type) \
+  type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr;
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
+  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
+#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
+  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
+C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
+#undef C2_BLOB_FIELD_DEFINE
+#undef C2_STUB_FIELD_DEFINE
+#undef C2_JVMTI_STUB_FIELD_DEFINE
+
+#define C2_BLOB_NAME_DEFINE(name, type) "C2 Runtime " # name "_blob",
+#define C2_STUB_NAME_DEFINE(name, f, t, r) "C2 Runtime " # name,
+#define C2_JVMTI_STUB_NAME_DEFINE(name) "C2 Runtime " # name,
+const char* OptoRuntime::_stub_names[] = {
+  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
+};
+#undef C2_BLOB_NAME_DEFINE
+#undef C2_STUB_NAME_DEFINE
+#undef C2_JVMTI_STUB_NAME_DEFINE
 // This should be called in an assertion at the start of OptoRuntime routines
 // which are entered from compiled code (all of them)
@@ -132,46 +125,72 @@ static bool check_compiled_frame(JavaThread* thread) {
 }
 #endif // ASSERT
-
+/*
 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
   var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
   if (var == nullptr) { return false; }
+*/
-bool OptoRuntime::generate(ciEnv* env) {
+#define GEN_C2_BLOB(name, type) \
+  generate_ ## name ## _blob();
+
+// a few helper macros to conjure up generate_stub call arguments
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_TYPEFUNC(name) name ## _Type
+#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
+#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)
+
+// Almost all the C functions targeted from the generated stubs are
+// implemented locally to OptoRuntime with names that can be generated
+// from the stub name by appending suffix '_C'. However, in two cases
+// a common target method also needs to be called from shared runtime
+// stubs. In these two cases the opto stubs rely on method
+// implementations defined in class SharedRuntime. The following
+// defines temporarily rebind the generated names to reference the
+// relevant implementations.
+
+#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc ) \
+  C2_STUB_FIELD_NAME(name) = \
+    generate_stub(env, \
+                  C2_STUB_TYPEFUNC(name), \
+                  C2_STUB_C_FUNC(name), \
+                  C2_STUB_NAME(name), \
+                  fancy_jump, \
+                  pass_tls, \
+                  pass_retpc); \
+  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; } \
+
+#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
+
+#define GEN_C2_JVMTI_STUB(name) \
+  STUB_FIELD_NAME(name) = \
+    generate_stub(env, \
+                  notify_jvmti_vthread_Type, \
+                  C2_JVMTI_STUB_C_FUNC(name), \
+                  C2_STUB_NAME(name), \
+                  0, \
+                  true, \
+                  false); \
+  if (STUB_FIELD_NAME(name) == nullptr) { return false; } \
-  generate_uncommon_trap_blob();
-  generate_exception_blob();
-
-  // Note: tls: Means fetching the return oop out of the thread-local storage
-  //
-  // variable/name type-function-gen , runtime method ,fncy_jp, tls,retpc
-  // -------------------------------------------------------------------------------------------------------------------------------
-  gen(env, _new_instance_Java , new_instance_Type , new_instance_C , 0 , true, false);
-  gen(env, _new_array_Java , new_array_Type , new_array_C , 0 , true, false);
-  gen(env, _new_array_nozero_Java , new_array_Type , new_array_nozero_C , 0 , true, false);
-  gen(env, _multianewarray2_Java , multianewarray2_Type , multianewarray2_C , 0 , true, false);
-  gen(env, _multianewarray3_Java , multianewarray3_Type , multianewarray3_C , 0 , true, false);
-  gen(env, _multianewarray4_Java , multianewarray4_Type , multianewarray4_C , 0 , true, false);
-  gen(env, _multianewarray5_Java , multianewarray5_Type , multianewarray5_C , 0 , true, false);
-  gen(env, _multianewarrayN_Java , multianewarrayN_Type , multianewarrayN_C , 0 , true, false);
-#if INCLUDE_JVMTI
-  gen(env, _notify_jvmti_vthread_start , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_start, 0, true, false);
-  gen(env, _notify_jvmti_vthread_end , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_end, 0, true, false);
-  gen(env, _notify_jvmti_vthread_mount , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_mount, 0, true, false);
-  gen(env, _notify_jvmti_vthread_unmount , notify_jvmti_vthread_Type , SharedRuntime::notify_jvmti_vthread_unmount, 0, true, false);
-#endif
-  gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false);
-  gen(env, _monitor_notify_Java , monitor_notify_Type , monitor_notify_C , 0 , false, false);
-  gen(env, _monitor_notifyAll_Java , monitor_notify_Type , monitor_notifyAll_C , 0 , false, false);
-  gen(env, _rethrow_Java , rethrow_Type , rethrow_C , 2 , true , true );
+bool OptoRuntime::generate(ciEnv* env) {
-  gen(env, _slow_arraycopy_Java , slow_arraycopy_Type , SharedRuntime::slow_arraycopy_C , 0 , false, false);
-  gen(env, _register_finalizer_Java , register_finalizer_Type , register_finalizer , 0 , false, false);
+  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
   return true;
 }
-#undef gen
+#undef GEN_C2_BLOB
+
+#undef C2_STUB_FIELD_NAME
+#undef C2_STUB_TYPEFUNC
+#undef C2_STUB_C_FUNC
+#undef C2_STUB_NAME
+#undef GEN_C2_STUB
+
+#undef C2_JVMTI_STUB_C_FUNC
+#undef GEN_C2_JVMTI_STUB
+// #undef gen
 // Helper method to do generation of RunTimeStub's
@@ -201,6 +220,19 @@ const char* OptoRuntime::stub_name(address entry) {
 #endif
 }
+// local methods passed as arguments to stub generator that forward
+// control to corresponding JRT methods of SharedRuntime
+
+void OptoRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
+                                   oopDesc* dest, jint dest_pos,
+                                   jint length, JavaThread* thread) {
+  SharedRuntime::slow_arraycopy_C(src, src_pos, dest, dest_pos, length, thread);
+}
+
+void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
+  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
+}
+
 //=============================================================================
 // Opto compiler runtime routines
@@ -529,6 +561,10 @@ const TypeFunc *OptoRuntime::new_array_Type() {
   return TypeFunc::make(domain, range);
 }
+const TypeFunc *OptoRuntime::new_array_nozero_Type() {
+  return new_array_Type();
+}
+
 const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
   // create input type (domain)
   const int nargs = ndim + 1;
@@ -607,6 +643,9 @@ const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
   return TypeFunc::make(domain,range);
 }
+const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
+  return complete_monitor_enter_Type();
+}
 //-----------------------------------------------------------------------------
 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
@@ -637,6 +676,10 @@ const TypeFunc *OptoRuntime::monitor_notify_Type() {
   return TypeFunc::make(domain, range);
 }
+const TypeFunc *OptoRuntime::monitor_notifyAll_Type() {
+  return monitor_notify_Type();
+}
+
 const TypeFunc* OptoRuntime::flush_windows_Type() {
   // create input type (domain)
   const Type** fields = TypeTuple::fields(1);
@@ -1827,7 +1870,7 @@ const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
 }
-JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* current))
+JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
   assert(oopDesc::is_oop(obj), "must be a valid oop");
   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp
index 34c2780a2f8d7..da08fc0f38660 100644
--- a/src/hotspot/share/opto/runtime.hpp
+++ b/src/hotspot/share/opto/runtime.hpp
@@ -30,6 +30,7 @@
 #include "opto/optoreg.hpp"
 #include "opto/type.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/stubDeclarations.hpp"
 #include "runtime/vframe.hpp"
 //------------------------------OptoRuntime------------------------------------
@@ -98,37 +99,46 @@ class NamedCounter : public CHeapObj {
 typedef const TypeFunc*(*TypeFunc_generator)();
+// define OptoStubId enum tags: uncommon_trap_id etc
+
+#define C2_BLOB_ID_ENUM_DECLARE(name, type) STUB_ID_NAME(name),
+#define C2_STUB_ID_ENUM_DECLARE(name, f, t, r) STUB_ID_NAME(name),
+#define C2_JVMTI_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name),
+enum class OptoStubId :int {
+  NO_STUBID = -1,
+  C2_STUBS_DO(C2_BLOB_ID_ENUM_DECLARE, C2_STUB_ID_ENUM_DECLARE, C2_JVMTI_STUB_ID_ENUM_DECLARE)
+  NUM_STUBIDS
+};
+#undef C2_BLOB_ID_ENUM_DECLARE
+#undef C2_STUB_ID_ENUM_DECLARE
+#undef C2_JVMTI_STUB_ID_ENUM_DECLARE
+
 class OptoRuntime : public AllStatic {
   friend class Matcher;  // allow access to stub names
  private:
+  // declare opto stub address/blob holder static fields
+#define C2_BLOB_FIELD_DECLARE(name, type) \
+  static type BLOB_FIELD_NAME(name);
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_FIELD_DECLARE(name, f, t, r) \
+  static address C2_STUB_FIELD_NAME(name) ;
+#define C2_JVMTI_STUB_FIELD_DECLARE(name) \
+  static address STUB_FIELD_NAME(name);
+
+  C2_STUBS_DO(C2_BLOB_FIELD_DECLARE, C2_STUB_FIELD_DECLARE, C2_JVMTI_STUB_FIELD_DECLARE)
+
+#undef C2_BLOB_FIELD_DECLARE
+#undef C2_STUB_FIELD_NAME
+#undef C2_STUB_FIELD_DECLARE
+#undef C2_JVMTI_STUB_FIELD_DECLARE
+
+  // Stub names indexed by OptoStubId
+  static const char *_stub_names[];
+
   // define stubs
   static address generate_stub(ciEnv* ci_env, TypeFunc_generator gen, address C_function, const char* name, int is_fancy_jump, bool pass_tls, bool return_pc);
-  // References to generated stubs
-  static address _new_instance_Java;
-  static address _new_array_Java;
-  static address _new_array_nozero_Java;
-  static address _multianewarray2_Java;
-  static address _multianewarray3_Java;
-  static address _multianewarray4_Java;
-  static address _multianewarray5_Java;
-  static address _multianewarrayN_Java;
-  static address _vtable_must_compile_Java;
-  static address _complete_monitor_locking_Java;
-  static address _rethrow_Java;
-  static address _monitor_notify_Java;
-  static address _monitor_notifyAll_Java;
-
-  static address _slow_arraycopy_Java;
-  static address _register_finalizer_Java;
-#if INCLUDE_JVMTI
-  static address _notify_jvmti_vthread_start;
-  static address _notify_jvmti_vthread_end;
-  static address _notify_jvmti_vthread_mount;
-  static address _notify_jvmti_vthread_unmount;
-#endif
-
   //
   // Implementation of runtime methods
   // =================================
@@ -148,6 +158,13 @@ class OptoRuntime : public AllStatic {
   static void multianewarray5_C(Klass* klass, int len1, int len2, int len3, int len4, int len5, JavaThread* current);
   static void multianewarrayN_C(Klass* klass, arrayOopDesc* dims, JavaThread* current);
+  // local methods passed as arguments to stub generator that forward
+  // control to corresponding JRT methods of SharedRuntime
+  static void slow_arraycopy_C(oopDesc* src, jint src_pos,
+                               oopDesc* dest, jint dest_pos,
+                               jint length, JavaThread* thread);
+  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
+
 public:
   static void monitor_notify_C(oopDesc* obj, JavaThread* current);
   static void monitor_notifyAll_C(oopDesc* obj, JavaThread* current);
@@ -168,13 +185,10 @@ class OptoRuntime : public AllStatic {
   // CodeBlob support
   // ===================================================================
-  static UncommonTrapBlob* _uncommon_trap_blob;
-  static ExceptionBlob* _exception_blob;
-
   static void generate_uncommon_trap_blob(void);
   static void generate_exception_blob();
-  static void register_finalizer(oopDesc* obj, JavaThread* current);
+  static void register_finalizer_C(oopDesc* obj, JavaThread* current);
 public:
@@ -188,6 +202,12 @@ class OptoRuntime : public AllStatic {
   // Returns the name of a stub
   static const char* stub_name(address entry);
+  // Returns the name associated with a given stub id
+  static const char* stub_name(OptoStubId id) {
+    assert(id > OptoStubId::NO_STUBID && id < OptoStubId::NUM_STUBIDS, "stub id out of range");
+    return _stub_names[(int)id];
+  }
+
   // access to runtime stubs entry points for java code
   static address new_instance_Java() { return _new_instance_Java; }
   static address new_array_Java() { return _new_array_Java; }
@@ -197,7 +217,6 @@ class OptoRuntime : public AllStatic {
   static address multianewarray4_Java() { return _multianewarray4_Java; }
   static address multianewarray5_Java() { return _multianewarray5_Java; }
   static address multianewarrayN_Java() { return _multianewarrayN_Java; }
-  static address vtable_must_compile_stub() { return _vtable_must_compile_Java; }
   static address complete_monitor_locking_Java() { return _complete_monitor_locking_Java; }
   static address monitor_notify_Java() { return _monitor_notify_Java; }
   static address monitor_notifyAll_Java() { return _monitor_notifyAll_Java; }
@@ -227,6 +246,7 @@ class OptoRuntime : public AllStatic {
   static const TypeFunc* new_instance_Type(); // object allocation (slow case)
   static const TypeFunc* new_array_Type ();   // [a]newarray (slow case)
+  static const TypeFunc* new_array_nozero_Type ();   // [a]newarray (slow case)
   static const TypeFunc* multianewarray_Type(int ndim); // multianewarray
   static const TypeFunc* multianewarray2_Type(); // multianewarray
   static const TypeFunc* multianewarray3_Type(); // multianewarray
@@ -234,8 +254,10 @@ class OptoRuntime : public AllStatic {
   static const TypeFunc* multianewarray5_Type(); // multianewarray
   static const TypeFunc* multianewarrayN_Type(); // multianewarray
   static const TypeFunc* complete_monitor_enter_Type();
+  static const TypeFunc* complete_monitor_locking_Type();
   static const TypeFunc* complete_monitor_exit_Type();
   static const TypeFunc* monitor_notify_Type();
+  static const TypeFunc* monitor_notifyAll_Type();
   static const TypeFunc* uncommon_trap_Type();
   static const TypeFunc* athrow_Type();
   static const TypeFunc* rethrow_Type();
diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp
index c9e946906f796..ccca14c61b5b8 100644
--- a/src/hotspot/share/runtime/stubDeclarations.hpp
+++ b/src/hotspot/share/runtime/stubDeclarations.hpp
@@ -70,13 +70,108 @@
   /* other stubs */                                                    \
   SHARED_JFR_STUBS_DO(do_blob)                                         \
-// generate a stub id enum tag from a name
+// C1 stubs are always generated in a generic CodeBlob
+
+#ifdef COMPILER1
+// do_blob(name)
+#define C1_STUBS_DO(do_blob)                                           \
+  do_blob(dtrace_object_alloc)                                         \
+  do_blob(unwind_exception)                                            \
+  do_blob(forward_exception)                                           \
+  do_blob(throw_range_check_failed)  /* throws ArrayIndexOutOfBoundsException */ \
+  do_blob(throw_index_exception)     /* throws IndexOutOfBoundsException */ \
+  do_blob(throw_div0_exception)                                        \
+  do_blob(throw_null_pointer_exception)                                \
+  do_blob(register_finalizer)                                          \
+  do_blob(new_instance)                                                \
+  do_blob(fast_new_instance)                                           \
+  do_blob(fast_new_instance_init_check)                                \
+  do_blob(new_type_array)                                              \
+  do_blob(new_object_array)                                            \
+  do_blob(new_multi_array)                                             \
+  do_blob(handle_exception_nofpu)    /* optimized version that does not preserve fpu registers */ \
+  do_blob(handle_exception)                                            \
+  do_blob(handle_exception_from_callee)                                \
+  do_blob(throw_array_store_exception)                                 \
+  do_blob(throw_class_cast_exception)                                  \
+  do_blob(throw_incompatible_class_change_error)                       \
+  do_blob(slow_subtype_check)                                          \
+  do_blob(monitorenter)                                                \
+  do_blob(monitorenter_nofpu)        /* optimized version that does not preserve fpu registers */ \
+  do_blob(monitorexit)                                                 \
+  do_blob(monitorexit_nofpu)         /* optimized version that does not preserve fpu registers */ \
+  do_blob(deoptimize)                                                  \
+  do_blob(access_field_patching)                                       \
+  do_blob(load_klass_patching)                                         \
+  do_blob(load_mirror_patching)                                        \
+  do_blob(load_appendix_patching)                                      \
+  do_blob(fpu2long_stub)                                               \
+  do_blob(counter_overflow)                                            \
+  do_blob(predicate_failed_trap)                                       \
+
+#else
+#define C1_STUBS_DO(do_blob)
+#endif
+
+// Opto stubs can be stored as entries with just an address
or as +// blobs of different types. The former may include some JVMTI stubs. +// +// n.b. blobs and stub defines are generated in the order defined by +// C2_STUBS_DO, allowing dependencies from any given stub on its +// predecessors to be guaranteed. That explains the initial placement +// of the blob declarations and intermediate placement of the jvmti +// stubs. + +#ifdef COMPILER2 +// do_jvmti_stub(name) +#if INCLUDE_JVMTI +#define C2_JVMTI_STUBS_DO(do_jvmti_stub) \ + do_jvmti_stub(notify_jvmti_vthread_start) \ + do_jvmti_stub(notify_jvmti_vthread_end) \ + do_jvmti_stub(notify_jvmti_vthread_mount) \ + do_jvmti_stub(notify_jvmti_vthread_unmount) \ + +#else +#define C2_JVMTI_STUBS_DO(do_jvmti_stub) +#endif // INCLUDE_JVMTI + +// do_blob(name, type) +// do_stub(name, fancy_jump, pass_tls, return_pc) +// do_jvmti_stub(name) +// +// n.b. non-jvmti stubs may employ a special type of jump (0, 1 or 2) +// and require access to TLS and the return pc. jvmti stubs always +// employ jump 0, and require no special access +#define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) \ + do_blob(uncommon_trap, UncommonTrapBlob*) \ + do_blob(exception, ExceptionBlob*) \ + do_stub(new_instance, 0, true, false) \ + do_stub(new_array, 0, true, false) \ + do_stub(new_array_nozero, 0, true, false) \ + do_stub(multianewarray2, 0, true, false) \ + do_stub(multianewarray3, 0, true, false) \ + do_stub(multianewarray4, 0, true, false) \ + do_stub(multianewarray5, 0, true, false) \ + do_stub(multianewarrayN, 0, true, false) \ + C2_JVMTI_STUBS_DO(do_jvmti_stub) \ + do_stub(complete_monitor_locking, 0, false, false) \ + do_stub(monitor_notify, 0, false, false) \ + do_stub(monitor_notifyAll, 0, false, false) \ + do_stub(rethrow, 2, true, true) \ + do_stub(slow_arraycopy, 0, false, false) \ + do_stub(register_finalizer, 0, false, false) \ + +#else +#define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) +#endif + +// generate a stub or blob id enum tag from a name #define STUB_ID_NAME(base) base##_id -// generate a blob id enum tag from a name -#define BLOB_ID_NAME(base) base##_id +// generate a stub field name +#define STUB_FIELD_NAME(base) _##base // generate a blob field name diff --git a/src/java.base/share/classes/java/lang/classfile/Annotation.java b/src/java.base/share/classes/java/lang/classfile/Annotation.java index 357a7cb77ee83..009248ffd7856 100644 --- a/src/java.base/share/classes/java/lang/classfile/Annotation.java +++ b/src/java.base/share/classes/java/lang/classfile/Annotation.java @@ -34,6 +34,8 @@ import java.lang.constant.ClassDesc; import java.util.List; + +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -78,7 +80,7 @@ public sealed interface Annotation * {@return the annotation interface, as a symbolic descriptor} */ default ClassDesc classSymbol() { - return ClassDesc.ofDescriptor(className().stringValue()); + return Util.fieldTypeSymbol(className()); } /** @@ -115,7 +117,7 @@ static Annotation of(Utf8Entry annotationClass, */ static Annotation of(ClassDesc annotationClass, List<AnnotationElement> elements) { - return of(TemporaryConstantPool.INSTANCE.utf8Entry(annotationClass.descriptorString()), elements); + return of(TemporaryConstantPool.INSTANCE.utf8Entry(annotationClass), elements); } /** @@ -125,6 +127,6 @@ static Annotation of(ClassDesc annotationClass, */ static Annotation of(ClassDesc annotationClass, AnnotationElement... 
elements) { - return of(TemporaryConstantPool.INSTANCE.utf8Entry(annotationClass.descriptorString()), elements); + return of(TemporaryConstantPool.INSTANCE.utf8Entry(annotationClass), elements); } } diff --git a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java index 613142077552d..c4474a248b520 100644 --- a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java +++ b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java @@ -38,6 +38,8 @@ import java.lang.constant.Constable; import java.util.ArrayList; import java.util.List; + +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -405,7 +407,7 @@ sealed interface OfClass extends AnnotationValue /** {@return the class descriptor} */ default ClassDesc classSymbol() { - return ClassDesc.ofDescriptor(className().stringValue()); + return Util.fieldTypeSymbol(className()); } } @@ -423,7 +425,7 @@ sealed interface OfEnum extends AnnotationValue /** {@return the enum class descriptor} */ default ClassDesc classSymbol() { - return ClassDesc.ofDescriptor(className().stringValue()); + return Util.fieldTypeSymbol(className()); } /** {@return the enum constant name} */ @@ -452,7 +454,7 @@ static OfEnum ofEnum(Utf8Entry className, * @param constantName the name of the enum constant */ static OfEnum ofEnum(ClassDesc className, String constantName) { - return ofEnum(TemporaryConstantPool.INSTANCE.utf8Entry(className.descriptorString()), + return ofEnum(TemporaryConstantPool.INSTANCE.utf8Entry(className), TemporaryConstantPool.INSTANCE.utf8Entry(constantName)); } @@ -469,7 +471,7 @@ static OfClass ofClass(Utf8Entry className) { * @param className the descriptor of the class */ static OfClass ofClass(ClassDesc className) { - return ofClass(TemporaryConstantPool.INSTANCE.utf8Entry(className.descriptorString())); + return ofClass(TemporaryConstantPool.INSTANCE.utf8Entry(className)); } /** diff --git a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java index 1d5bb271dbe21..905c7355c3406 100644 --- a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java @@ -194,7 +194,9 @@ default ClassBuilder withField(String name, default ClassBuilder withField(String name, ClassDesc descriptor, int flags) { - return withField(name, descriptor, Util.buildingFlags(flags)); + return withField(constantPool().utf8Entry(name), + constantPool().utf8Entry(descriptor), + flags); } /** diff --git a/src/java.base/share/classes/java/lang/classfile/CodeBuilder.java b/src/java.base/share/classes/java/lang/classfile/CodeBuilder.java index be0778d198458..5f4e5b72a1919 100644 --- a/src/java.base/share/classes/java/lang/classfile/CodeBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/CodeBuilder.java @@ -769,7 +769,7 @@ default CodeBuilder localVariable(int slot, Utf8Entry nameEntry, Utf8Entry descr default CodeBuilder localVariable(int slot, String name, ClassDesc descriptor, Label startScope, Label endScope) { return localVariable(slot, constantPool().utf8Entry(name), - constantPool().utf8Entry(descriptor.descriptorString()), + constantPool().utf8Entry(descriptor), startScope, endScope); } diff --git a/src/java.base/share/classes/java/lang/classfile/FieldModel.java b/src/java.base/share/classes/java/lang/classfile/FieldModel.java index 
e14f264ca2a98..006103d5f9cbc 100644 --- a/src/java.base/share/classes/java/lang/classfile/FieldModel.java +++ b/src/java.base/share/classes/java/lang/classfile/FieldModel.java @@ -31,6 +31,7 @@ import java.lang.classfile.constantpool.Utf8Entry; import jdk.internal.classfile.impl.BufferedFieldBuilder; import jdk.internal.classfile.impl.FieldImpl; +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -59,6 +60,6 @@ public sealed interface FieldModel /** {@return the field descriptor of this field, as a symbolic descriptor} */ default ClassDesc fieldTypeSymbol() { - return ClassDesc.ofDescriptor(fieldType().stringValue()); + return Util.fieldTypeSymbol(fieldType()); } } diff --git a/src/java.base/share/classes/java/lang/classfile/MethodModel.java b/src/java.base/share/classes/java/lang/classfile/MethodModel.java index 651bc194ee3e0..bd51f3c97d72e 100644 --- a/src/java.base/share/classes/java/lang/classfile/MethodModel.java +++ b/src/java.base/share/classes/java/lang/classfile/MethodModel.java @@ -31,6 +31,7 @@ import java.lang.classfile.constantpool.Utf8Entry; import jdk.internal.classfile.impl.BufferedMethodBuilder; import jdk.internal.classfile.impl.MethodImpl; +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -59,7 +60,7 @@ public sealed interface MethodModel /** {@return the method descriptor of this method, as a symbolic descriptor} */ default MethodTypeDesc methodTypeSymbol() { - return MethodTypeDesc.ofDescriptor(methodType().stringValue()); + return Util.methodTypeSymbol(methodType()); } /** {@return the body of this method, if there is one} */ diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableInfo.java b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableInfo.java index 954682f665def..0ef8542e05c55 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableInfo.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableInfo.java @@ -28,6 +28,7 @@ import java.lang.classfile.constantpool.Utf8Entry; import jdk.internal.classfile.impl.BoundLocalVariable; import jdk.internal.classfile.impl.UnboundAttribute; +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -65,7 +66,7 @@ public sealed interface LocalVariableInfo * {@return the field descriptor of the local variable} */ default ClassDesc typeSymbol() { - return ClassDesc.ofDescriptor(type().stringValue()); + return Util.fieldTypeSymbol(type()); } /** diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RecordComponentInfo.java b/src/java.base/share/classes/java/lang/classfile/attribute/RecordComponentInfo.java index ab7df7a5db2b4..5a4c0d87b839a 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RecordComponentInfo.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RecordComponentInfo.java @@ -33,6 +33,7 @@ import jdk.internal.classfile.impl.BoundRecordComponentInfo; import jdk.internal.classfile.impl.TemporaryConstantPool; import jdk.internal.classfile.impl.UnboundAttribute; +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -58,7 +59,7 @@ public sealed interface RecordComponentInfo * {@return the field descriptor of this component, as a {@linkplain ClassDesc}} */ default ClassDesc descriptorSymbol() { - return ClassDesc.ofDescriptor(descriptor().stringValue()); + return Util.fieldTypeSymbol(descriptor()); } /** @@ -95,7 +96,7 
@@ static RecordComponentInfo of(String name, ClassDesc descriptor, List<Attribute<?>> attributes) { return new UnboundAttribute.UnboundRecordComponentInfo(TemporaryConstantPool.INSTANCE.utf8Entry(name), - TemporaryConstantPool.INSTANCE.utf8Entry(descriptor.descriptorString()), + TemporaryConstantPool.INSTANCE.utf8Entry(descriptor), attributes); } diff --git a/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java b/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java index 7334d8e54604e..1c0d6e55e3143 100644 --- a/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java @@ -223,9 +223,7 @@ default ModuleEntry moduleEntry(ModuleDesc moduleDesc) { * @param type the symbolic descriptor for a field type */ default NameAndTypeEntry nameAndTypeEntry(String name, ClassDesc type) { - var ret = (NameAndTypeEntryImpl)nameAndTypeEntry(utf8Entry(name), utf8Entry(type.descriptorString())); - ret.typeSym = type; - return ret; + return nameAndTypeEntry(utf8Entry(name), utf8Entry(type)); } /** @@ -238,9 +236,7 @@ default NameAndTypeEntry nameAndTypeEntry(String name, ClassDesc type) { * @param type the symbolic descriptor for a method type */ default NameAndTypeEntry nameAndTypeEntry(String name, MethodTypeDesc type) { - var ret = (NameAndTypeEntryImpl)nameAndTypeEntry(utf8Entry(name), utf8Entry(type.descriptorString())); - ret.typeSym = type; - return ret; + return nameAndTypeEntry(utf8Entry(name), utf8Entry(type)); } /** diff --git a/src/java.base/share/classes/java/lang/classfile/instruction/LocalVariable.java b/src/java.base/share/classes/java/lang/classfile/instruction/LocalVariable.java index 4d3d755386763..84a8599315218 100644 --- a/src/java.base/share/classes/java/lang/classfile/instruction/LocalVariable.java +++ b/src/java.base/share/classes/java/lang/classfile/instruction/LocalVariable.java @@ -36,6 +36,7 @@ import jdk.internal.classfile.impl.AbstractPseudoInstruction; import jdk.internal.classfile.impl.BoundLocalVariable; import jdk.internal.classfile.impl.TemporaryConstantPool; +import jdk.internal.classfile.impl.Util; import jdk.internal.javac.PreviewFeature; /** @@ -70,7 +71,7 @@ public sealed interface LocalVariable extends PseudoInstruction * {@return the local variable type, as a symbolic descriptor} */ default ClassDesc typeSymbol() { - return ClassDesc.ofDescriptor(type().stringValue()); + return Util.fieldTypeSymbol(type()); } /** @@ -109,7 +110,7 @@ static LocalVariable of(int slot, Utf8Entry nameEntry, Utf8Entry descriptorEntry static LocalVariable of(int slot, String name, ClassDesc descriptor, Label startScope, Label endScope) { return of(slot, TemporaryConstantPool.INSTANCE.utf8Entry(name), - TemporaryConstantPool.INSTANCE.utf8Entry(descriptor.descriptorString()), + TemporaryConstantPool.INSTANCE.utf8Entry(descriptor), startScope, endScope); } } diff --git a/src/java.base/share/classes/java/lang/foreign/MemorySegment.java b/src/java.base/share/classes/java/lang/foreign/MemorySegment.java index 38fd36bbb1584..cb1f3707db68c 100644 --- a/src/java.base/share/classes/java/lang/foreign/MemorySegment.java +++ b/src/java.base/share/classes/java/lang/foreign/MemorySegment.java @@ -43,6 +43,7 @@ import java.util.stream.Stream; import jdk.internal.foreign.AbstractMemorySegmentImpl; import jdk.internal.foreign.MemorySessionImpl; +import jdk.internal.foreign.SegmentBulkOperations; import 
jdk.internal.foreign.SegmentFactories; import jdk.internal.javac.Restricted; import jdk.internal.reflect.CallerSensitive; @@ -1571,7 +1572,7 @@ static MemorySegment ofAddress(long address) { static void copy(MemorySegment srcSegment, long srcOffset, MemorySegment dstSegment, long dstOffset, long bytes) { - AbstractMemorySegmentImpl.copy((AbstractMemorySegmentImpl) srcSegment, srcOffset, + SegmentBulkOperations.copy((AbstractMemorySegmentImpl) srcSegment, srcOffset, (AbstractMemorySegmentImpl) dstSegment, dstOffset, bytes); } @@ -2635,8 +2636,9 @@ static void copy(Object srcArray, int srcIndex, */ static long mismatch(MemorySegment srcSegment, long srcFromOffset, long srcToOffset, MemorySegment dstSegment, long dstFromOffset, long dstToOffset) { - return AbstractMemorySegmentImpl.mismatch(srcSegment, srcFromOffset, srcToOffset, - dstSegment, dstFromOffset, dstToOffset); + return SegmentBulkOperations.mismatch( + (AbstractMemorySegmentImpl)Objects.requireNonNull(srcSegment), srcFromOffset, srcToOffset, + (AbstractMemorySegmentImpl)Objects.requireNonNull(dstSegment), dstFromOffset, dstToOffset); } /** diff --git a/src/java.base/share/classes/java/util/zip/ZipCoder.java b/src/java.base/share/classes/java/util/zip/ZipCoder.java index 25c6e3a2e4acd..6692703c1b3f0 100644 --- a/src/java.base/share/classes/java/util/zip/ZipCoder.java +++ b/src/java.base/share/classes/java/util/zip/ZipCoder.java @@ -158,13 +158,6 @@ static int hash(String name) { return hsh; } - boolean hasTrailingSlash(byte[] a, int end) { - byte[] slashBytes = slashBytes(); - return end >= slashBytes.length && - Arrays.mismatch(a, end - slashBytes.length, end, slashBytes, 0, slashBytes.length) == -1; - } - - private byte[] slashBytes; private final Charset cs; protected CharsetDecoder dec; private CharsetEncoder enc; @@ -191,23 +184,6 @@ private CharsetEncoder encoder() { return enc; } - // This method produces an array with the bytes that will correspond to a - // trailing '/' in the chosen character encoding. - // - // While in most charsets a trailing slash will be encoded as the byte - // value of '/', this does not hold in the general case. E.g., in charsets - // such as UTF-16 and UTF-32 it will be represented by a sequence of 2 or 4 - // bytes, respectively. - private byte[] slashBytes() { - if (slashBytes == null) { - // Take into account charsets that produce a BOM, e.g., UTF-16 - byte[] slash = "/".getBytes(cs); - byte[] doubleSlash = "//".getBytes(cs); - slashBytes = Arrays.copyOfRange(doubleSlash, slash.length, doubleSlash.length); - } - return slashBytes; - } - /** * This method is used by ZipFile.Source.getEntryPos when comparing the * name being looked up to candidate names encoded in the CEN byte @@ -297,8 +273,7 @@ int checkedHash(byte[] a, int off, int len) throws Exception { return h; } - @Override - boolean hasTrailingSlash(byte[] a, int end) { + private boolean hasTrailingSlash(byte[] a, int end) { return end > 0 && a[end - 1] == '/'; } diff --git a/src/java.base/share/classes/java/util/zip/ZipFile.java b/src/java.base/share/classes/java/util/zip/ZipFile.java index 6ab70d4bdb81d..80126bc79734d 100644 --- a/src/java.base/share/classes/java/util/zip/ZipFile.java +++ b/src/java.base/share/classes/java/util/zip/ZipFile.java @@ -347,9 +347,12 @@ public ZipEntry getEntry(String name) { ZipEntry entry = null; synchronized (this) { ensureOpen(); - int pos = res.zsrc.getEntryPos(name, true); - if (pos != -1) { - entry = getZipEntry(name, pos); + // Look up the name and CEN header position of the entry. 
+ // The resolved name may include a trailing slash. + // See Source::getEntryPos for details. + EntryPos pos = res.zsrc.getEntryPos(name, true); + if (pos != null) { + entry = getZipEntry(pos.name, pos.pos); } } return entry; @@ -387,7 +390,12 @@ public InputStream getInputStream(ZipEntry entry) throws IOException { if (Objects.equals(lastEntryName, entry.name)) { pos = lastEntryPos; } else { - pos = zsrc.getEntryPos(entry.name, false); + EntryPos entryPos = zsrc.getEntryPos(entry.name, false); + if (entryPos != null) { + pos = entryPos.pos; + } else { + pos = -1; + } } if (pos == -1) { return null; @@ -540,7 +548,8 @@ public T next() { throw new NoSuchElementException(); } // each "entry" has 3 ints in table entries - return (T)getZipEntry(null, res.zsrc.getEntryPos(i++ * 3)); + int pos = res.zsrc.getEntryPos(i++ * 3); + return (T)getZipEntry(getEntryName(pos), pos); } } @@ -612,7 +621,7 @@ public Stream stream() { synchronized (this) { ensureOpen(); return StreamSupport.stream(new EntrySpliterator<>(0, res.zsrc.total, - pos -> getZipEntry(null, pos)), false); + pos -> getZipEntry(getEntryName(pos), pos)), false); } } @@ -655,7 +664,7 @@ private Stream jarStream() { synchronized (this) { ensureOpen(); return StreamSupport.stream(new EntrySpliterator<>(0, res.zsrc.total, - pos -> (JarEntry)getZipEntry(null, pos)), false); + pos -> (JarEntry)getZipEntry(getEntryName(pos), pos)), false); } } @@ -665,30 +674,10 @@ private Stream jarStream() { /* Check ensureOpen() before invoking this method */ private ZipEntry getZipEntry(String name, int pos) { byte[] cen = res.zsrc.cen; - int nlen = CENNAM(cen, pos); - int elen = CENEXT(cen, pos); - int clen = CENCOM(cen, pos); + ZipEntry e = this instanceof JarFile jarFile + ? Source.JUJA.entryFor(jarFile, name) + : new ZipEntry(name); - ZipCoder zc = res.zsrc.zipCoderForPos(pos); - if (name != null) { - // only need to check for mismatch of trailing slash - if (nlen > 0 && - !name.isEmpty() && - zc.hasTrailingSlash(cen, pos + CENHDR + nlen) && - !name.endsWith("/")) - { - name += '/'; - } - } else { - // invoked from iterator, use the entry name stored in cen - name = zc.toString(cen, pos + CENHDR, nlen); - } - ZipEntry e; - if (this instanceof JarFile) { - e = Source.JUJA.entryFor((JarFile)this, name); - } else { - e = new ZipEntry(name); - } e.flag = CENFLG(cen, pos); e.xdostime = CENTIM(cen, pos); e.crc = CENCRC(cen, pos); @@ -700,12 +689,17 @@ private ZipEntry getZipEntry(String name, int pos) { e.externalFileAttributes = CENATX_PERMS(cen, pos) & 0xFFFF; } + int nlen = CENNAM(cen, pos); + int elen = CENEXT(cen, pos); + int clen = CENCOM(cen, pos); + if (elen != 0) { int start = pos + CENHDR + nlen; e.setExtra0(Arrays.copyOfRange(cen, start, start + elen), true, false); } if (clen != 0) { int start = pos + CENHDR + nlen + elen; + ZipCoder zc = res.zsrc.zipCoderForPos(pos); e.comment = zc.toString(cen, start, clen); } lastEntryName = e.name; @@ -1176,6 +1170,8 @@ public void setExternalFileAttributes(ZipEntry ze, int externalFileAttributes) { } ); } + // Represents the resolved name and position of a CEN record + static record EntryPos(String name, int pos) {} private static class Source { // While this is only used from ZipFile, defining it there would cause @@ -1849,12 +1845,12 @@ private static void zerror(String msg) throws ZipException { } /* - * Returns the {@code pos} of the ZIP cen entry corresponding to the - * specified entry name, or -1 if not found. 
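The getEntryPos rewrite that follows is the heart of this ZipFile change: the CEN scan now reports which name actually matched (exact, or the directory fallback "name/") together with its position. A minimal model of that contract, with a Map standing in for the real CEN hash-bucket scan (the CenLookup class and its field are illustrative stand-ins, not JDK code):

```java
import java.util.Map;

// Sketch of the EntryPos lookup contract: prefer an exact match for "name";
// when addSlash is set, fall back to the directory entry "name/" and report
// the slashed name that actually matched alongside its position.
record EntryPos(String name, int pos) {}

final class CenLookup {
    private final Map<String, Integer> positions; // stand-in for the CEN scan

    CenLookup(Map<String, Integer> positions) {
        this.positions = positions;
    }

    EntryPos getEntryPos(String name, boolean addSlash) {
        Integer exact = positions.get(name);
        if (exact != null) {
            return new EntryPos(name, exact);         // EXACT_MATCH
        }
        if (addSlash && !name.endsWith("/")) {
            Integer dir = positions.get(name + "/");
            if (dir != null) {
                return new EntryPos(name + "/", dir); // DIRECTORY_MATCH fallback
            }
        }
        return null;                                  // no entry found
    }
}
```

Returning the resolved name along with the position is what lets getZipEntry above stop re-deriving the trailing slash per entry, which in turn is why the charset-aware hasTrailingSlash/slashBytes machinery could be deleted from ZipCoder.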
+ * Returns the resolved name and position of the ZIP cen entry corresponding + * to the specified entry name, or {@code null} if not found. */ - private int getEntryPos(String name, boolean addSlash) { + private EntryPos getEntryPos(String name, boolean addSlash) { if (total == 0) { - return -1; + return null; } int hsh = ZipCoder.hash(name); @@ -1877,7 +1873,7 @@ private int getEntryPos(String name, boolean addSlash) { switch (zc.compare(name, cen, noff, nlen, addSlash)) { case EXACT_MATCH: // We found an exact match for "name" - return pos; + return new EntryPos(name, pos); case DIRECTORY_MATCH: // We found the directory "name/" // Track its position, then continue the search for "name" @@ -1892,10 +1888,10 @@ private int getEntryPos(String name, boolean addSlash) { // Reaching this point means we did not find "name". // Return the position of "name/" if we found it if (dirPos != -1) { - return dirPos; + return new EntryPos(name + "/", dirPos); } // No entry found - return -1; + return null; } private ZipCoder zipCoderForPos(int pos) { diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java index 450f6ae1c8c90..7f14560fd1854 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java @@ -54,6 +54,7 @@ import jdk.internal.access.JavaLangAccess; import jdk.internal.access.SharedSecrets; import jdk.internal.util.ArraysSupport; +import jdk.internal.vm.annotation.Stable; public abstract sealed class AbstractPoolEntry { /* @@ -146,12 +147,14 @@ enum State { RAW, BYTE, CHAR, STRING } private final int offset; private final int rawLen; // Set in any state other than RAW - private int hash; - private int charLen; + private @Stable int hash; + private @Stable int charLen; // Set in CHAR state - private char[] chars; + private @Stable char[] chars; // Only set in STRING state - private String stringValue; + private @Stable String stringValue; + // The descriptor symbol, if this is a descriptor + @Stable TypeDescriptor typeSym; Utf8EntryImpl(ConstantPool cpm, int index, byte[] rawBytes, int offset, int rawLen) { @@ -187,6 +190,7 @@ enum State { RAW, BYTE, CHAR, STRING } this.charLen = u.charLen; this.chars = u.chars; this.stringValue = u.stringValue; + this.typeSym = u.typeSym; } /** @@ -419,6 +423,22 @@ void writeTo(BufWriterImpl pool) { pool.writeUTF(stringValue); } } + + public ClassDesc fieldTypeSymbol() { + if (typeSym instanceof ClassDesc cd) + return cd; + var ret = ClassDesc.ofDescriptor(stringValue()); + typeSym = ret; + return ret; + } + + public MethodTypeDesc methodTypeSymbol() { + if (typeSym instanceof MethodTypeDesc mtd) + return mtd; + var ret = MethodTypeDesc.ofDescriptor(stringValue()); + typeSym = ret; + return ret; + } } abstract static sealed class AbstractRefEntry extends AbstractPoolEntry { @@ -585,8 +605,6 @@ public boolean equals(Object o) { public static final class NameAndTypeEntryImpl extends AbstractRefsEntry implements NameAndTypeEntry { - public TypeDescriptor typeSym = null; - NameAndTypeEntryImpl(ConstantPool cpm, int index, Utf8EntryImpl name, Utf8EntryImpl type) { super(cpm, ClassFile.TAG_NAMEANDTYPE, index, name, type); } @@ -601,31 +619,12 @@ public Utf8Entry type() { return ref2; } - public ClassDesc fieldTypeSymbol() { - if (typeSym instanceof ClassDesc cd) { - return cd; - } else { - return (ClassDesc)(typeSym = 
ClassDesc.ofDescriptor(ref2.stringValue())); - } - } - - public MethodTypeDesc methodTypeSymbol() { - if (typeSym instanceof MethodTypeDesc mtd) { - return mtd; - } else { - return (MethodTypeDesc)(typeSym = MethodTypeDesc.ofDescriptor(ref2.stringValue())); - } - } - @Override public NameAndTypeEntry clone(ConstantPoolBuilder cp) { if (cp.canWriteDirect(constantPool)) { return this; - } else { - var ret = (NameAndTypeEntryImpl)cp.nameAndTypeEntry(ref1, ref2); - ret.typeSym = typeSym; - return ret; } + return cp.nameAndTypeEntry(ref1, ref2); } @Override @@ -897,8 +896,6 @@ public static final class MethodTypeEntryImpl extends AbstractRefEntry implements MethodTypeEntry { - public MethodTypeDesc sym = null; - MethodTypeEntryImpl(ConstantPool cpm, int index, Utf8EntryImpl descriptor) { super(cpm, ClassFile.TAG_METHODTYPE, index, descriptor); } @@ -912,20 +909,13 @@ public Utf8Entry descriptor() { public MethodTypeEntry clone(ConstantPoolBuilder cp) { if (cp.canWriteDirect(constantPool)) { return this; - } else { - var ret = (MethodTypeEntryImpl)cp.methodTypeEntry(ref1); - ret.sym = sym; - return ret; } + return cp.methodTypeEntry(ref1); } @Override public MethodTypeDesc asSymbol() { - var sym = this.sym; - if (sym != null) { - return sym; - } - return this.sym = MethodTypeDesc.ofDescriptor(descriptor().stringValue()); + return ref1.methodTypeSymbol(); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BoundLocalVariable.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BoundLocalVariable.java index a5953c86f5dcc..3bbcd243cc11e 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BoundLocalVariable.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/BoundLocalVariable.java @@ -46,7 +46,7 @@ public Utf8Entry type() { @Override public ClassDesc typeSymbol() { - return ClassDesc.ofDescriptor(type().stringValue()); + return Util.fieldTypeSymbol(type()); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java index 84ddd09b990ec..bc6ab555ae597 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java @@ -53,7 +53,6 @@ public final class BufferedMethodBuilder private AccessFlags flags; private final MethodModel original; private int[] parameterSlots; - MethodTypeDesc mDesc; public BufferedMethodBuilder(SplitConstantPool constantPool, ClassFileImpl context, @@ -102,14 +101,7 @@ public Utf8Entry methodType() { @Override public MethodTypeDesc methodTypeSymbol() { - if (mDesc == null) { - if (original instanceof MethodInfo mi) { - mDesc = mi.methodTypeSymbol(); - } else { - mDesc = MethodTypeDesc.ofDescriptor(methodType().stringValue()); - } - } - return mDesc; + return Util.methodTypeSymbol(methodType()); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java index 50c1590e8a271..8f05f20d739be 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java @@ -79,15 +79,6 @@ public ClassBuilder withMethod(Utf8Entry name, Utf8Entry descriptor, int flags, return this; } - @Override - public ClassBuilder withMethod(String name, 
MethodTypeDesc descriptor, int flags, Consumer handler) { - var mb = new BufferedMethodBuilder(terminal.constantPool, terminal.context, - constantPool().utf8Entry(name), constantPool().utf8Entry(descriptor), flags, null); - mb.mDesc = descriptor; - consumer.accept(mb.run(handler).toModel()); - return this; - } - @Override public ClassBuilder transformMethod(MethodModel method, MethodTransform transform) { BufferedMethodBuilder builder = new BufferedMethodBuilder(terminal.constantPool, terminal.context, diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java index 7d67ed272aff2..6901ae7e24cf9 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java @@ -90,14 +90,6 @@ public ClassBuilder withFlags(int flags) { return this; } - @Override - public ClassBuilder withField(String name, - ClassDesc descriptor, - int flags) { - return withField(new DirectFieldBuilder(constantPool, context, - constantPool.utf8Entry(name), constantPool.utf8Entry(descriptor), flags, null)); - } - @Override public ClassBuilder withField(Utf8Entry name, Utf8Entry descriptor, @@ -130,13 +122,6 @@ public ClassBuilder withMethod(Utf8Entry name, .run(handler)); } - @Override - public ClassBuilder withMethod(String name, MethodTypeDesc descriptor, int flags, Consumer handler) { - var method = new DirectMethodBuilder(constantPool, context, constantPool.utf8Entry(name), constantPool.utf8Entry(descriptor), flags, null); - method.mDesc = descriptor; - return withMethod(method.run(handler)); - } - @Override public ClassBuilder transformMethod(MethodModel method, MethodTransform transform) { DirectMethodBuilder builder = new DirectMethodBuilder(constantPool, context, method.methodName(), diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectMethodBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectMethodBuilder.java index fac6b7384e2fc..ec4e148baf30d 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectMethodBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectMethodBuilder.java @@ -46,7 +46,6 @@ public final class DirectMethodBuilder final Utf8Entry desc; int flags; int[] parameterSlots; - MethodTypeDesc mDesc; public DirectMethodBuilder(SplitConstantPool constantPool, ClassFileImpl context, @@ -87,14 +86,7 @@ public Utf8Entry methodType() { @Override public MethodTypeDesc methodTypeSymbol() { - if (mDesc == null) { - if (original instanceof MethodInfo mi) { - mDesc = mi.methodTypeSymbol(); - } else { - mDesc = MethodTypeDesc.ofDescriptor(methodType().stringValue()); - } - } - return mDesc; + return Util.methodTypeSymbol(methodType()); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/EntryMap.java b/src/java.base/share/classes/jdk/internal/classfile/impl/EntryMap.java index 4270dbf79a36f..d5ebbbebe7ada 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/EntryMap.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/EntryMap.java @@ -33,10 +33,10 @@ * element is the hash and the second is the mapped index. To look something up * in the map, provide a hash value and an index to map it to, and invoke * firstToken(hash). 
This returns an opaque token that can be provided to - * nextToken(hash, token) to get the next candidate, or to getElementByToken(token) - * or getIndexByToken to get the mapped element or index. + * nextToken(hash, token) to get the next candidate, or to getIndexByToken to + * get the mapped element or index. */ -public abstract class EntryMap { +public final class EntryMap { public static final int NO_VALUE = -1; /** @@ -77,8 +77,6 @@ public EntryMap(int size, float fillFactor) { data = new int[capacity * 2]; } - protected abstract T fetchElement(int index); - public int firstToken(int hash) { if (hash == 0) throw new IllegalArgumentException("hash must be nonzero"); @@ -110,10 +108,6 @@ public int getIndexByToken(int token) { return data[token + 1]; } - public T getElementByToken(int token) { - return fetchElement(data[token + 1]); - } - public void put(int hash, int index) { if (hash == 0) throw new IllegalArgumentException("hash must be nonzero"); diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java index 05de881ba297f..8467152504ee5 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java @@ -41,7 +41,6 @@ public final class MethodImpl private final int startPos, endPos, attributesPos; private List> attributes; private int[] parameterSlots; - private MethodTypeDesc mDesc; public MethodImpl(ClassReader reader, int startPos, int endPos, int attrStart) { this.reader = reader; @@ -75,10 +74,7 @@ public Utf8Entry methodType() { @Override public MethodTypeDesc methodTypeSymbol() { - if (mDesc == null) { - mDesc = MethodTypeDesc.ofDescriptor(methodType().stringValue()); - } - return mDesc; + return Util.methodTypeSymbol(methodType()); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/SplitConstantPool.java b/src/java.base/share/classes/jdk/internal/classfile/impl/SplitConstantPool.java index 4f48c8b731db0..0b5d64023b962 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/SplitConstantPool.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/SplitConstantPool.java @@ -24,7 +24,7 @@ */ package jdk.internal.classfile.impl; -import java.lang.constant.ConstantDesc; +import java.lang.constant.ClassDesc; import java.lang.constant.MethodTypeDesc; import java.util.Arrays; import java.util.List; @@ -63,8 +63,8 @@ public final class SplitConstantPool implements ConstantPoolBuilder { private PoolEntry[] myEntries; private BootstrapMethodEntryImpl[] myBsmEntries; private boolean doneFullScan; - private EntryMap map; - private EntryMap bsmMap; + private EntryMap map; + private EntryMap bsmMap; public SplitConstantPool() { this.size = 1; @@ -178,14 +178,10 @@ void writeTo(BufWriterImpl buf) { } } - private EntryMap map() { + private EntryMap map() { if (map == null) { - map = new EntryMap<>(Math.max(size, 1024), .75f) { - @Override - protected PoolEntry fetchElement(int index) { - return entryByIndex(index); - } - }; + map = new EntryMap(Math.max(size, 1024), .75f); + // Doing a full scan here yields fall-off-the-cliff performance results, // especially if we only need a few entries that are already // inflated (such as attribute names). 
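For context on the EntryMap hunks in this file: EntryMap is a linear-probing table of (hash, index) int pairs, and a token is just the array offset of a candidate pair. Now that getElementByToken and the fetchElement callback are gone, every caller drives the probe itself and resolves indices through entryByIndex or bootstrapMethodEntry. The probing discipline, sketched as a self-contained class (fixed capacity with no growth, simplified names; not the JDK implementation):

```java
// Linear-probing (hash, index) table in the style of EntryMap. A token is
// the even array offset of a candidate pair; 0 marks an empty hash slot,
// which is why callers must supply a nonzero hash.
final class IntPairMap {
    static final int NO_VALUE = -1;

    private final int[] data; // pairs: data[t] = hash, data[t + 1] = mapped index
    private final int mask;   // keeps probe offsets even and in range

    IntPairMap(int capacityPow2) {     // capacity must be a power of two
        data = new int[capacityPow2 * 2];
        mask = data.length - 2;
    }

    void put(int hash, int index) {
        if (hash == 0) throw new IllegalArgumentException("hash must be nonzero");
        int t = (hash * 2) & mask;
        while (data[t] != 0) t = (t + 2) & mask; // sketch assumes table never fills
        data[t] = hash;
        data[t + 1] = index;
    }

    int firstToken(int hash) {
        if (hash == 0) throw new IllegalArgumentException("hash must be nonzero");
        return probe(hash, (hash * 2) & mask);
    }

    int nextToken(int hash, int token) {
        return probe(hash, (token + 2) & mask);
    }

    int getIndexByToken(int token) {
        return data[token + 1];
    }

    // Advance to the next slot whose stored hash matches; an empty slot ends
    // the probe sequence and yields NO_VALUE.
    private int probe(int hash, int t) {
        while (data[t] != 0) {
            if (data[t] == hash) return t;
            t = (t + 2) & mask;
        }
        return NO_VALUE;
    }
}
```

Call sites then take exactly the shape this diff rewrites each findEntry loop into: for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { PoolEntry e = entryByIndex(map.getIndexByToken(token)); ... } — hash collisions are expected, so the loop re-checks the tag and payload of each candidate entry.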
@@ -220,14 +216,9 @@ private void fullScan() { doneFullScan = true; } - private EntryMap bsmMap() { + private EntryMap bsmMap() { if (bsmMap == null) { - bsmMap = new EntryMap<>(Math.max(bsmSize, 16), .75f) { - @Override - protected BootstrapMethodEntryImpl fetchElement(int index) { - return bootstrapMethodEntry(index); - } - }; + bsmMap = new EntryMap(Math.max(bsmSize, 16), .75f); for (int i=0; i map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == TAG_INTEGER && e instanceof AbstractPoolEntry.IntegerEntryImpl ce && ce.intValue() == val) @@ -285,9 +276,9 @@ private IntegerEntry findIntEntry(int val) { private LongEntry findLongEntry(long val) { int hash = AbstractPoolEntry.hash1(TAG_LONG, Long.hashCode(val)); - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == TAG_LONG && e instanceof AbstractPoolEntry.LongEntryImpl ce && ce.longValue() == val) @@ -302,9 +293,9 @@ private LongEntry findLongEntry(long val) { private FloatEntry findFloatEntry(float val) { int hash = AbstractPoolEntry.hash1(TAG_FLOAT, Float.hashCode(val)); - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == TAG_FLOAT && e instanceof AbstractPoolEntry.FloatEntryImpl ce && ce.floatValue() == val) @@ -319,9 +310,9 @@ private FloatEntry findFloatEntry(float val) { private DoubleEntry findDoubleEntry(double val) { int hash = AbstractPoolEntry.hash1(TAG_DOUBLE, Double.hashCode(val)); - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == TAG_DOUBLE && e instanceof AbstractPoolEntry.DoubleEntryImpl ce && ce.doubleValue() == val) @@ -337,9 +328,9 @@ private DoubleEntry findDoubleEntry(double val) { private AbstractPoolEntry findEntry(int tag, T ref1) { // invariant: canWriteDirect(ref1.constantPool()) int hash = AbstractPoolEntry.hash1(tag, ref1.index()); - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == tag && e instanceof AbstractPoolEntry.AbstractRefEntry re && re.ref1 == ref1) @@ -356,9 +347,9 @@ private AbstractPoolEntry findEntry(int tag, T ref1 AbstractPoolEntry findEntry(int tag, T ref1, U ref2) { // invariant: canWriteDirect(ref1.constantPool()), canWriteDirect(ref2.constantPool()) int hash = AbstractPoolEntry.hash2(tag, ref1.index(), ref2.index()); - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == tag && e instanceof AbstractPoolEntry.AbstractRefsEntry re && re.ref1 == ref1 @@ -374,10 +365,10 @@ AbstractPoolEntry findEntry(int 
tag, T ref1, U ref2) { } private AbstractPoolEntry.Utf8EntryImpl tryFindUtf8(int hash, String target) { - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == ClassFile.TAG_UTF8 && e instanceof AbstractPoolEntry.Utf8EntryImpl ce && ce.hashCode() == hash @@ -392,9 +383,9 @@ private AbstractPoolEntry.Utf8EntryImpl tryFindUtf8(int hash, String target) { } private AbstractPoolEntry.Utf8EntryImpl tryFindUtf8(int hash, AbstractPoolEntry.Utf8EntryImpl target) { - EntryMap map = map(); + EntryMap map = map(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - PoolEntry e = map.getElementByToken(token); + PoolEntry e = entryByIndex(map.getIndexByToken(token)); if (e.tag() == ClassFile.TAG_UTF8 && e instanceof AbstractPoolEntry.Utf8EntryImpl ce && target.equalsUtf8(ce)) @@ -407,6 +398,20 @@ private AbstractPoolEntry.Utf8EntryImpl tryFindUtf8(int hash, AbstractPoolEntry. return null; } + @Override + public Utf8Entry utf8Entry(ClassDesc desc) { + var utf8 = utf8Entry(desc.descriptorString()); + utf8.typeSym = desc; + return utf8; + } + + @Override + public Utf8Entry utf8Entry(MethodTypeDesc desc) { + var utf8 = utf8Entry(desc.descriptorString()); + utf8.typeSym = desc; + return utf8; + } + @Override public AbstractPoolEntry.Utf8EntryImpl utf8Entry(String s) { int hash = AbstractPoolEntry.hashString(s.hashCode()); @@ -489,9 +494,7 @@ public InterfaceMethodRefEntry interfaceMethodRefEntry(ClassEntry owner, NameAnd @Override public MethodTypeEntry methodTypeEntry(MethodTypeDesc descriptor) { - var ret = (AbstractPoolEntry.MethodTypeEntryImpl)methodTypeEntry(utf8Entry(descriptor.descriptorString())); - ret.sym = descriptor; - return ret; + return methodTypeEntry(utf8Entry(descriptor)); } @Override @@ -513,9 +516,9 @@ public MethodHandleEntry methodHandleEntry(int refKind, MemberRefEntry reference } int hash = AbstractPoolEntry.hash2(TAG_METHODHANDLE, refKind, reference.index()); - EntryMap map1 = map(); + EntryMap map1 = map(); for (int token = map1.firstToken(hash); token != -1; token = map1.nextToken(hash, token)) { - PoolEntry e = map1.getElementByToken(token); + PoolEntry e = entryByIndex(map1.getIndexByToken(token)); if (e.tag() == TAG_METHODHANDLE && e instanceof AbstractPoolEntry.MethodHandleEntryImpl ce && ce.kind() == refKind && ce.reference() == reference) @@ -539,9 +542,9 @@ public InvokeDynamicEntry invokeDynamicEntry(BootstrapMethodEntry bootstrapMetho nameAndType = nameAndTypeEntry(nameAndType.name(), nameAndType.type()); int hash = AbstractPoolEntry.hash2(TAG_INVOKEDYNAMIC, bootstrapMethodEntry.bsmIndex(), nameAndType.index()); - EntryMap map1 = map(); + EntryMap map1 = map(); for (int token = map1.firstToken(hash); token != -1; token = map1.nextToken(hash, token)) { - PoolEntry e = map1.getElementByToken(token); + PoolEntry e = entryByIndex(map1.getIndexByToken(token)); if (e.tag() == TAG_INVOKEDYNAMIC && e instanceof AbstractPoolEntry.InvokeDynamicEntryImpl ce && ce.bootstrap() == bootstrapMethodEntry && ce.nameAndType() == nameAndType) @@ -570,9 +573,9 @@ public ConstantDynamicEntry constantDynamicEntry(BootstrapMethodEntry bootstrapM nameAndType = nameAndTypeEntry(nameAndType.name(), nameAndType.type()); int hash = AbstractPoolEntry.hash2(TAG_CONSTANTDYNAMIC, bootstrapMethodEntry.bsmIndex(), nameAndType.index()); - EntryMap map1 = map(); + EntryMap 
map1 = map(); for (int token = map1.firstToken(hash); token != -1; token = map1.nextToken(hash, token)) { - PoolEntry e = map1.getElementByToken(token); + PoolEntry e = entryByIndex(map1.getIndexByToken(token)); if (e.tag() == TAG_CONSTANTDYNAMIC && e instanceof AbstractPoolEntry.ConstantDynamicEntryImpl ce && ce.bootstrap() == bootstrapMethodEntry && ce.nameAndType() == nameAndType) @@ -640,9 +643,9 @@ public BootstrapMethodEntry bsmEntry(MethodHandleEntry methodReference, } AbstractPoolEntry.MethodHandleEntryImpl mre = (AbstractPoolEntry.MethodHandleEntryImpl) methodReference; int hash = BootstrapMethodEntryImpl.computeHashCode(mre, arguments); - EntryMap map = bsmMap(); + EntryMap map = bsmMap(); for (int token = map.firstToken(hash); token != -1; token = map.nextToken(hash, token)) { - BootstrapMethodEntryImpl e = map.getElementByToken(token); + BootstrapMethodEntryImpl e = bootstrapMethodEntry(map.getIndexByToken(token)); if (e.bootstrapMethod() == mre && e.arguments().equals(arguments)) { return e; } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java b/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java index b0f1de65d9291..7f3dd914e7f8c 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java @@ -30,6 +30,7 @@ import java.lang.classfile.MethodBuilder; import java.lang.classfile.PseudoInstruction; import java.lang.classfile.constantpool.PoolEntry; +import java.lang.classfile.constantpool.Utf8Entry; import java.lang.constant.ClassDesc; import java.lang.constant.MethodTypeDesc; import java.util.AbstractList; @@ -220,12 +221,20 @@ public static boolean has(AccessFlag.Location location, int flagsMask, AccessFla return (flag.mask() & flagsMask) == flag.mask() && flag.locations().contains(location); } + public static ClassDesc fieldTypeSymbol(Utf8Entry utf8) { + return ((AbstractPoolEntry.Utf8EntryImpl) utf8).fieldTypeSymbol(); + } + + public static MethodTypeDesc methodTypeSymbol(Utf8Entry utf8) { + return ((AbstractPoolEntry.Utf8EntryImpl) utf8).methodTypeSymbol(); + } + public static ClassDesc fieldTypeSymbol(NameAndTypeEntry nat) { - return ((AbstractPoolEntry.NameAndTypeEntryImpl)nat).fieldTypeSymbol(); + return fieldTypeSymbol(nat.type()); } public static MethodTypeDesc methodTypeSymbol(NameAndTypeEntry nat) { - return ((AbstractPoolEntry.NameAndTypeEntryImpl)nat).methodTypeSymbol(); + return methodTypeSymbol(nat.type()); } @SuppressWarnings("unchecked") diff --git a/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java b/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java index 83b11b7ce686b..64994af5cb790 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java +++ b/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java @@ -72,8 +72,6 @@ public abstract sealed class AbstractMemorySegmentImpl implements MemorySegment, SegmentAllocator, BiFunction, RuntimeException> permits HeapMemorySegmentImpl, NativeMemorySegmentImpl { - private static final ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess(); - static final JavaNioAccess NIO_ACCESS = SharedSecrets.getJavaNioAccess(); final long length; @@ -189,53 +187,10 @@ public Stream elements(MemoryLayout elementLayout) { return StreamSupport.stream(spliterator(elementLayout), false); } - // FILL_NATIVE_THRESHOLD must be a power of two and should be greater than 
2^3 - // Update the value for Aarch64 once 8338975 is fixed. - private static final long FILL_NATIVE_THRESHOLD = 1L << (Architecture.isAARCH64() ? 10 : 5); - - @Override @ForceInline + @Override public final MemorySegment fill(byte value) { - checkReadOnly(false); - if (length == 0) { - // Implicit state check - checkValidState(); - } else if (length < FILL_NATIVE_THRESHOLD) { - // 0 <= length < FILL_NATIVE_LIMIT : 0...0X...XXXX - - // Handle smaller segments directly without transitioning to native code - final long u = Byte.toUnsignedLong(value); - final long longValue = u << 56 | u << 48 | u << 40 | u << 32 | u << 24 | u << 16 | u << 8 | u; - - int offset = 0; - // 0...0X...X000 - final int limit = (int) (length & (FILL_NATIVE_THRESHOLD - 8)); - for (; offset < limit; offset += 8) { - SCOPED_MEMORY_ACCESS.putLong(sessionImpl(), unsafeGetBase(), unsafeGetOffset() + offset, longValue); - } - int remaining = (int) length - limit; - // 0...0X00 - if (remaining >= 4) { - SCOPED_MEMORY_ACCESS.putInt(sessionImpl(), unsafeGetBase(), unsafeGetOffset() + offset, (int) longValue); - offset += 4; - remaining -= 4; - } - // 0...00X0 - if (remaining >= 2) { - SCOPED_MEMORY_ACCESS.putShort(sessionImpl(), unsafeGetBase(), unsafeGetOffset() + offset, (short) longValue); - offset += 2; - remaining -= 2; - } - // 0...000X - if (remaining == 1) { - SCOPED_MEMORY_ACCESS.putByte(sessionImpl(), unsafeGetBase(), unsafeGetOffset() + offset, value); - } - // We have now fully handled 0...0X...XXXX - } else { - // Handle larger segments via native calls - SCOPED_MEMORY_ACCESS.setMemory(sessionImpl(), unsafeGetBase(), unsafeGetOffset(), length, value); - } - return this; + return SegmentBulkOperations.fill(this, value); } @Override @@ -244,38 +199,6 @@ public MemorySegment allocate(long byteSize, long byteAlignment) { return asSlice(0, byteSize, byteAlignment); } - /** - * Mismatch over long lengths. 
- */ - public static long vectorizedMismatchLargeForBytes(MemorySessionImpl aSession, MemorySessionImpl bSession, - Object a, long aOffset, - Object b, long bOffset, - long length) { - long off = 0; - long remaining = length; - int i, size; - boolean lastSubRange = false; - while (remaining > 7 && !lastSubRange) { - if (remaining > Integer.MAX_VALUE) { - size = Integer.MAX_VALUE; - } else { - size = (int) remaining; - lastSubRange = true; - } - i = SCOPED_MEMORY_ACCESS.vectorizedMismatch(aSession, bSession, - a, aOffset + off, - b, bOffset + off, - size, ArraysSupport.LOG2_ARRAY_BYTE_INDEX_SCALE); - if (i >= 0) - return off + i; - - i = size - ~i; - off += i; - remaining -= i; - } - return ~remaining; - } - @Override public final ByteBuffer asByteBuffer() { checkArraySize("ByteBuffer", 1); @@ -314,7 +237,7 @@ public final Optional asOverlappingSlice(MemorySegment other) { } @ForceInline - private boolean overlaps(AbstractMemorySegmentImpl that) { + boolean overlaps(AbstractMemorySegmentImpl that) { if (unsafeGetBase() == that.unsafeGetBase()) { // both either native or the same heap segment final long thisStart = this.unsafeGetOffset(); final long thatStart = that.unsafeGetOffset(); @@ -334,7 +257,8 @@ public MemorySegment copyFrom(MemorySegment src) { @Override public long mismatch(MemorySegment other) { Objects.requireNonNull(other); - return MemorySegment.mismatch(this, 0, byteSize(), other, 0, other.byteSize()); + return SegmentBulkOperations.mismatch(this, 0, byteSize(), + (AbstractMemorySegmentImpl) other, 0, other.byteSize()); } @Override @@ -650,64 +574,6 @@ private static Object bufferRef(Buffer buffer) { } } - // COPY_NATIVE_THRESHOLD must be a power of two and should be greater than 2^3 - private static final long COPY_NATIVE_THRESHOLD = 1 << 6; - - @ForceInline - public static void copy(AbstractMemorySegmentImpl src, long srcOffset, - AbstractMemorySegmentImpl dst, long dstOffset, - long size) { - - Utils.checkNonNegativeIndex(size, "size"); - // Implicit null check for src and dst - src.checkAccess(srcOffset, size, true); - dst.checkAccess(dstOffset, size, false); - - if (size <= 0) { - // Do nothing - } else if (size < COPY_NATIVE_THRESHOLD && !src.overlaps(dst)) { - // 0 < size < FILL_NATIVE_LIMIT : 0...0X...XXXX - // - // Strictly, we could check for !src.asSlice(srcOffset, size).overlaps(dst.asSlice(dstOffset, size) but - // this is a bit slower and it likely very unusual there is any difference in the outcome. Also, if there - // is an overlap, we could tolerate one particular direction of overlap (but not the other). 
- - // 0...0X...X000 - final int limit = (int) (size & (COPY_NATIVE_THRESHOLD - 8)); - int offset = 0; - for (; offset < limit; offset += 8) { - final long v = SCOPED_MEMORY_ACCESS.getLong(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset); - SCOPED_MEMORY_ACCESS.putLong(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v); - } - int remaining = (int) size - offset; - // 0...0X00 - if (remaining >= 4) { - final int v = SCOPED_MEMORY_ACCESS.getInt(src.sessionImpl(), src.unsafeGetBase(),src.unsafeGetOffset() + srcOffset + offset); - SCOPED_MEMORY_ACCESS.putInt(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v); - offset += 4; - remaining -= 4; - } - // 0...00X0 - if (remaining >= 2) { - final short v = SCOPED_MEMORY_ACCESS.getShort(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset); - SCOPED_MEMORY_ACCESS.putShort(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v); - offset += 2; - remaining -=2; - } - // 0...000X - if (remaining == 1) { - final byte v = SCOPED_MEMORY_ACCESS.getByte(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset); - SCOPED_MEMORY_ACCESS.putByte(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v); - } - // We have now fully handled 0...0X...XXXX - } else { - // For larger sizes, the transition to native code pays off - SCOPED_MEMORY_ACCESS.copyMemory(src.sessionImpl(), dst.sessionImpl(), - src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset, - dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset, size); - } - } - @ForceInline public static void copy(MemorySegment srcSegment, ValueLayout srcElementLayout, long srcOffset, MemorySegment dstSegment, ValueLayout dstElementLayout, long dstOffset, @@ -794,40 +660,6 @@ public static void copy(Object srcArray, int srcIndex, } } - public static long mismatch(MemorySegment srcSegment, long srcFromOffset, long srcToOffset, - MemorySegment dstSegment, long dstFromOffset, long dstToOffset) { - AbstractMemorySegmentImpl srcImpl = (AbstractMemorySegmentImpl)Objects.requireNonNull(srcSegment); - AbstractMemorySegmentImpl dstImpl = (AbstractMemorySegmentImpl)Objects.requireNonNull(dstSegment); - long srcBytes = srcToOffset - srcFromOffset; - long dstBytes = dstToOffset - dstFromOffset; - srcImpl.checkAccess(srcFromOffset, srcBytes, true); - dstImpl.checkAccess(dstFromOffset, dstBytes, true); - - long bytes = Math.min(srcBytes, dstBytes); - long i = 0; - if (bytes > 7) { - if (srcImpl.get(JAVA_BYTE, srcFromOffset) != dstImpl.get(JAVA_BYTE, dstFromOffset)) { - return 0; - } - i = AbstractMemorySegmentImpl.vectorizedMismatchLargeForBytes(srcImpl.sessionImpl(), dstImpl.sessionImpl(), - srcImpl.unsafeGetBase(), srcImpl.unsafeGetOffset() + srcFromOffset, - dstImpl.unsafeGetBase(), dstImpl.unsafeGetOffset() + dstFromOffset, - bytes); - if (i >= 0) { - return i; - } - long remaining = ~i; - assert remaining < 8 : "remaining greater than 7: " + remaining; - i = bytes - remaining; - } - for (; i < bytes; i++) { - if (srcImpl.get(JAVA_BYTE, srcFromOffset + i) != dstImpl.get(JAVA_BYTE, dstFromOffset + i)) { - return i; - } - } - return srcBytes != dstBytes ? 
bytes : -1; - } - private static int getScaleFactor(Buffer buffer) { return switch (buffer) { case ByteBuffer _ -> 0; diff --git a/src/java.base/share/classes/jdk/internal/foreign/SegmentBulkOperations.java b/src/java.base/share/classes/jdk/internal/foreign/SegmentBulkOperations.java new file mode 100644 index 0000000000000..41ae247293d3f --- /dev/null +++ b/src/java.base/share/classes/jdk/internal/foreign/SegmentBulkOperations.java @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package jdk.internal.foreign; + +import jdk.internal.misc.ScopedMemoryAccess; +import jdk.internal.util.Architecture; +import jdk.internal.util.ArraysSupport; +import jdk.internal.vm.annotation.ForceInline; +import sun.security.action.GetIntegerAction; + +import java.lang.foreign.MemorySegment; + +/** + * This class contains optimized bulk operation methods that operate on one or several + * memory segments. + *
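+ * These methods are used to implement bulk {@code MemorySegment} operations such
+ * as {@code MemorySegment::mismatch}.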

+ * Generally, the methods attempt to work with as-large-as-possible units of memory at + * a time. + *
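+ * Operations on lengths below a tunable threshold are performed in Java, in chunks
+ * of descending powers of two, while longer operations delegate to native code. The
+ * thresholds can be tuned via the {@code java.lang.foreign.native.threshold.power.*}
+ * system properties, which hold power-of-two exponents for fill, mismatch, and copy.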

+ * It should be noted that when invoking scoped memory access get/set operations, it
+ * is imperative from a performance perspective to convey the sharp types from the
+ * call site in order for the compiler to pick the correct Unsafe access variant.
+ */
+public final class SegmentBulkOperations {
+
+    private SegmentBulkOperations() {}
+
+    private static final ScopedMemoryAccess SCOPED_MEMORY_ACCESS = ScopedMemoryAccess.getScopedMemoryAccess();
+
+    // All the threshold values below MUST be a power of two and should preferably be
+    // greater than or equal to 2^3.
+
+    // Update the FILL value for Aarch64 once 8338975 is fixed.
+    private static final int NATIVE_THRESHOLD_FILL = powerOfPropertyOr("fill", Architecture.isAARCH64() ? 10 : 5);
+    private static final int NATIVE_THRESHOLD_MISMATCH = powerOfPropertyOr("mismatch", 6);
+    private static final int NATIVE_THRESHOLD_COPY = powerOfPropertyOr("copy", 6);
+
+    @ForceInline
+    public static MemorySegment fill(AbstractMemorySegmentImpl dst, byte value) {
+        dst.checkReadOnly(false);
+        if (dst.length == 0) {
+            // Implicit state check
+            dst.checkValidState();
+        } else if (dst.length < NATIVE_THRESHOLD_FILL) {
+            // 0 < length < NATIVE_THRESHOLD_FILL : 0...0X...XXXX
+
+            // Handle smaller segments directly without transitioning to native code
+            final long u = Byte.toUnsignedLong(value);
+            final long longValue = u << 56 | u << 48 | u << 40 | u << 32 | u << 24 | u << 16 | u << 8 | u;
+
+            int offset = 0;
+            // 0...0X...X000
+            final int limit = (int) (dst.length & (NATIVE_THRESHOLD_FILL - 8));
+            for (; offset < limit; offset += 8) {
+                SCOPED_MEMORY_ACCESS.putLongUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + offset, longValue, !Architecture.isLittleEndian());
+            }
+            int remaining = (int) dst.length - limit;
+            // 0...0X00
+            if (remaining >= 4) {
+                SCOPED_MEMORY_ACCESS.putIntUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + offset, (int) longValue, !Architecture.isLittleEndian());
+                offset += 4;
+                remaining -= 4;
+            }
+            // 0...00X0
+            if (remaining >= 2) {
+                SCOPED_MEMORY_ACCESS.putShortUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + offset, (short) longValue, !Architecture.isLittleEndian());
+                offset += 2;
+                remaining -= 2;
+            }
+            // 0...000X
+            if (remaining == 1) {
+                SCOPED_MEMORY_ACCESS.putByte(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + offset, value);
+            }
+            // We have now fully handled 0...0X...XXXX
+        } else {
+            // Handle larger segments via native calls
+            SCOPED_MEMORY_ACCESS.setMemory(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset(), dst.length, value);
+        }
+        return dst;
+    }
+
+    @ForceInline
+    public static void copy(AbstractMemorySegmentImpl src, long srcOffset,
+                            AbstractMemorySegmentImpl dst, long dstOffset,
+                            long size) {
+
+        Utils.checkNonNegativeIndex(size, "size");
+        // Implicit null check for src and dst
+        src.checkAccess(srcOffset, size, true);
+        dst.checkAccess(dstOffset, size, false);
+
+        if (size <= 0) {
+            // Do nothing
+        } else if (size < NATIVE_THRESHOLD_COPY && !src.overlaps(dst)) {
+            // 0 < size < NATIVE_THRESHOLD_COPY : 0...0X...XXXX
+            //
+            // Strictly, we could check for !src.asSlice(srcOffset, size).overlaps(dst.asSlice(dstOffset, size)) but
+            // this is a bit slower and it is likely very unusual that there is any difference in the outcome. Also,
+            // if there is an overlap, we could tolerate one particular direction of overlap (but not the other).
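+            // (With a copy that proceeds from low to high addresses, a destination
+            // starting below the source would be tolerable; a destination starting
+            // above the source would not.)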
+
+            // 0...0X...X000
+            final int limit = (int) (size & (NATIVE_THRESHOLD_COPY - 8));
+            int offset = 0;
+            for (; offset < limit; offset += 8) {
+                final long v = SCOPED_MEMORY_ACCESS.getLongUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset, !Architecture.isLittleEndian());
+                SCOPED_MEMORY_ACCESS.putLongUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v, !Architecture.isLittleEndian());
+            }
+            int remaining = (int) size - offset;
+            // 0...0X00
+            if (remaining >= 4) {
+                final int v = SCOPED_MEMORY_ACCESS.getIntUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset, !Architecture.isLittleEndian());
+                SCOPED_MEMORY_ACCESS.putIntUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v, !Architecture.isLittleEndian());
+                offset += 4;
+                remaining -= 4;
+            }
+            // 0...00X0
+            if (remaining >= 2) {
+                final short v = SCOPED_MEMORY_ACCESS.getShortUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset, !Architecture.isLittleEndian());
+                SCOPED_MEMORY_ACCESS.putShortUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v, !Architecture.isLittleEndian());
+                offset += 2;
+                remaining -= 2;
+            }
+            // 0...000X
+            if (remaining == 1) {
+                final byte v = SCOPED_MEMORY_ACCESS.getByte(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset + offset);
+                SCOPED_MEMORY_ACCESS.putByte(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset + offset, v);
+            }
+            // We have now fully handled 0...0X...XXXX
+        } else {
+            // For larger sizes, the transition to native code pays off
+            SCOPED_MEMORY_ACCESS.copyMemory(src.sessionImpl(), dst.sessionImpl(),
+                    src.unsafeGetBase(), src.unsafeGetOffset() + srcOffset,
+                    dst.unsafeGetBase(), dst.unsafeGetOffset() + dstOffset, size);
+        }
+    }
+
+    @ForceInline
+    public static long mismatch(AbstractMemorySegmentImpl src, long srcFromOffset, long srcToOffset,
+                                AbstractMemorySegmentImpl dst, long dstFromOffset, long dstToOffset) {
+        final long srcBytes = srcToOffset - srcFromOffset;
+        final long dstBytes = dstToOffset - dstFromOffset;
+        src.checkAccess(srcFromOffset, srcBytes, true);
+        dst.checkAccess(dstFromOffset, dstBytes, true);
+
+        final long length = Math.min(srcBytes, dstBytes);
+        final boolean srcAndDstBytesDiffer = srcBytes != dstBytes;
+
+        if (length == 0) {
+            return srcAndDstBytesDiffer ? 0 : -1;
+        } else if (length < NATIVE_THRESHOLD_MISMATCH) {
+            return mismatch(src, srcFromOffset, dst, dstFromOffset, 0, (int) length, srcAndDstBytesDiffer);
+        } else {
+            long i;
+            if (SCOPED_MEMORY_ACCESS.getByte(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset) !=
+                SCOPED_MEMORY_ACCESS.getByte(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset)) {
+                return 0;
+            }
+            i = vectorizedMismatchLargeForBytes(src.sessionImpl(), dst.sessionImpl(),
+                    src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset,
+                    dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset,
+                    length);
+            if (i >= 0) {
+                return i;
+            }
+            final long remaining = ~i;
+            assert remaining < 8 : "remaining greater than 7: " + remaining;
+            i = length - remaining;
+            return mismatch(src, srcFromOffset + i, dst, dstFromOffset + i, i, (int) remaining, srcAndDstBytesDiffer);
+        }
+    }
+
+    // Mismatch is handled in chunks of 64 (unroll of eight 8s), 8, 4, 2, and 1 byte(s).
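+    // For example, a 23-byte range below the native threshold is compared as
+    // 8 + 8 + 4 + 2 + 1 bytes.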
+ @ForceInline + private static long mismatch(AbstractMemorySegmentImpl src, long srcFromOffset, + AbstractMemorySegmentImpl dst, long dstFromOffset, + long start, int length, boolean srcAndDstBytesDiffer) { + int offset = 0; + final int limit = length & (NATIVE_THRESHOLD_MISMATCH - 8); + for (; offset < limit; offset += 8) { + final long s = SCOPED_MEMORY_ACCESS.getLongUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset + offset, !Architecture.isLittleEndian()); + final long d = SCOPED_MEMORY_ACCESS.getLongUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset + offset, !Architecture.isLittleEndian()); + if (s != d) { + return start + offset + mismatch(s, d); + } + } + int remaining = length - offset; + // 0...XXX000 + for (; remaining >= 8; remaining -= 8) { + final long s = SCOPED_MEMORY_ACCESS.getLongUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset + offset, !Architecture.isLittleEndian()); + final long d = SCOPED_MEMORY_ACCESS.getLongUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset + offset, !Architecture.isLittleEndian()); + if (s != d) { + return start + offset + mismatch(s, d); + } + offset += 8; + } + + // 0...0X00 + if (remaining >= 4) { + final int s = SCOPED_MEMORY_ACCESS.getIntUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset + offset, !Architecture.isLittleEndian()); + final int d = SCOPED_MEMORY_ACCESS.getIntUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset + offset, !Architecture.isLittleEndian()); + if (s != d) { + return start + offset + mismatch(s, d); + } + offset += 4; + remaining -= 4; + } + // 0...00X0 + if (remaining >= 2) { + final short s = SCOPED_MEMORY_ACCESS.getShortUnaligned(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset + offset, !Architecture.isLittleEndian()); + final short d = SCOPED_MEMORY_ACCESS.getShortUnaligned(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset + offset, !Architecture.isLittleEndian()); + if (s != d) { + return start + offset + mismatch(s, d); + } + offset += 2; + remaining -= 2; + } + // 0...000X + if (remaining == 1) { + final byte s = SCOPED_MEMORY_ACCESS.getByte(src.sessionImpl(), src.unsafeGetBase(), src.unsafeGetOffset() + srcFromOffset + offset); + final byte d = SCOPED_MEMORY_ACCESS.getByte(dst.sessionImpl(), dst.unsafeGetBase(), dst.unsafeGetOffset() + dstFromOffset + offset); + if (s != d) { + return start + offset; + } + } + return srcAndDstBytesDiffer ? (start + length) : -1; + // We have now fully handled 0...0X...XXXX + } + + @ForceInline + private static int mismatch(long first, long second) { + final long x = first ^ second; + return (Architecture.isLittleEndian() + ? Long.numberOfTrailingZeros(x) + : Long.numberOfLeadingZeros(x)) / 8; + } + + @ForceInline + private static int mismatch(int first, int second) { + final int x = first ^ second; + return (Architecture.isLittleEndian() + ? Integer.numberOfTrailingZeros(x) + : Integer.numberOfLeadingZeros(x)) / 8; + } + + @ForceInline + private static int mismatch(short first, short second) { + if (Architecture.isLittleEndian()) { + return ((0xff & first) == (0xff & second)) ? 1 : 0; + } else { + return ((0xff & first) == (0xff & second)) ? 0 : 1; + } + } + + /** + * Mismatch over long lengths. 
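+     * Compares in sub-ranges of at most {@code Integer.MAX_VALUE} bytes and returns
+     * either the offset of the first mismatching byte or the bitwise complement of
+     * the number of trailing bytes (always fewer than 8) left for the caller to check.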
+ */ + private static long vectorizedMismatchLargeForBytes(MemorySessionImpl aSession, MemorySessionImpl bSession, + Object a, long aOffset, + Object b, long bOffset, + long length) { + long off = 0; + long remaining = length; + int i, size; + boolean lastSubRange = false; + while (remaining > 7 && !lastSubRange) { + if (remaining > Integer.MAX_VALUE) { + size = Integer.MAX_VALUE; + } else { + size = (int) remaining; + lastSubRange = true; + } + i = SCOPED_MEMORY_ACCESS.vectorizedMismatch(aSession, bSession, + a, aOffset + off, + b, bOffset + off, + size, ArraysSupport.LOG2_ARRAY_BYTE_INDEX_SCALE); + if (i >= 0) + return off + i; + + i = size - ~i; + off += i; + remaining -= i; + } + return ~remaining; + } + + static final String PROPERTY_PATH = "java.lang.foreign.native.threshold.power."; + + // The returned value is in the interval [0, 2^30] + static int powerOfPropertyOr(String name, int defaultPower) { + final int power = GetIntegerAction.privilegedGetProperty(PROPERTY_PATH + name, defaultPower); + return 1 << Math.clamp(power, 0, Integer.SIZE - 2); + } + +} diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java index 346aab18a60fd..a2937b73a143f 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Flags.java @@ -285,7 +285,12 @@ public static EnumSet asFlagSet(long flags) { /** * Flag that marks a synthetic method body for a lambda expression */ - public static final long LAMBDA_METHOD = 1L<<49; + public static final long LAMBDA_METHOD = 1L<<49; //MethodSymbols only + + /** + * Flag that marks a synthetic local capture field in a local/anon class + */ + public static final long LOCAL_CAPTURE_FIELD = 1L<<49; //VarSymbols only /** * Flag to control recursion in TransTypes diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java index 44ccbade4a900..07a136c17003c 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java @@ -182,6 +182,7 @@ protected Attr(Context context) { unknownTypeInfo = new ResultInfo(KindSelector.TYP, Type.noType); unknownTypeExprInfo = new ResultInfo(KindSelector.VAL_TYP, Type.noType); recoveryInfo = new RecoveryInfo(deferredAttr.emptyDeferredAttrContext); + initBlockType = new MethodType(List.nil(), syms.voidType, List.nil(), syms.methodClass); } /** Switch: reifiable types in instanceof enabled? @@ -628,6 +629,7 @@ public void report(DiagnosticPosition pos, JCDiagnostic details) { final ResultInfo unknownTypeInfo; final ResultInfo unknownTypeExprInfo; final ResultInfo recoveryInfo; + final MethodType initBlockType; Type pt() { return resultInfo.pt; @@ -1421,7 +1423,7 @@ public void visitBlock(JCBlock tree) { // created BLOCK-method. Symbol fakeOwner = new MethodSymbol(tree.flags | BLOCK | - env.info.scope.owner.flags() & STRICTFP, names.empty, null, + env.info.scope.owner.flags() & STRICTFP, names.empty, initBlockType, env.info.scope.owner); final Env localEnv = env.dup(tree, env.info.dup(env.info.scope.dupUnshared(fakeOwner))); @@ -3524,62 +3526,26 @@ void checkLambdaCompatible(JCLambda tree, Type descriptor, CheckContext checkCon } } - /* Map to hold 'fake' clinit methods. 
If a lambda is used to initialize a - * static field and that lambda has type annotations, these annotations will - * also be stored at these fake clinit methods. - * - * LambdaToMethod also use fake clinit methods so they can be reused. - * Also as LTM is a phase subsequent to attribution, the methods from - * clinits can be safely removed by LTM to save memory. - */ - private Map clinits = new HashMap<>(); - - public MethodSymbol removeClinit(ClassSymbol sym) { - return clinits.remove(sym); - } - /* This method returns an environment to be used to attribute a lambda * expression. * * The owner of this environment is a method symbol. If the current owner - * is not a method, for example if the lambda is used to initialize - * a field, then if the field is: - * - * - an instance field, we use the first constructor. - * - a static field, we create a fake clinit method. + * is not a method (e.g. if the lambda occurs in a field initializer), then + * a synthetic method symbol owner is created. */ public Env lambdaEnv(JCLambda that, Env env) { Env lambdaEnv; Symbol owner = env.info.scope.owner; if (owner.kind == VAR && owner.owner.kind == TYP) { - //field initializer + // If the lambda is nested in a field initializer, we need to create a fake init method. + // Uniqueness of this symbol is not important (as e.g. annotations will be added on the + // init symbol's owner). ClassSymbol enclClass = owner.enclClass(); - Symbol newScopeOwner = env.info.scope.owner; - /* if the field isn't static, then we can get the first constructor - * and use it as the owner of the environment. This is what - * LTM code is doing to look for type annotations so we are fine. - */ - if ((owner.flags() & STATIC) == 0) { - for (Symbol s : enclClass.members_field.getSymbolsByName(names.init)) { - newScopeOwner = s; - break; - } - } else { - /* if the field is static then we need to create a fake clinit - * method, this method can later be reused by LTM. - */ - MethodSymbol clinit = clinits.get(enclClass); - if (clinit == null) { - Type clinitType = new MethodType(List.nil(), - syms.voidType, List.nil(), syms.methodClass); - clinit = new MethodSymbol(STATIC | SYNTHETIC | PRIVATE, - names.clinit, clinitType, enclClass); - clinit.params = List.nil(); - clinits.put(enclClass, clinit); - } - newScopeOwner = clinit; - } - lambdaEnv = env.dup(that, env.info.dup(env.info.scope.dupUnshared(newScopeOwner))); + Name initName = owner.isStatic() ? names.clinit : names.init; + MethodSymbol initSym = new MethodSymbol(BLOCK | (owner.isStatic() ? STATIC : 0) | SYNTHETIC | PRIVATE, + initName, initBlockType, enclClass); + initSym.params = List.nil(); + lambdaEnv = env.dup(that, env.info.dup(env.info.scope.dupUnshared(initSym))); } else { lambdaEnv = env.dup(that, env.info.dup(env.info.scope.dup())); } @@ -3936,6 +3902,7 @@ private void setFunctionalInfo(final Env env, final JCFunctionalExp inferenceContext -> setFunctionalInfo(env, fExpr, pt, inferenceContext.asInstType(descriptorType), inferenceContext.asInstType(primaryTarget), checkContext)); } else { + fExpr.owner = env.info.scope.owner; if (pt.hasTag(CLASS)) { fExpr.target = primaryTarget; } diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/CaptureScanner.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/CaptureScanner.java new file mode 100644 index 0000000000000..ef8a3ff584de6 --- /dev/null +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/CaptureScanner.java @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package com.sun.tools.javac.comp; + +import com.sun.tools.javac.code.Symbol; +import com.sun.tools.javac.code.Symbol.VarSymbol; +import com.sun.tools.javac.tree.JCTree; +import com.sun.tools.javac.tree.TreeScanner; +import com.sun.tools.javac.util.List; + +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.SequencedSet; +import java.util.Set; + +import static com.sun.tools.javac.code.Kinds.Kind.MTH; +import static com.sun.tools.javac.code.Kinds.Kind.VAR; + +/** + * A visitor which collects the set of local variables "captured" by a given tree. + */ +public class CaptureScanner extends TreeScanner { + + /** + * The tree under analysis. + */ + private final JCTree tree; + + /** + * The set of local variable declarations encountered in the tree under analysis. + */ + private final Set seenVars = new HashSet<>(); + + /** + * The set of captured local variables accessed from within the tree under analysis. + */ + private final SequencedSet fvs = new LinkedHashSet<>(); + + public CaptureScanner(JCTree ownerTree) { + this.tree = ownerTree; + } + + @Override + public void visitIdent(JCTree.JCIdent tree) { + Symbol sym = tree.sym; + if (sym.kind == VAR && sym.owner.kind == MTH) { + Symbol.VarSymbol vsym = (Symbol.VarSymbol) sym; + if (vsym.getConstValue() == null && !seenVars.contains(vsym)) { + addFreeVar(vsym); + } + } + } + + /** + * Add free variable to fvs list unless it is already there. + */ + protected void addFreeVar(Symbol.VarSymbol v) { + fvs.add(v); + } + + @Override + public void visitVarDef(JCTree.JCVariableDecl tree) { + if (tree.sym.owner.kind == MTH) { + seenVars.add(tree.sym); + } + super.visitVarDef(tree); + } + + /** + * Obtains the list of captured local variables in the tree under analysis. 
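+     * Local variables declared within the tree itself are excluded, as are
+     * references to constant variables, which the compiler inlines.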
+ */ + List analyzeCaptures() { + scan(tree); + return List.from(fvs); + } +} diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java index 2c3d79c0ab6da..adfc3ceaa0d94 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java @@ -25,50 +25,87 @@ package com.sun.tools.javac.comp; -import com.sun.tools.javac.code.Symbol.MethodHandleSymbol; -import com.sun.tools.javac.code.Types.SignatureGenerator.InvalidSignatureException; -import com.sun.tools.javac.jvm.PoolConstant.LoadableConstant; -import com.sun.tools.javac.resources.CompilerProperties.Errors; -import com.sun.tools.javac.resources.CompilerProperties.Fragments; -import com.sun.tools.javac.tree.*; -import com.sun.tools.javac.tree.JCTree.*; -import com.sun.tools.javac.tree.JCTree.JCMemberReference.ReferenceKind; -import com.sun.tools.javac.tree.TreeMaker; -import com.sun.tools.javac.tree.TreeTranslator; import com.sun.tools.javac.code.Attribute; +import com.sun.tools.javac.code.Flags; import com.sun.tools.javac.code.Symbol; import com.sun.tools.javac.code.Symbol.ClassSymbol; import com.sun.tools.javac.code.Symbol.DynamicMethodSymbol; +import com.sun.tools.javac.code.Symbol.MethodHandleSymbol; import com.sun.tools.javac.code.Symbol.MethodSymbol; import com.sun.tools.javac.code.Symbol.VarSymbol; import com.sun.tools.javac.code.Symtab; import com.sun.tools.javac.code.Type; import com.sun.tools.javac.code.Type.MethodType; import com.sun.tools.javac.code.Types; -import com.sun.tools.javac.comp.LambdaToMethod.LambdaAnalyzerPreprocessor.*; +import com.sun.tools.javac.code.Types.SignatureGenerator.InvalidSignatureException; +import com.sun.tools.javac.jvm.PoolConstant.LoadableConstant; +import com.sun.tools.javac.main.Option; +import com.sun.tools.javac.resources.CompilerProperties.Errors; +import com.sun.tools.javac.resources.CompilerProperties.Fragments; import com.sun.tools.javac.resources.CompilerProperties.Notes; -import com.sun.tools.javac.util.*; +import com.sun.tools.javac.tree.JCTree; +import com.sun.tools.javac.tree.JCTree.JCAnnotation; +import com.sun.tools.javac.tree.JCTree.JCBinary; +import com.sun.tools.javac.tree.JCTree.JCBlock; +import com.sun.tools.javac.tree.JCTree.JCBreak; +import com.sun.tools.javac.tree.JCTree.JCCase; +import com.sun.tools.javac.tree.JCTree.JCClassDecl; +import com.sun.tools.javac.tree.JCTree.JCExpression; +import com.sun.tools.javac.tree.JCTree.JCFieldAccess; +import com.sun.tools.javac.tree.JCTree.JCFunctionalExpression; +import com.sun.tools.javac.tree.JCTree.JCIdent; +import com.sun.tools.javac.tree.JCTree.JCLambda; +import com.sun.tools.javac.tree.JCTree.JCMemberReference; +import com.sun.tools.javac.tree.JCTree.JCMethodDecl; +import com.sun.tools.javac.tree.JCTree.JCMethodInvocation; +import com.sun.tools.javac.tree.JCTree.JCNewClass; +import com.sun.tools.javac.tree.JCTree.JCReturn; +import com.sun.tools.javac.tree.JCTree.JCStatement; +import com.sun.tools.javac.tree.JCTree.JCSwitch; +import com.sun.tools.javac.tree.JCTree.JCVariableDecl; +import com.sun.tools.javac.tree.JCTree.Tag; +import com.sun.tools.javac.tree.TreeInfo; +import com.sun.tools.javac.tree.TreeMaker; +import com.sun.tools.javac.tree.TreeTranslator; +import com.sun.tools.javac.util.Assert; +import com.sun.tools.javac.util.Context; +import com.sun.tools.javac.util.DiagnosticSource; +import 
com.sun.tools.javac.util.InvalidUtfException; +import com.sun.tools.javac.util.JCDiagnostic; import com.sun.tools.javac.util.JCDiagnostic.DiagnosticPosition; +import com.sun.tools.javac.util.List; +import com.sun.tools.javac.util.ListBuffer; +import com.sun.tools.javac.util.Log; +import com.sun.tools.javac.util.Name; +import com.sun.tools.javac.util.Names; +import com.sun.tools.javac.util.Options; -import java.util.EnumMap; +import javax.lang.model.element.ElementKind; +import java.lang.invoke.LambdaMetafactory; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.function.Consumer; import java.util.function.Supplier; -import static com.sun.tools.javac.comp.LambdaToMethod.LambdaSymbolKind.*; -import static com.sun.tools.javac.code.Flags.*; -import static com.sun.tools.javac.code.Kinds.Kind.*; -import static com.sun.tools.javac.code.TypeTag.*; -import static com.sun.tools.javac.tree.JCTree.Tag.*; - -import javax.lang.model.element.ElementKind; - -import com.sun.tools.javac.main.Option; +import static com.sun.tools.javac.code.Flags.ABSTRACT; +import static com.sun.tools.javac.code.Flags.BLOCK; +import static com.sun.tools.javac.code.Flags.DEFAULT; +import static com.sun.tools.javac.code.Flags.FINAL; +import static com.sun.tools.javac.code.Flags.INTERFACE; +import static com.sun.tools.javac.code.Flags.LAMBDA_METHOD; +import static com.sun.tools.javac.code.Flags.LOCAL_CAPTURE_FIELD; +import static com.sun.tools.javac.code.Flags.PARAMETER; +import static com.sun.tools.javac.code.Flags.PRIVATE; +import static com.sun.tools.javac.code.Flags.STATIC; +import static com.sun.tools.javac.code.Flags.STRICTFP; +import static com.sun.tools.javac.code.Flags.SYNTHETIC; +import static com.sun.tools.javac.code.Kinds.Kind.MTH; +import static com.sun.tools.javac.code.Kinds.Kind.TYP; +import static com.sun.tools.javac.code.Kinds.Kind.VAR; +import static com.sun.tools.javac.code.TypeTag.BOT; +import static com.sun.tools.javac.code.TypeTag.VOID; /** * This pass desugars lambda expressions into static methods @@ -80,31 +117,28 @@ */ public class LambdaToMethod extends TreeTranslator { - private Attr attr; - private JCDiagnostic.Factory diags; - private Log log; - private Lower lower; - private Names names; - private Symtab syms; - private Resolve rs; - private Operators operators; + private final Attr attr; + private final JCDiagnostic.Factory diags; + private final Log log; + private final Lower lower; + private final Names names; + private final Symtab syms; + private final Resolve rs; + private final Operators operators; private TreeMaker make; - private Types types; - private TransTypes transTypes; + private final Types types; + private final TransTypes transTypes; private Env attrEnv; - /** the analyzer scanner */ - private LambdaAnalyzerPreprocessor analyzer; - - /** map from lambda trees to translation contexts */ - private Map> contextMap; - - /** current translation context (visitor argument) */ - private TranslationContext context; - /** info about the current class being processed */ private KlassInfo kInfo; + /** translation context of the current lambda expression */ + private LambdaTranslationContext lambdaContext; + + /** the variable whose initializer is pending */ + private VarSymbol pendingVar; + /** dump statistics about lambda code generation */ private final boolean dumpLambdaToMethodStats; @@ -121,13 +155,13 @@ public class LambdaToMethod extends TreeTranslator { private 
final boolean deduplicateLambdas; /** Flag for alternate metafactories indicating the lambda object is intended to be serializable */ - public static final int FLAG_SERIALIZABLE = 1 << 0; + public static final int FLAG_SERIALIZABLE = LambdaMetafactory.FLAG_SERIALIZABLE; /** Flag for alternate metafactories indicating the lambda object has multiple targets */ - public static final int FLAG_MARKERS = 1 << 1; + public static final int FLAG_MARKERS = LambdaMetafactory.FLAG_MARKERS; /** Flag for alternate metafactories indicating the lambda object requires multiple bridges */ - public static final int FLAG_BRIDGES = 1 << 2; + public static final int FLAG_BRIDGES = LambdaMetafactory.FLAG_BRIDGES; // protected static final Context.Key unlambdaKey = new Context.Key<>(); @@ -151,18 +185,17 @@ private LambdaToMethod(Context context) { make = TreeMaker.instance(context); types = Types.instance(context); transTypes = TransTypes.instance(context); - analyzer = new LambdaAnalyzerPreprocessor(); Options options = Options.instance(context); dumpLambdaToMethodStats = options.isSet("debug.dumpLambdaToMethodStats"); attr = Attr.instance(context); forceSerializable = options.isSet("forceSerializable"); boolean lineDebugInfo = - options.isUnset(Option.G_CUSTOM) || - options.isSet(Option.G_CUSTOM, "lines"); + options.isUnset(Option.G_CUSTOM) || + options.isSet(Option.G_CUSTOM, "lines"); boolean varDebugInfo = - options.isUnset(Option.G_CUSTOM) - ? options.isSet(Option.G) - : options.isSet(Option.G_CUSTOM, "vars"); + options.isUnset(Option.G_CUSTOM) + ? options.isSet(Option.G) + : options.isSet(Option.G_CUSTOM, "vars"); debugLinesOrVars = lineDebugInfo || varDebugInfo; verboseDeduplication = options.isSet("debug.dumpLambdaToMethodDeduplication"); deduplicateLambdas = options.getBoolean("deduplicateLambdas", true); @@ -180,7 +213,6 @@ class DedupedLambda { this.tree = tree; } - @Override public int hashCode() { int hashCode = this.hashCode; @@ -203,18 +235,18 @@ private class KlassInfo { /** * list of methods to append */ - private ListBuffer appendedMethodList; + private ListBuffer appendedMethodList = new ListBuffer<>(); - private Map dedupedLambdas; + private final Map dedupedLambdas = new HashMap<>(); - private Map dynMethSyms = new HashMap<>(); + private final Map dynMethSyms = new HashMap<>(); /** * list of deserialization cases */ - private final Map> deserializeCases; + private final Map> deserializeCases = new HashMap<>(); - /** + /** * deserialize method symbol */ private final MethodSymbol deserMethodSym; @@ -226,11 +258,10 @@ private class KlassInfo { private final JCClassDecl clazz; + private final Map syntheticNames = new HashMap<>(); + private KlassInfo(JCClassDecl clazz) { this.clazz = clazz; - appendedMethodList = new ListBuffer<>(); - dedupedLambdas = new HashMap<>(); - deserializeCases = new HashMap<>(); MethodType type = new MethodType(List.of(syms.serializedLambdaType), syms.objectType, List.nil(), syms.methodClass); deserMethodSym = makePrivateSyntheticMethod(STATIC, names.deserializeLambda, type, clazz.sym); @@ -241,62 +272,50 @@ private KlassInfo(JCClassDecl clazz) { private void addMethod(JCTree decl) { appendedMethodList = appendedMethodList.prepend(decl); } - } - - // - @Override - public T translate(T tree) { - TranslationContext newContext = contextMap.get(tree); - return translate(tree, newContext != null ? 
newContext : context); - } - T translate(T tree, TranslationContext newContext) { - TranslationContext prevContext = context; - try { - context = newContext; - return super.translate(tree); - } - finally { - context = prevContext; - } - } - - List translate(List trees, TranslationContext newContext) { - ListBuffer buf = new ListBuffer<>(); - for (T tree : trees) { - buf.append(translate(tree, newContext)); + int syntheticNameIndex(StringBuilder buf, int start) { + String temp = buf.toString(); + Integer count = syntheticNames.get(temp); + if (count == null) { + count = start; + } + syntheticNames.put(temp, count + 1); + return count; } - return buf.toList(); } + // public JCTree translateTopLevelClass(Env env, JCTree cdef, TreeMaker make) { this.make = make; this.attrEnv = env; - this.context = null; - this.contextMap = new HashMap<>(); - cdef = analyzer.analyzeAndPreprocessClass((JCClassDecl) cdef); return translate(cdef); } - // - // /** * Visit a class. * Maintain the translatedMethodList across nested classes. * Append the translatedMethodList to the class after it is translated. - * @param tree */ @Override public void visitClassDef(JCClassDecl tree) { KlassInfo prevKlassInfo = kInfo; + DiagnosticSource prevSource = log.currentSource(); + LambdaTranslationContext prevLambdaContext = lambdaContext; + VarSymbol prevPendingVar = pendingVar; try { kInfo = new KlassInfo(tree); + log.useSource(tree.sym.sourcefile); + lambdaContext = null; + pendingVar = null; super.visitClassDef(tree); + if (prevLambdaContext != null) { + tree.sym.owner = prevLambdaContext.translatedSym; + } if (!kInfo.deserializeCases.isEmpty()) { int prevPos = make.pos; try { make.at(tree); - kInfo.addMethod(makeDeserializeMethod(tree.sym)); + kInfo.addMethod(makeDeserializeMethod()); } finally { make.at(prevPos); } @@ -310,6 +329,9 @@ public void visitClassDef(JCClassDecl tree) { result = tree; } finally { kInfo = prevKlassInfo; + log.useSource(prevSource.getFile()); + lambdaContext = prevLambdaContext; + pendingVar = prevPendingVar; } } @@ -317,11 +339,10 @@ public void visitClassDef(JCClassDecl tree) { * Translate a lambda into a method to be inserted into the class. * Then replace the lambda site with an invokedynamic call of to lambda * meta-factory, which will use the lambda method. - * @param tree */ @Override public void visitLambda(JCLambda tree) { - LambdaTranslationContext localContext = (LambdaTranslationContext)context; + LambdaTranslationContext localContext = new LambdaTranslationContext(tree); MethodSymbol sym = localContext.translatedSym; MethodType lambdaType = (MethodType) sym.type; @@ -332,26 +353,26 @@ public void visitLambda(JCLambda tree) { lambda and attach it to the implementation method. */ - Symbol owner = localContext.owner; + Symbol owner = tree.owner; apportionTypeAnnotations(tree, owner::getRawTypeAttributes, owner::setTypeAttributes, sym::setTypeAttributes); - - boolean init; - if ((init = (owner.name == names.init)) || owner.name == names.clinit) { - owner = owner.owner; + final long ownerFlags = owner.flags(); + if ((ownerFlags & Flags.BLOCK) != 0) { + ClassSymbol cs = (ClassSymbol) owner.owner; + boolean isStaticInit = (ownerFlags & Flags.STATIC) != 0; apportionTypeAnnotations(tree, - init ? owner::getInitTypeAttributes : owner::getClassInitTypeAttributes, - init ? owner::setInitTypeAttributes : owner::setClassInitTypeAttributes, + isStaticInit ? cs::getClassInitTypeAttributes : cs::getInitTypeAttributes, + isStaticInit ? 
cs::setClassInitTypeAttributes : cs::setInitTypeAttributes, sym::appendUniqueTypeAttributes); } - if (localContext.self != null && localContext.self.getKind() == ElementKind.FIELD) { - owner = localContext.self; + + if (pendingVar != null && pendingVar.getKind() == ElementKind.FIELD) { apportionTypeAnnotations(tree, - owner::getRawTypeAttributes, - owner::setTypeAttributes, + pendingVar::getRawTypeAttributes, + pendingVar::setTypeAttributes, sym::appendUniqueTypeAttributes); } } @@ -363,35 +384,13 @@ public void visitLambda(JCLambda tree) { List.nil(), localContext.syntheticParams, lambdaType.getThrownTypes() == null ? - List.nil() : - make.Types(lambdaType.getThrownTypes()), + List.nil() : + make.Types(lambdaType.getThrownTypes()), null, null); lambdaDecl.sym = sym; lambdaDecl.type = lambdaType; - //translate lambda body - //As the lambda body is translated, all references to lambda locals, - //captured variables, enclosing members are adjusted accordingly - //to refer to the static method parameters (rather than i.e. accessing - //captured members directly). - lambdaDecl.body = translate(makeLambdaBody(tree, lambdaDecl)); - - boolean dedupe = false; - if (deduplicateLambdas && !debugLinesOrVars && !localContext.isSerializable()) { - DedupedLambda dedupedLambda = new DedupedLambda(lambdaDecl.sym, lambdaDecl.body); - DedupedLambda existing = kInfo.dedupedLambdas.putIfAbsent(dedupedLambda, dedupedLambda); - if (existing != null) { - sym = existing.symbol; - dedupe = true; - if (verboseDeduplication) log.note(tree, Notes.VerboseL2mDeduplicate(sym)); - } - } - if (!dedupe) { - //Add the method to the list of methods to be added to this class. - kInfo.addMethod(lambdaDecl); - } - //now that we have generated a method for the lambda expression, //we can translate the lambda into a method reference pointing to the newly //created method. @@ -408,46 +407,72 @@ public void visitLambda(JCLambda tree) { if (!sym.isStatic()) { syntheticInits.append(makeThis( sym.owner.enclClass().asType(), - localContext.owner.enclClass())); + tree.owner.enclClass())); } //add captured locals - for (Symbol fv : localContext.getSymbolMap(CAPTURED_VAR).keySet()) { - if (fv != localContext.self) { - JCExpression captured_local = make.Ident(fv).setType(fv.type); - syntheticInits.append(captured_local); - } + for (Symbol fv : localContext.capturedVars) { + JCExpression captured_local = make.Ident(fv).setType(fv.type); + syntheticInits.append(captured_local); } //then, determine the arguments to the indy call - List indy_args = translate(syntheticInits.toList(), localContext.prev); + List indy_args = translate(syntheticInits.toList()); + + LambdaTranslationContext prevLambdaContext = lambdaContext; + try { + lambdaContext = localContext; + //translate lambda body + //As the lambda body is translated, all references to lambda locals, + //captured variables, enclosing members are adjusted accordingly + //to refer to the static method parameters (rather than i.e. accessing + //captured members directly). 
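+            //The previous lambda context is restored in the finally block below, so
+            //nested lambdas are each translated against their own enclosing context.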
+ lambdaDecl.body = translate(makeLambdaBody(tree, lambdaDecl)); + } finally { + lambdaContext = prevLambdaContext; + } + + boolean dedupe = false; + if (deduplicateLambdas && !debugLinesOrVars && !isSerializable(tree)) { + DedupedLambda dedupedLambda = new DedupedLambda(lambdaDecl.sym, lambdaDecl.body); + DedupedLambda existing = kInfo.dedupedLambdas.putIfAbsent(dedupedLambda, dedupedLambda); + if (existing != null) { + sym = existing.symbol; + dedupe = true; + if (verboseDeduplication) log.note(tree, Notes.VerboseL2mDeduplicate(sym)); + } + } + if (!dedupe) { + //Add the method to the list of methods to be added to this class. + kInfo.addMethod(lambdaDecl); + } //convert to an invokedynamic call - result = makeMetafactoryIndyCall(context, sym.asHandle(), indy_args); + result = makeMetafactoryIndyCall(tree, sym.asHandle(), localContext.translatedSym, indy_args); } // where - // Reassign type annotations from the source that should really belong to the lambda - private void apportionTypeAnnotations(JCLambda tree, - Supplier> source, - Consumer> owner, - Consumer> lambda) { - - ListBuffer ownerTypeAnnos = new ListBuffer<>(); - ListBuffer lambdaTypeAnnos = new ListBuffer<>(); - - for (Attribute.TypeCompound tc : source.get()) { - if (tc.position.onLambda == tree) { - lambdaTypeAnnos.append(tc); - } else { - ownerTypeAnnos.append(tc); - } - } - if (lambdaTypeAnnos.nonEmpty()) { - owner.accept(ownerTypeAnnos.toList()); - lambda.accept(lambdaTypeAnnos.toList()); + // Reassign type annotations from the source that should really belong to the lambda + private void apportionTypeAnnotations(JCLambda tree, + Supplier> source, + Consumer> owner, + Consumer> lambda) { + + ListBuffer ownerTypeAnnos = new ListBuffer<>(); + ListBuffer lambdaTypeAnnos = new ListBuffer<>(); + + for (Attribute.TypeCompound tc : source.get()) { + if (tc.position.onLambda == tree) { + lambdaTypeAnnos.append(tc); + } else { + ownerTypeAnnos.append(tc); } } + if (lambdaTypeAnnos.nonEmpty()) { + owner.accept(ownerTypeAnnos.toList()); + lambda.accept(lambdaTypeAnnos.toList()); + } + } private JCIdent makeThis(Type type, Symbol owner) { VarSymbol _this = new VarSymbol(PARAMETER | FINAL | SYNTHETIC, @@ -460,65 +485,46 @@ private JCIdent makeThis(Type type, Symbol owner) { /** * Translate a method reference into an invokedynamic call to the * meta-factory. - * @param tree */ @Override public void visitReference(JCMemberReference tree) { - ReferenceTranslationContext localContext = (ReferenceTranslationContext)context; - //first determine the method symbol to be used to generate the sam instance //this is either the method reference symbol, or the bridged reference symbol MethodSymbol refSym = (MethodSymbol)tree.sym; //the qualifying expression is treated as a special captured arg - JCExpression init; - switch(tree.kind) { - - case IMPLICIT_INNER: /** Inner :: new */ - case SUPER: /** super :: instMethod */ - init = makeThis( - localContext.owner.enclClass().asType(), - localContext.owner.enclClass()); - break; - - case BOUND: /** Expr :: instMethod */ - init = transTypes.coerce(attrEnv, tree.getQualifierExpression(), - types.erasure(tree.sym.owner.type)); - init = attr.makeNullCheck(init); - break; - - case UNBOUND: /** Type :: instMethod */ - case STATIC: /** Type :: staticMethod */ - case TOPLEVEL: /** Top level :: new */ - case ARRAY_CTOR: /** ArrayType :: new */ - init = null; - break; - - default: - throw new InternalError("Should not have an invalid kind"); - } - - List indy_args = init==null? 
List.nil() : translate(List.of(init), localContext.prev); - + JCExpression init = switch (tree.kind) { + case IMPLICIT_INNER, /* Inner :: new */ + SUPER -> /* super :: instMethod */ + makeThis(tree.owner.enclClass().asType(), tree.owner.enclClass()); + case BOUND -> /* Expr :: instMethod */ + attr.makeNullCheck(transTypes.coerce(attrEnv, tree.getQualifierExpression(), + types.erasure(tree.sym.owner.type))); + case UNBOUND, /* Type :: instMethod */ + STATIC, /* Type :: staticMethod */ + TOPLEVEL, /* Top level :: new */ + ARRAY_CTOR -> /* ArrayType :: new */ + null; + }; + + List indy_args = (init == null) ? + List.nil() : translate(List.of(init)); //build a sam instance using an indy call to the meta-factory - result = makeMetafactoryIndyCall(localContext, refSym.asHandle(), indy_args); + result = makeMetafactoryIndyCall(tree, refSym.asHandle(), refSym, indy_args); } /** * Translate identifiers within a lambda to the mapped identifier - * @param tree */ @Override public void visitIdent(JCIdent tree) { - if (context == null || !analyzer.lambdaIdentSymbolFilter(tree.sym)) { + if (lambdaContext == null) { super.visitIdent(tree); } else { int prevPos = make.pos; try { make.at(tree); - - LambdaTranslationContext lambdaContext = (LambdaTranslationContext) context; JCTree ltree = lambdaContext.translate(tree); if (ltree != null) { result = ltree; @@ -535,13 +541,18 @@ public void visitIdent(JCIdent tree) { @Override public void visitVarDef(JCVariableDecl tree) { - LambdaTranslationContext lambdaContext = (LambdaTranslationContext)context; - if (context != null && lambdaContext.getSymbolMap(LOCAL_VAR).containsKey(tree.sym)) { - tree.init = translate(tree.init); - tree.sym = (VarSymbol) lambdaContext.getSymbolMap(LOCAL_VAR).get(tree.sym); - result = tree; - } else { - super.visitVarDef(tree); + VarSymbol prevPendingVar = pendingVar; + try { + pendingVar = tree.sym; + if (lambdaContext != null) { + tree.sym = lambdaContext.addLocal(tree.sym); + tree.init = translate(tree.init); + result = tree; + } else { + super.visitVarDef(tree); + } + } finally { + pendingVar = prevPendingVar; } } @@ -609,7 +620,7 @@ public void visitReturn(JCReturn tree) { if (isTarget_void && !isLambda_void) { //Void to void conversion: // { TYPE $loc = RET-EXPR; return; } - VarSymbol loc = makeSyntheticVar(0, names.fromString("$loc"), tree.expr.type, lambdaMethodDecl.sym); + VarSymbol loc = new VarSymbol(SYNTHETIC, names.fromString("$loc"), tree.expr.type, lambdaMethodDecl.sym); JCVariableDecl varDef = make.VarDef(loc, tree.expr); result = make.Block(0, List.of(varDef, make.Return(null))); } else { @@ -628,7 +639,7 @@ public void visitReturn(JCReturn tree) { return trans_block; } - private JCMethodDecl makeDeserializeMethod(Symbol kSym) { + private JCMethodDecl makeDeserializeMethod() { ListBuffer cases = new ListBuffer<>(); ListBuffer breaks = new ListBuffer<>(); for (Map.Entry> entry : kInfo.deserializeCases.entrySet()) { @@ -644,16 +655,16 @@ private JCMethodDecl makeDeserializeMethod(Symbol kSym) { JCBlock body = make.Block(0L, List.of( sw, make.Throw(makeNewClass( - syms.illegalArgumentExceptionType, - List.of(make.Literal("Invalid lambda deserialization")))))); + syms.illegalArgumentExceptionType, + List.of(make.Literal("Invalid lambda deserialization")))))); JCMethodDecl deser = make.MethodDef(make.Modifiers(kInfo.deserMethodSym.flags()), - names.deserializeLambda, - make.QualIdent(kInfo.deserMethodSym.getReturnType().tsym), - List.nil(), - List.of(make.VarDef(kInfo.deserParamSym, null)), - List.nil(), - body, - null); + 
names.deserializeLambda, + make.QualIdent(kInfo.deserMethodSym.getReturnType().tsym), + List.nil(), + List.of(make.VarDef(kInfo.deserParamSym, null)), + List.nil(), + body, + null); deser.sym = kInfo.deserMethodSym; deser.type = kInfo.deserMethodSym.type; //System.err.printf("DESER: '%s'\n", deser); @@ -667,7 +678,7 @@ private JCMethodDecl makeDeserializeMethod(Symbol kSym) { */ JCNewClass makeNewClass(Type ctype, List args, Symbol cons) { JCNewClass tree = make.NewClass(null, - null, make.QualIdent(ctype.tsym), args, null); + null, make.QualIdent(ctype.tsym), args, null); tree.constructor = cons; tree.type = ctype; return tree; @@ -680,7 +691,7 @@ JCNewClass makeNewClass(Type ctype, List args, Symbol cons) { JCNewClass makeNewClass(Type ctype, List args) { return makeNewClass(ctype, args, rs.resolveConstructor(null, attrEnv, ctype, TreeInfo.types(args), List.nil())); - } + } private void addDeserializationCase(MethodHandleSymbol refSym, Type targetType, MethodSymbol samSym, DiagnosticPosition pos, List staticArgs, MethodType indyType) { @@ -711,17 +722,17 @@ private void addDeserializationCase(MethodHandleSymbol refSym, Type targetType, } JCStatement stmt = make.If( deserTest(deserTest(deserTest(deserTest(deserTest( - kindTest, - "getFunctionalInterfaceClass", functionalInterfaceClass), - "getFunctionalInterfaceMethodName", functionalInterfaceMethodName), - "getFunctionalInterfaceMethodSignature", functionalInterfaceMethodSignature), - "getImplClass", implClass), - "getImplMethodSignature", implMethodSignature), + kindTest, + "getFunctionalInterfaceClass", functionalInterfaceClass), + "getFunctionalInterfaceMethodName", functionalInterfaceMethodName), + "getFunctionalInterfaceMethodSignature", functionalInterfaceMethodSignature), + "getImplClass", implClass), + "getImplMethodSignature", implMethodSignature), make.Return(makeIndyCall( - pos, - syms.lambdaMetafactory, - names.altMetafactory, - staticArgs, indyType, serArgs.toList(), samSym.name)), + pos, + syms.lambdaMetafactory, + names.altMetafactory, + staticArgs, indyType, serArgs.toList(), samSym.name)), null); ListBuffer stmts = kInfo.deserializeCases.get(implMethodName); if (stmts == null) { @@ -742,8 +753,8 @@ private void addDeserializationCase(MethodHandleSymbol refSym, Type targetType, } private JCExpression eqTest(Type argType, JCExpression arg1, JCExpression arg2) { - JCBinary testExpr = make.Binary(JCTree.Tag.EQ, arg1, arg2); - testExpr.operator = operators.resolveBinary(testExpr, JCTree.Tag.EQ, argType, argType); + JCBinary testExpr = make.Binary(Tag.EQ, arg1, arg2); + testExpr.operator = operators.resolveBinary(testExpr, Tag.EQ, argType, argType); testExpr.setType(syms.booleanType); return testExpr; } @@ -756,8 +767,8 @@ private JCExpression deserTest(JCExpression prev, String func, String lit) { make.Select(deserGetter(func, syms.stringType), eqsym).setType(eqmt), List.of(make.Literal(lit))); eqtest.setType(syms.booleanType); - JCBinary compound = make.Binary(JCTree.Tag.AND, prev, eqtest); - compound.operator = operators.resolveBinary(compound, JCTree.Tag.AND, syms.booleanType, syms.booleanType); + JCBinary compound = make.Binary(Tag.AND, prev, eqtest); + compound.operator = operators.resolveBinary(compound, Tag.AND, syms.booleanType, syms.booleanType); compound.setType(syms.booleanType); return compound; } @@ -770,9 +781,9 @@ private JCExpression deserGetter(String func, Type type, List argTypes, Li MethodType getmt = new MethodType(argTypes, type, List.nil(), syms.methodClass); Symbol getsym = 
rs.resolveQualifiedMethod(null, attrEnv, syms.serializedLambdaType, names.fromString(func), argTypes, List.nil()); return make.Apply( - List.nil(), - make.Select(make.Ident(kInfo.deserParamSym).setType(syms.serializedLambdaType), getsym).setType(getmt), - args).setType(type); + List.nil(), + make.Select(make.Ident(kInfo.deserParamSym).setType(syms.serializedLambdaType), getsym).setType(getmt), + args).setType(type); } /** @@ -782,29 +793,20 @@ private MethodSymbol makePrivateSyntheticMethod(long flags, Name name, Type type return new MethodSymbol(flags | SYNTHETIC | PRIVATE, name, type, owner); } - /** - * Create new synthetic variable with given flags, name, type, owner - */ - private VarSymbol makeSyntheticVar(long flags, Name name, Type type, Symbol owner) { - return new VarSymbol(flags | SYNTHETIC, name, type, owner); - } - - // - private MethodType typeToMethodType(Type mt) { Type type = types.erasure(mt); return new MethodType(type.getParameterTypes(), - type.getReturnType(), - type.getThrownTypes(), - syms.methodClass); + type.getReturnType(), + type.getThrownTypes(), + syms.methodClass); } /** * Generate an indy method call to the meta factory */ - private JCExpression makeMetafactoryIndyCall(TranslationContext context, - MethodHandleSymbol refSym, List indy_args) { - JCFunctionalExpression tree = context.tree; + private JCExpression makeMetafactoryIndyCall(JCFunctionalExpression tree, + MethodHandleSymbol refSym, MethodSymbol nonDedupedRefSym, + List indy_args) { //determine the static bsm args MethodSymbol samSym = (MethodSymbol) types.findDescriptorSymbol(tree.target.tsym); List staticArgs = List.of( @@ -824,10 +826,17 @@ private JCExpression makeMetafactoryIndyCall(TranslationContext context, List.nil(), syms.methodClass); - Name metafactoryName = context.needsAltMetafactory() ? + List bridges = bridges(tree); + boolean isSerializable = isSerializable(tree); + boolean needsAltMetafactory = tree.target.isIntersection() || + isSerializable || bridges.length() > 1; + + dumpStats(tree, needsAltMetafactory, nonDedupedRefSym); + + Name metafactoryName = needsAltMetafactory ? names.altMetafactory : names.metafactory; - if (context.needsAltMetafactory()) { + if (needsAltMetafactory) { ListBuffer markers = new ListBuffer<>(); List targets = tree.target.isIntersection() ? types.directSupertypes(tree.target) : @@ -835,14 +844,14 @@ private JCExpression makeMetafactoryIndyCall(TranslationContext context, for (Type t : targets) { t = types.erasure(t); if (t.tsym != syms.serializableType.tsym && - t.tsym != tree.type.tsym && - t.tsym != syms.objectType.tsym) { + t.tsym != tree.type.tsym && + t.tsym != syms.objectType.tsym) { markers.append(t); } } - int flags = context.isSerializable() ? FLAG_SERIALIZABLE : 0; + int flags = isSerializable ? 
FLAG_SERIALIZABLE : 0; boolean hasMarkers = markers.nonEmpty(); - boolean hasBridges = context.bridges.nonEmpty(); + boolean hasBridges = bridges.nonEmpty(); if (hasMarkers) { flags |= FLAG_MARKERS; } @@ -855,15 +864,15 @@ private JCExpression makeMetafactoryIndyCall(TranslationContext context, staticArgs = staticArgs.appendList(List.convert(LoadableConstant.class, markers.toList())); } if (hasBridges) { - staticArgs = staticArgs.append(LoadableConstant.Int(context.bridges.length() - 1)); - for (Symbol s : context.bridges) { + staticArgs = staticArgs.append(LoadableConstant.Int(bridges.length() - 1)); + for (Symbol s : bridges) { Type s_erasure = s.erasure(types); if (!types.isSameType(s_erasure, samSym.erasure(types))) { staticArgs = staticArgs.append(((MethodType)s.erasure(types))); } } } - if (context.isSerializable()) { + if (isSerializable) { int prevPos = make.pos; try { make.at(kInfo.clazz); @@ -889,18 +898,18 @@ private JCExpression makeIndyCall(DiagnosticPosition pos, Type site, Name bsmNam try { make.at(pos); List bsm_staticArgs = List.of(syms.methodHandleLookupType, - syms.stringType, - syms.methodTypeType).appendList(staticArgs.map(types::constantType)); + syms.stringType, + syms.methodTypeType).appendList(staticArgs.map(types::constantType)); MethodSymbol bsm = rs.resolveInternalMethod(pos, attrEnv, site, bsmName, bsm_staticArgs, List.nil()); DynamicMethodSymbol dynSym = new DynamicMethodSymbol(methName, - syms.noSymbol, - bsm.asHandle(), - indyType, - staticArgs.toArray(new LoadableConstant[staticArgs.length()])); + syms.noSymbol, + bsm.asHandle(), + indyType, + staticArgs.toArray(new LoadableConstant[staticArgs.length()])); JCFieldAccess qualifier = make.Select(make.QualIdent(site.tsym), bsmName); DynamicMethodSymbol existing = kInfo.dynMethSyms.putIfAbsent( dynSym.poolKey(types), dynSym); @@ -915,833 +924,337 @@ private JCExpression makeIndyCall(DiagnosticPosition pos, Type site, Name bsmNam } } - // - /** - * This visitor collects information about translation of a lambda expression. - * More specifically, it keeps track of the enclosing contexts and captured locals - * accessed by the lambda being translated (as well as other useful info). - * It also translates away problems for LambdaToMethod. - */ - class LambdaAnalyzerPreprocessor extends TreeTranslator { - - /** the frame stack - used to reconstruct translation info about enclosing scopes */ - private List frameStack; + List bridges(JCFunctionalExpression tree) { + ClassSymbol csym = + types.makeFunctionalInterfaceClass(attrEnv, names.empty, tree.target, ABSTRACT | INTERFACE); + return types.functionalInterfaceBridges(csym); + } - /** - * keep the count of lambda expression (used to generate unambiguous - * names) - */ - private int lambdaCount = 0; + /** does this functional expression require serialization support? 
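+     * (it does if its target type is a subtype of Serializable, or if the internal
+     * forceSerializable option is set)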
*/ + boolean isSerializable(JCFunctionalExpression tree) { + if (forceSerializable) { + return true; + } + return types.asSuper(tree.target, syms.serializableType.tsym) != null; + } - /** - * keep the count of lambda expression defined in given context (used to - * generate unambiguous names for serializable lambdas) - */ - private class SyntheticMethodNameCounter { - private Map map = new HashMap<>(); - int getIndex(StringBuilder buf) { - String temp = buf.toString(); - Integer count = map.get(temp); - if (count == null) { - count = 0; - } - ++count; - map.put(temp, count); - return count; + void dumpStats(JCFunctionalExpression tree, boolean needsAltMetafactory, Symbol sym) { + if (dumpLambdaToMethodStats) { + if (tree instanceof JCLambda lambda) { + log.note(tree, diags.noteKey(lambda.wasMethodReference ? "mref.stat.1" : "lambda.stat", + needsAltMetafactory, sym)); + } else if (tree instanceof JCMemberReference) { + log.note(tree, Notes.MrefStat(needsAltMetafactory, null)); } } - private SyntheticMethodNameCounter syntheticMethodNameCounts = - new SyntheticMethodNameCounter(); + } - private Map localClassDefs; + /** + * This class retains all the useful information about a lambda expression, + * and acts as a translation map that is used by the main translation routines + * in order to adjust references to captured locals/members, etc. + */ + class LambdaTranslationContext { - /** - * maps for fake clinit symbols to be used as owners of lambda occurring in - * a static var init context - */ - private Map clinits = new HashMap<>(); + /** the underlying (untranslated) tree */ + final JCFunctionalExpression tree; - private JCClassDecl analyzeAndPreprocessClass(JCClassDecl tree) { - frameStack = List.nil(); - localClassDefs = new HashMap<>(); - return translate(tree); - } + /** a translation map from source symbols to translated symbols */ + final Map lambdaProxies = new HashMap<>(); - @Override - public void visitBlock(JCBlock tree) { - List prevStack = frameStack; - try { - if (frameStack.nonEmpty() && frameStack.head.tree.hasTag(CLASSDEF)) { - frameStack = frameStack.prepend(new Frame(tree)); - } - super.visitBlock(tree); - } - finally { - frameStack = prevStack; - } - } + /** the list of symbols captured by this lambda expression */ + final List capturedVars; - @Override - public void visitClassDef(JCClassDecl tree) { - List prevStack = frameStack; - int prevLambdaCount = lambdaCount; - SyntheticMethodNameCounter prevSyntheticMethodNameCounts = - syntheticMethodNameCounts; - Map prevClinits = clinits; - DiagnosticSource prevSource = log.currentSource(); - try { - log.useSource(tree.sym.sourcefile); - lambdaCount = 0; - syntheticMethodNameCounts = new SyntheticMethodNameCounter(); - prevClinits = new HashMap<>(); - if (tree.sym.owner.kind == MTH) { - localClassDefs.put(tree.sym, tree); - } - if (directlyEnclosingLambda() != null) { - tree.sym.owner = owner(); - } - frameStack = frameStack.prepend(new Frame(tree)); - super.visitClassDef(tree); - } - finally { - log.useSource(prevSource.getFile()); - frameStack = prevStack; - lambdaCount = prevLambdaCount; - syntheticMethodNameCounts = prevSyntheticMethodNameCounts; - clinits = prevClinits; - } - } + /** the synthetic symbol for the method hoisting the translated lambda */ + final MethodSymbol translatedSym; - @Override - public void visitIdent(JCIdent tree) { - if (context() != null && lambdaIdentSymbolFilter(tree.sym)) { - if (tree.sym.kind == VAR && - tree.sym.owner.kind == MTH && - tree.type.constValue() == null) { - TranslationContext 
localContext = context(); - while (localContext != null) { - if (localContext.tree.getTag() == LAMBDA) { - JCTree block = capturedDecl(localContext.depth, tree.sym); - if (block == null) break; - ((LambdaTranslationContext)localContext) - .addSymbol(tree.sym, CAPTURED_VAR); - } - localContext = localContext.prev; - } - } else if (tree.sym.owner.kind == TYP) { - TranslationContext localContext = context(); - while (localContext != null && !localContext.owner.isStatic()) { - if (localContext.tree.hasTag(LAMBDA)) { - JCTree block = capturedDecl(localContext.depth, tree.sym); - if (block == null) break; - switch (block.getTag()) { - case CLASSDEF: - JCClassDecl cdecl = (JCClassDecl)block; - ((LambdaTranslationContext)localContext) - .addSymbol(cdecl.sym, CAPTURED_THIS); - break; - default: - Assert.error("bad block kind"); - } - } - localContext = localContext.prev; + /** the list of parameter declarations of the translated lambda method */ + final List syntheticParams; + + LambdaTranslationContext(JCLambda tree) { + this.tree = tree; + // This symbol will be filled-in in complete + Symbol owner = tree.owner; + if (owner.kind == MTH) { + final MethodSymbol originalOwner = (MethodSymbol)owner.clone(owner.owner); + this.translatedSym = new MethodSymbol(0, null, null, owner.enclClass()) { + @Override + public MethodSymbol originalEnclosingMethod() { + return originalOwner; } - } + }; + } else { + this.translatedSym = makePrivateSyntheticMethod(0, null, null, owner.enclClass()); } - super.visitIdent(tree); - } - - @Override - public void visitLambda(JCLambda tree) { - analyzeLambda(tree, tree.wasMethodReference ? "mref.stat.1" : "lambda.stat"); - } - - private LambdaTranslationContext analyzeLambda(JCLambda tree, String statKey) { - List prevStack = frameStack; - try { - LambdaTranslationContext context = new LambdaTranslationContext(tree); - frameStack = frameStack.prepend(new Frame(tree)); - for (JCVariableDecl param : tree.params) { - context.addSymbol(param.sym, PARAM); - frameStack.head.addLocal(param.sym); - } - contextMap.put(tree, context); - super.visitLambda(tree); - context.complete(); - if (dumpLambdaToMethodStats) { - log.note(tree, diags.noteKey(statKey, context.needsAltMetafactory(), context.translatedSym)); - } - return context; + ListBuffer params = new ListBuffer<>(); + ListBuffer parameterSymbols = new ListBuffer<>(); + LambdaCaptureScanner captureScanner = new LambdaCaptureScanner(tree); + capturedVars = captureScanner.analyzeCaptures(); + for (VarSymbol captured : capturedVars) { + VarSymbol trans = addSymbol(captured, LambdaSymbolKind.CAPTURED_VAR); + params.append(make.VarDef(trans, null)); + parameterSymbols.add(trans); } - finally { - frameStack = prevStack; + for (JCVariableDecl param : tree.params) { + VarSymbol trans = addSymbol(param.sym, LambdaSymbolKind.PARAM); + params.append(make.VarDef(trans, null)); + parameterSymbols.add(trans); } + syntheticParams = params.toList(); + completeLambdaMethodSymbol(owner, captureScanner.capturesThis); + translatedSym.params = parameterSymbols.toList(); } - @Override - public void visitMethodDef(JCMethodDecl tree) { - List prevStack = frameStack; - try { - frameStack = frameStack.prepend(new Frame(tree)); - super.visitMethodDef(tree); - } - finally { - frameStack = prevStack; - } + void completeLambdaMethodSymbol(Symbol owner, boolean thisReferenced) { + boolean inInterface = owner.enclClass().isInterface(); + + // Compute and set the lambda name + Name name = isSerializable(tree) + ? 
serializedLambdaName(owner) + : lambdaName(owner); + + //prepend synthetic args to translated lambda method signature + Type type = types.createMethodTypeWithParameters( + generatedLambdaSig(), + TreeInfo.types(syntheticParams)); + + // If instance access isn't needed, make it static. + // Interface instance methods must be default methods. + // Lambda methods are private synthetic. + // Inherit ACC_STRICT from the enclosing method, or, for clinit, + // from the class. + long flags = SYNTHETIC | LAMBDA_METHOD | + owner.flags_field & STRICTFP | + owner.owner.flags_field & STRICTFP | + PRIVATE | + (thisReferenced? (inInterface? DEFAULT : 0) : STATIC); + + translatedSym.type = type; + translatedSym.name = name; + translatedSym.flags_field = flags; } /** - * Method references to local class constructors, may, if the local - * class references local variables, have implicit constructor - * parameters added in Lower; As a result, the invokedynamic bootstrap - * information added in the LambdaToMethod pass will have the wrong - * signature. Hooks between Lower and LambdaToMethod have been added to - * handle normal "new" in this case. This visitor converts potentially - * affected method references into a lambda containing a normal - * expression. + * For a serializable lambda, generate a disambiguating string + * which maximizes stability across deserialization. * - * @param tree + * @return String to differentiate synthetic lambda method names */ - @Override - public void visitReference(JCMemberReference tree) { - ReferenceTranslationContext rcontext = new ReferenceTranslationContext(tree); - contextMap.put(tree, rcontext); - super.visitReference(tree); - if (dumpLambdaToMethodStats) { - log.note(tree, Notes.MrefStat(rcontext.needsAltMetafactory(), null)); + private String serializedLambdaDisambiguation(Symbol owner) { + StringBuilder buf = new StringBuilder(); + // Append the enclosing method signature to differentiate + // overloaded enclosing methods. For lambdas enclosed in + // lambdas, the generated lambda method will not have type yet, + // but the enclosing method's name will have been generated + // with this same method, so it will be unique and never be + // overloaded. 
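+            // e.g. (illustrative) the string "(I)V:java.lang.Runnable f=" would be
+            // built for a lambda targeting Runnable, assigned to a variable f,
+            // declared inside an enclosing method with signature (I)V.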
+ Assert.check( + owner.type != null || + lambdaContext != null); + if (owner.type != null) { + buf.append(typeSig(owner.type, true)); + buf.append(":"); } - } - @Override - public void visitSelect(JCFieldAccess tree) { - if (context() != null && tree.sym.kind == VAR && - (tree.sym.name == names._this || - tree.sym.name == names._super)) { - // A select of this or super means, if we are in a lambda, - // we much have an instance context - TranslationContext localContext = context(); - while (localContext != null && !localContext.owner.isStatic()) { - if (localContext.tree.hasTag(LAMBDA)) { - JCClassDecl clazz = (JCClassDecl)capturedDecl(localContext.depth, tree.sym); - if (clazz == null) break; - ((LambdaTranslationContext)localContext).addSymbol(clazz.sym, CAPTURED_THIS); - } - localContext = localContext.prev; - } - } - super.visitSelect(tree); - } + // Add target type info + buf.append(types.findDescriptorSymbol(tree.type.tsym).owner.flatName()); + buf.append(" "); - @Override - public void visitVarDef(JCVariableDecl tree) { - TranslationContext context = context(); - if (context != null && context instanceof LambdaTranslationContext lambdaContext) { - for (Frame frame : frameStack) { - if (frame.tree.hasTag(VARDEF)) { - //skip variable frames inside a lambda: - continue; - } else if (frame.tree.hasTag(LAMBDA)) { - lambdaContext.addSymbol(tree.sym, LOCAL_VAR); - } else { - break; - } - } - // Check for type variables (including as type arguments). - // If they occur within class nested in a lambda, mark for erasure - Type type = tree.sym.asType(); + // Add variable assigned to + if (pendingVar != null) { + buf.append(pendingVar.flatName()); + buf.append("="); } - - List prevStack = frameStack; - try { - if (tree.sym.owner.kind == MTH) { - frameStack.head.addLocal(tree.sym); + //add captured locals info: type, name, order + for (Symbol fv : capturedVars) { + if (fv != owner) { + buf.append(typeSig(fv.type, true)); + buf.append(" "); + buf.append(fv.flatName()); + buf.append(","); } - frameStack = frameStack.prepend(new Frame(tree)); - super.visitVarDef(tree); - } - finally { - frameStack = prevStack; } + + return buf.toString(); } /** - * Return a valid owner given the current declaration stack - * (required to skip synthetic lambda symbols) + * For a non-serializable lambda, generate a simple method. 
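+     * The name has the shape lambda$&lt;enclosing&gt;$&lt;index&gt;; e.g. (illustrative)
+     * the first lambda in method m becomes lambda$m$0.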
+ * + * @return Name to use for the synthetic lambda method name */ - private Symbol owner() { - return owner(false); + private Name lambdaName(Symbol owner) { + StringBuilder buf = new StringBuilder(); + buf.append(names.lambda); + buf.append(syntheticMethodNameComponent(owner)); + buf.append("$"); + buf.append(kInfo.syntheticNameIndex(buf, 0)); + return names.fromString(buf.toString()); } - @SuppressWarnings("fallthrough") - private Symbol owner(boolean skipLambda) { - List frameStack2 = frameStack; - while (frameStack2.nonEmpty()) { - switch (frameStack2.head.tree.getTag()) { - case VARDEF: - if (((JCVariableDecl)frameStack2.head.tree).sym.isDirectlyOrIndirectlyLocal()) { - frameStack2 = frameStack2.tail; - break; - } - JCClassDecl cdecl = (JCClassDecl)frameStack2.tail.head.tree; - return initSym(cdecl.sym, - ((JCVariableDecl)frameStack2.head.tree).sym.flags() & STATIC); - case BLOCK: - JCClassDecl cdecl2 = (JCClassDecl)frameStack2.tail.head.tree; - return initSym(cdecl2.sym, - ((JCBlock)frameStack2.head.tree).flags & STATIC); - case CLASSDEF: - return ((JCClassDecl)frameStack2.head.tree).sym; - case METHODDEF: - return ((JCMethodDecl)frameStack2.head.tree).sym; - case LAMBDA: - if (!skipLambda) - return ((LambdaTranslationContext)contextMap - .get(frameStack2.head.tree)).translatedSym; - default: - frameStack2 = frameStack2.tail; - } - } - Assert.error(); - return null; - } - - private Symbol initSym(ClassSymbol csym, long flags) { - boolean isStatic = (flags & STATIC) != 0; - if (isStatic) { - /* static clinits are generated in Gen, so we need to use a fake - * one. Attr creates a fake clinit method while attributing - * lambda expressions used as initializers of static fields, so - * let's use that one. - */ - MethodSymbol clinit = attr.removeClinit(csym); - if (clinit != null) { - clinits.put(csym, clinit); - return clinit; - } - - /* if no clinit is found at Attr, then let's try at clinits. - */ - clinit = (MethodSymbol)clinits.get(csym); - if (clinit == null) { - /* no luck, let's create a new one - */ - clinit = makePrivateSyntheticMethod(STATIC, - names.clinit, - new MethodType(List.nil(), syms.voidType, - List.nil(), syms.methodClass), - csym); - clinits.put(csym, clinit); - } - return clinit; + /** + * @return Method name in a form that can be folded into a + * component of a synthetic method name + */ + String syntheticMethodNameComponent(Symbol owner) { + long ownerFlags = owner.flags(); + if ((ownerFlags & BLOCK) != 0) { + return (ownerFlags & STATIC) != 0 ? 
+ "static" : "new"; + } else if (owner.isConstructor()) { + return "new"; } else { - //get the first constructor and treat it as the instance init sym - for (Symbol s : csym.members_field.getSymbolsByName(names.init)) { - return s; - } + return owner.name.toString(); } - Assert.error("init not found"); - return null; - } - - private JCTree directlyEnclosingLambda() { - if (frameStack.isEmpty()) { - return null; - } - List frameStack2 = frameStack; - while (frameStack2.nonEmpty()) { - switch (frameStack2.head.tree.getTag()) { - case CLASSDEF: - case METHODDEF: - return null; - case LAMBDA: - return frameStack2.head.tree; - default: - frameStack2 = frameStack2.tail; - } - } - Assert.error(); - return null; - } - - private boolean inClassWithinLambda() { - if (frameStack.isEmpty()) { - return false; - } - List frameStack2 = frameStack; - boolean classFound = false; - while (frameStack2.nonEmpty()) { - switch (frameStack2.head.tree.getTag()) { - case LAMBDA: - return classFound; - case CLASSDEF: - classFound = true; - frameStack2 = frameStack2.tail; - break; - default: - frameStack2 = frameStack2.tail; - } - } - // No lambda - return false; } /** - * Return the declaration corresponding to a symbol in the enclosing - * scope; the depth parameter is used to filter out symbols defined - * in nested scopes (which do not need to undergo capture). + * For a serializable lambda, generate a method name which maximizes + * name stability across deserialization. + * + * @return Name to use for the synthetic lambda method name */ - private JCTree capturedDecl(int depth, Symbol sym) { - Assert.check(sym.kind != TYP); - int currentDepth = frameStack.size() - 1; - for (Frame block : frameStack) { - switch (block.tree.getTag()) { - case CLASSDEF: - ClassSymbol clazz = ((JCClassDecl)block.tree).sym; - if (clazz.isSubClass(sym.enclClass(), types)) { - return currentDepth > depth ? null : block.tree; - } - break; - case VARDEF: - if ((((JCVariableDecl)block.tree).sym == sym && - sym.owner.kind == MTH) || //only locals are captured - (block.locals != null && block.locals.contains(sym))) { - return currentDepth > depth ? null : block.tree; - } - break; - case BLOCK: - case METHODDEF: - case LAMBDA: - if (block.locals != null && block.locals.contains(sym)) { - return currentDepth > depth ? null : block.tree; - } - break; - default: - Assert.error("bad decl kind " + block.tree.getTag()); - } - currentDepth--; - } - return null; - } - - private TranslationContext context() { - for (Frame frame : frameStack) { - TranslationContext context = contextMap.get(frame.tree); - if (context != null) { - return context; - } - } - return null; + private Name serializedLambdaName(Symbol owner) { + StringBuilder buf = new StringBuilder(); + buf.append(names.lambda); + // Append the name of the method enclosing the lambda. + buf.append(syntheticMethodNameComponent(owner)); + buf.append('$'); + // Append a hash of the disambiguating string : enclosing method + // signature, etc. + String disam = serializedLambdaDisambiguation(owner); + buf.append(Integer.toHexString(disam.hashCode())); + buf.append('$'); + // The above appended name components may not be unique, append + // a count based on the above name components. 
+ buf.append(kInfo.syntheticNameIndex(buf, 1)); + String result = buf.toString(); + //System.err.printf("serializedLambdaName: %s -- %s\n", result, disam); + return names.fromString(result); } /** - * This is used to filter out those identifiers that needs to be adjusted - * when translating away lambda expressions + * Translate a symbol of a given kind into something suitable for the + * synthetic lambda body */ - private boolean lambdaIdentSymbolFilter(Symbol sym) { - return (sym.kind == VAR || sym.kind == MTH) - && !sym.isStatic() - && sym.name != names.init; - } - - private class Frame { - final JCTree tree; - List locals; - - public Frame(JCTree tree) { - this.tree = tree; + VarSymbol translate(final VarSymbol sym, LambdaSymbolKind skind) { + VarSymbol ret; + boolean propagateAnnos = true; + switch (skind) { + case CAPTURED_VAR: + Name name = (sym.flags() & LOCAL_CAPTURE_FIELD) != 0 ? + sym.baseSymbol().name : sym.name; + ret = new VarSymbol(SYNTHETIC | FINAL | PARAMETER, name, types.erasure(sym.type), translatedSym); + propagateAnnos = false; + break; + case LOCAL_VAR: + ret = new VarSymbol(sym.flags() & FINAL, sym.name, sym.type, translatedSym); + ret.pos = sym.pos; + // If sym.data == ElementKind.EXCEPTION_PARAMETER, + // set ret.data = ElementKind.EXCEPTION_PARAMETER too. + // Because method com.sun.tools.javac.jvm.Code.fillExceptionParameterPositions and + // com.sun.tools.javac.jvm.Code.fillLocalVarPosition would use it. + // See JDK-8257740 for more information. + if (sym.isExceptionParameter()) { + ret.setData(ElementKind.EXCEPTION_PARAMETER); + } + break; + case PARAM: + ret = new VarSymbol((sym.flags() & FINAL) | PARAMETER, sym.name, types.erasure(sym.type), translatedSym); + ret.pos = sym.pos; + break; + default: + Assert.error(skind.name()); + throw new AssertionError(); } - - void addLocal(Symbol sym) { - if (locals == null) { - locals = List.nil(); - } - locals = locals.prepend(sym); + if (ret != sym && propagateAnnos) { + ret.setDeclarationAttributes(sym.getRawAttributes()); + ret.setTypeAttributes(sym.getRawTypeAttributes()); } + return ret; } - /** - * This class is used to store important information regarding translation of - * lambda expression/method references (see subclasses). - */ - abstract class TranslationContext { - - /** the underlying (untranslated) tree */ - final T tree; - - /** points to the adjusted enclosing scope in which this lambda/mref expression occurs */ - final Symbol owner; - - /** the depth of this lambda expression in the frame stack */ - final int depth; - - /** the enclosing translation context (set for nested lambdas/mref) */ - final TranslationContext prev; - - /** list of methods to be bridged by the meta-factory */ - final List bridges; - - TranslationContext(T tree) { - this.tree = tree; - this.owner = owner(true); - this.depth = frameStack.size() - 1; - this.prev = context(); - ClassSymbol csym = - types.makeFunctionalInterfaceClass(attrEnv, names.empty, tree.target, ABSTRACT | INTERFACE); - this.bridges = types.functionalInterfaceBridges(csym); - } - - /** does this functional expression need to be created using alternate metafactory? */ - boolean needsAltMetafactory() { - return tree.target.isIntersection() || - isSerializable() || - bridges.length() > 1; - } + VarSymbol addLocal(VarSymbol sym) { + return addSymbol(sym, LambdaSymbolKind.LOCAL_VAR); + } - /** does this functional expression require serialization support? 
*/ - boolean isSerializable() { - if (forceSerializable) { - return true; - } - return types.asSuper(tree.target, syms.serializableType.tsym) != null; - } + private VarSymbol addSymbol(VarSymbol sym, LambdaSymbolKind skind) { + return lambdaProxies.computeIfAbsent(sym, s -> translate(s, skind)); + } - /** - * @return Name of the enclosing method to be folded into synthetic - * method name - */ - String enclosingMethodName() { - return syntheticMethodNameComponent(owner.name); - } + JCTree translate(JCIdent lambdaIdent) { + Symbol tSym = lambdaProxies.get(lambdaIdent.sym); + return tSym != null ? + make.Ident(tSym).setType(lambdaIdent.type) : + null; + } - /** - * @return Method name in a form that can be folded into a - * component of a synthetic method name - */ - String syntheticMethodNameComponent(Name name) { - if (name == null) { - return "null"; - } - String methodName = name.toString(); - if (methodName.equals("")) { - methodName = "static"; - } else if (methodName.equals("")) { - methodName = "new"; - } - return methodName; - } + Type generatedLambdaSig() { + return types.erasure(tree.getDescriptorType(types)); } /** - * This class retains all the useful information about a lambda expression; - * the contents of this class are filled by the LambdaAnalyzer visitor, - * and the used by the main translation routines in order to adjust references - * to captured locals/members, etc. + * Compute the set of local variables captured by this lambda expression. + * Also determines whether this lambda expression captures the enclosing 'this'. */ - class LambdaTranslationContext extends TranslationContext { - - /** variable in the enclosing context to which this lambda is assigned */ - final Symbol self; - - /** variable in the enclosing context to which this lambda is assigned */ - final Symbol assignedTo; - - Map> translatedSymbols; - - /** the synthetic symbol for the method hoisting the translated lambda */ - MethodSymbol translatedSym; - - List syntheticParams; - - LambdaTranslationContext(JCLambda tree) { - super(tree); - Frame frame = frameStack.head; - switch (frame.tree.getTag()) { - case VARDEF: - assignedTo = self = ((JCVariableDecl) frame.tree).sym; - break; - case ASSIGN: - self = null; - assignedTo = TreeInfo.symbol(((JCAssign) frame.tree).getVariable()); - break; - default: - assignedTo = self = null; - break; - } - - // This symbol will be filled-in in complete - if (owner.kind == MTH) { - final MethodSymbol originalOwner = (MethodSymbol)owner.clone(owner.owner); - this.translatedSym = new MethodSymbol(SYNTHETIC | PRIVATE, null, null, owner.enclClass()) { - @Override - public MethodSymbol originalEnclosingMethod() { - return originalOwner; - } - }; - } else { - this.translatedSym = makePrivateSyntheticMethod(0, null, null, owner.enclClass()); - } - translatedSymbols = new EnumMap<>(LambdaSymbolKind.class); - - translatedSymbols.put(PARAM, new LinkedHashMap<>()); - translatedSymbols.put(LOCAL_VAR, new LinkedHashMap<>()); - translatedSymbols.put(CAPTURED_VAR, new LinkedHashMap<>()); - translatedSymbols.put(CAPTURED_THIS, new LinkedHashMap<>()); - } - - /** - * For a serializable lambda, generate a disambiguating string - * which maximizes stability across deserialization. - * - * @return String to differentiate synthetic lambda method names - */ - private String serializedLambdaDisambiguation() { - StringBuilder buf = new StringBuilder(); - // Append the enclosing method signature to differentiate - // overloaded enclosing methods. 
For lambdas enclosed in - // lambdas, the generated lambda method will not have type yet, - // but the enclosing method's name will have been generated - // with this same method, so it will be unique and never be - // overloaded. - Assert.check( - owner.type != null || - directlyEnclosingLambda() != null); - if (owner.type != null) { - buf.append(typeSig(owner.type, true)); - buf.append(":"); - } - - // Add target type info - buf.append(types.findDescriptorSymbol(tree.type.tsym).owner.flatName()); - buf.append(" "); - - // Add variable assigned to - if (assignedTo != null) { - buf.append(assignedTo.flatName()); - buf.append("="); - } - //add captured locals info: type, name, order - for (Symbol fv : getSymbolMap(CAPTURED_VAR).keySet()) { - if (fv != self) { - buf.append(typeSig(fv.type, true)); - buf.append(" "); - buf.append(fv.flatName()); - buf.append(","); - } - } - - return buf.toString(); - } + class LambdaCaptureScanner extends CaptureScanner { + boolean capturesThis; + Set seenClasses = new HashSet<>(); - /** - * For a non-serializable lambda, generate a simple method. - * - * @return Name to use for the synthetic lambda method name - */ - private Name lambdaName() { - return names.lambda.append(names.fromString(enclosingMethodName() + "$" + lambdaCount++)); + LambdaCaptureScanner(JCLambda ownerTree) { + super(ownerTree); } - /** - * For a serializable lambda, generate a method name which maximizes - * name stability across deserialization. - * - * @return Name to use for the synthetic lambda method name - */ - private Name serializedLambdaName() { - StringBuilder buf = new StringBuilder(); - buf.append(names.lambda); - // Append the name of the method enclosing the lambda. - buf.append(enclosingMethodName()); - buf.append('$'); - // Append a hash of the disambiguating string : enclosing method - // signature, etc. - String disam = serializedLambdaDisambiguation(); - buf.append(Integer.toHexString(disam.hashCode())); - buf.append('$'); - // The above appended name components may not be unique, append - // a count based on the above name components. - buf.append(syntheticMethodNameCounts.getIndex(buf)); - String result = buf.toString(); - //System.err.printf("serializedLambdaName: %s -- %s\n", result, disam); - return names.fromString(result); - } - - /** - * Translate a symbol of a given kind into something suitable for the - * synthetic lambda body - */ - Symbol translate(final Symbol sym, LambdaSymbolKind skind) { - Symbol ret; - switch (skind) { - case CAPTURED_THIS: - ret = sym; // self represented - break; - case CAPTURED_VAR: - ret = new VarSymbol(SYNTHETIC | FINAL | PARAMETER, sym.name, types.erasure(sym.type), translatedSym) { - @Override - public Symbol baseSymbol() { - //keep mapping with original captured symbol - return sym; - } - }; - break; - case LOCAL_VAR: - ret = new VarSymbol(sym.flags() & FINAL, sym.name, sym.type, translatedSym) { - @Override - public Symbol baseSymbol() { - //keep mapping with original symbol - return sym; - } - }; - ((VarSymbol) ret).pos = ((VarSymbol) sym).pos; - // If sym.data == ElementKind.EXCEPTION_PARAMETER, - // set ret.data = ElementKind.EXCEPTION_PARAMETER too. - // Because method com.sun.tools.javac.jvm.Code.fillExceptionParameterPositions and - // com.sun.tools.javac.jvm.Code.fillLocalVarPosition would use it. - // See JDK-8257740 for more information. 
- if (((VarSymbol) sym).isExceptionParameter()) { - ((VarSymbol) ret).setData(ElementKind.EXCEPTION_PARAMETER); - } - break; - case PARAM: - ret = new VarSymbol((sym.flags() & FINAL) | PARAMETER, sym.name, types.erasure(sym.type), translatedSym); - ((VarSymbol) ret).pos = ((VarSymbol) sym).pos; - // Set ret.data. Same as case LOCAL_VAR above. - if (((VarSymbol) sym).isExceptionParameter()) { - ((VarSymbol) ret).setData(ElementKind.EXCEPTION_PARAMETER); - } - break; - default: - Assert.error(skind.name()); - throw new AssertionError(); - } - if (ret != sym && skind.propagateAnnotations()) { - ret.setDeclarationAttributes(sym.getRawAttributes()); - ret.setTypeAttributes(sym.getRawTypeAttributes()); - } - return ret; - } - - void addSymbol(Symbol sym, LambdaSymbolKind skind) { - Map transMap = getSymbolMap(skind); - if (!transMap.containsKey(sym)) { - transMap.put(sym, translate(sym, skind)); - } - } - - Map getSymbolMap(LambdaSymbolKind skind) { - Map m = translatedSymbols.get(skind); - Assert.checkNonNull(m); - return m; + @Override + public void visitClassDef(JCClassDecl tree) { + seenClasses.add(tree.sym); + super.visitClassDef(tree); } - JCTree translate(JCIdent lambdaIdent) { - for (LambdaSymbolKind kind : LambdaSymbolKind.values()) { - Map m = getSymbolMap(kind); - switch(kind) { - default: - if (m.containsKey(lambdaIdent.sym)) { - Symbol tSym = m.get(lambdaIdent.sym); - JCTree t = make.Ident(tSym).setType(lambdaIdent.type); - return t; - } - break; + @Override + public void visitIdent(JCIdent tree) { + if (!tree.sym.isStatic() && + tree.sym.owner.kind == TYP && + (tree.sym.kind == VAR || tree.sym.kind == MTH) && + !seenClasses.contains(tree.sym.owner)) { + if ((tree.sym.flags() & LOCAL_CAPTURE_FIELD) != 0) { + // a local, captured by Lower - re-capture! + addFreeVar((VarSymbol) tree.sym); + } else { + // a reference to an enclosing field or method, we need to capture 'this' + capturesThis = true; } + } else { + // might be a local capture + super.visitIdent(tree); } - return null; } - /** - * The translatedSym is not complete/accurate until the analysis is - * finished. Once the analysis is finished, the translatedSym is - * "completed" -- updated with type information, access modifiers, - * and full parameter list. - */ - void complete() { - if (syntheticParams != null) { - return; - } - boolean inInterface = translatedSym.owner.isInterface(); - boolean thisReferenced = !getSymbolMap(CAPTURED_THIS).isEmpty(); - - // If instance access isn't needed, make it static. - // Interface instance methods must be default methods. - // Lambda methods are private synthetic. - // Inherit ACC_STRICT from the enclosing method, or, for clinit, - // from the class. - translatedSym.flags_field = SYNTHETIC | LAMBDA_METHOD | - owner.flags_field & STRICTFP | - owner.owner.flags_field & STRICTFP | - PRIVATE | - (thisReferenced? (inInterface? 
DEFAULT : 0) : STATIC); - - //compute synthetic params - ListBuffer params = new ListBuffer<>(); - ListBuffer parameterSymbols = new ListBuffer<>(); - - // The signature of the method is augmented with the following - // synthetic parameters: - // - // 1) reference to enclosing contexts captured by the lambda expression - // 2) enclosing locals captured by the lambda expression - for (Symbol thisSym : getSymbolMap(CAPTURED_VAR).values()) { - params.append(make.VarDef((VarSymbol) thisSym, null)); - parameterSymbols.append((VarSymbol) thisSym); - } - for (Symbol thisSym : getSymbolMap(PARAM).values()) { - params.append(make.VarDef((VarSymbol) thisSym, null)); - parameterSymbols.append((VarSymbol) thisSym); + @Override + public void visitSelect(JCFieldAccess tree) { + if (tree.sym.kind == VAR && + (tree.sym.name == names._this || + tree.sym.name == names._super) && + !seenClasses.contains(tree.sym.type.tsym)) { + capturesThis = true; } - syntheticParams = params.toList(); - - translatedSym.params = parameterSymbols.toList(); - - // Compute and set the lambda name - translatedSym.name = isSerializable() - ? serializedLambdaName() - : lambdaName(); - - //prepend synthetic args to translated lambda method signature - translatedSym.type = types.createMethodTypeWithParameters( - generatedLambdaSig(), - TreeInfo.types(syntheticParams)); + super.visitSelect(tree); } - Type generatedLambdaSig() { - return types.erasure(tree.getDescriptorType(types)); + @Override + public void visitAnnotation(JCAnnotation tree) { + // do nothing (annotation values look like captured instance fields) } } - /** - * Simple subclass modelling the translation context of a method reference. + /* + * These keys provide mappings for various translated lambda symbols + * and the prevailing order must be maintained. */ - final class ReferenceTranslationContext extends TranslationContext { - - ReferenceTranslationContext(JCMemberReference tree) { - super(tree); - } - } - } - // - - /* - * These keys provide mappings for various translated lambda symbols - * and the prevailing order must be maintained. - */ - enum LambdaSymbolKind { - PARAM, // original to translated lambda parameters - LOCAL_VAR, // original to translated lambda locals - CAPTURED_VAR, // variables in enclosing scope to translated synthetic parameters - CAPTURED_THIS; // class symbols to translated synthetic parameters (for captured member access) - - boolean propagateAnnotations() { - switch (this) { - case CAPTURED_VAR: - case CAPTURED_THIS: - return false; - default: - return true; - } + enum LambdaSymbolKind { + PARAM, // original to translated lambda parameters + LOCAL_VAR, // original to translated lambda locals + CAPTURED_VAR; // variables in enclosing scope to translated synthetic parameters } } @@ -1791,7 +1304,7 @@ private class L2MSignatureGenerator extends Types.SignatureGenerator { /** * Are signatures incompatible with JVM spec allowed? - * Used by {@link LambdaTranslationContext#serializedLambdaDisambiguation()}. + * Used by {@link LambdaTranslationContext#serializedLambdaDisambiguation(Symbol)}}. 
*/ boolean allowIllegalSignatures; diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java index c5fd2177d49a7..62117583a96e9 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Lower.java @@ -267,26 +267,22 @@ List enumNamesFor(ClassSymbol c) { Map> freevarCache; /** A navigator class for collecting the free variables accessed - * from a local class. There is only one case; all other cases simply - * traverse down the tree. This class doesn't deal with the specific - * of Lower - it's an abstract visitor that is meant to be reused in - * order to share the local variable capture logic. + * from a local class. */ - abstract class BasicFreeVarCollector extends TreeScanner { + class FreeVarCollector extends CaptureScanner { - /** Add all free variables of class c to fvs list - * unless they are already there. - */ - abstract void addFreeVars(ClassSymbol c); + FreeVarCollector(JCTree ownerTree) { + super(ownerTree); + } - /** If tree refers to a variable in owner of local class, add it to - * free variables list. - */ - public void visitIdent(JCIdent tree) { - visitSymbol(tree.sym); + void addFreeVars(ClassSymbol c) { + List fvs = freevarCache.get(c); + if (fvs != null) { + for (List l = fvs; l.nonEmpty(); l = l.tail) { + addFreeVar(l.head); + } + } } - // where - abstract void visitSymbol(Symbol _sym); /** If tree refers to a class instance creation expression * add all free variables of the freshly created class. @@ -306,84 +302,6 @@ public void visitApply(JCMethodInvocation tree) { } super.visitApply(tree); } - - @Override - public void visitYield(JCYield tree) { - scan(tree.value); - } - - } - - /** - * Lower-specific subclass of {@code BasicFreeVarCollector}. - */ - class FreeVarCollector extends BasicFreeVarCollector { - - /** The owner of the local class. - */ - Symbol owner; - - /** The local class. - */ - ClassSymbol clazz; - - /** The list of owner's variables accessed from within the local class, - * without any duplicates. - */ - List fvs; - - FreeVarCollector(ClassSymbol clazz) { - this.clazz = clazz; - this.owner = clazz.owner; - this.fvs = List.nil(); - } - - /** Add free variable to fvs list unless it is already there. 
- */ - private void addFreeVar(VarSymbol v) { - for (List l = fvs; l.nonEmpty(); l = l.tail) - if (l.head == v) return; - fvs = fvs.prepend(v); - } - - @Override - void addFreeVars(ClassSymbol c) { - List fvs = freevarCache.get(c); - if (fvs != null) { - for (List l = fvs; l.nonEmpty(); l = l.tail) { - addFreeVar(l.head); - } - } - } - - @Override - void visitSymbol(Symbol _sym) { - Symbol sym = _sym; - if (sym.kind == VAR || sym.kind == MTH) { - if (sym != null && sym.owner != owner) - sym = proxies.get(sym); - if (sym != null && sym.owner == owner) { - VarSymbol v = (VarSymbol)sym; - if (v.getConstValue() == null) { - addFreeVar(v); - } - } - } - } - } - - ClassSymbol ownerToCopyFreeVarsFrom(ClassSymbol c) { - if (!c.isDirectlyOrIndirectlyLocal()) { - return null; - } - Symbol currentOwner = c.owner; - while (currentOwner.owner.kind.matches(KindSelector.TYP) && currentOwner.isDirectlyOrIndirectlyLocal()) { - currentOwner = currentOwner.owner; - } - if (currentOwner.owner.kind.matches(KindSelector.VAL_MTH) && c.isSubClass(currentOwner, types)) { - return (ClassSymbol)currentOwner; - } - return null; } /** Return the variables accessed from within a local class, which @@ -395,22 +313,10 @@ List freevars(ClassSymbol c) { if (fvs != null) { return fvs; } - if (c.owner.kind.matches(KindSelector.VAL_MTH) && !c.isStatic()) { - FreeVarCollector collector = new FreeVarCollector(c); - collector.scan(classDef(c)); - fvs = collector.fvs; - freevarCache.put(c, fvs); - return fvs; - } else { - ClassSymbol owner = ownerToCopyFreeVarsFrom(c); - if (owner != null) { - fvs = freevarCache.get(owner); - freevarCache.put(c, fvs); - return fvs; - } else { - return List.nil(); - } - } + FreeVarCollector collector = new FreeVarCollector(classDef(c)); + fvs = collector.analyzeCaptures().reverse(); + freevarCache.put(c, fvs); + return fvs; } Map enumSwitchMap = new LinkedHashMap<>(); @@ -1501,7 +1407,7 @@ Name proxyName(Name name, int index) { * @param owner The class in which the definitions go. 
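+     * Each proxy is created with the LOCAL_CAPTURE_FIELD flag and overrides
+     * baseSymbol() to return the captured variable, so that LambdaToMethod
+     * can later re-capture locals already captured by Lower.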
*/ List freevarDefs(int pos, List freevars, Symbol owner) { - return freevarDefs(pos, freevars, owner, 0); + return freevarDefs(pos, freevars, owner, LOCAL_CAPTURE_FIELD); } List freevarDefs(int pos, List freevars, Symbol owner, @@ -1517,7 +1423,12 @@ List freevarDefs(int pos, List freevars, Symbol owner proxyName = proxyName(v.name, index++); } while (!proxyNames.add(proxyName)); VarSymbol proxy = new VarSymbol( - flags, proxyName, v.erasure(types), owner); + flags, proxyName, v.erasure(types), owner) { + @Override + public Symbol baseSymbol() { + return v; + } + }; proxies.put(v, proxy); JCVariableDecl vd = make.at(pos).VarDef(proxy, null); vd.vartype = access(vd.vartype); diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java index bfe35dbec9c1f..d232f2e6d9f3d 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/TransTypes.java @@ -679,6 +679,7 @@ JCExpression lambda() { JCLambda slam = make.Lambda(params.toList(), expr); slam.target = tree.target; + slam.owner = tree.owner; slam.type = tree.type; slam.pos = tree.pos; slam.wasMethodReference = true; diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java index dfa92efae7421..6679bb43fb807 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassWriter.java @@ -329,7 +329,7 @@ protected int writeEnclosingMethodAttribute(Name attributeName, ClassSymbol c) { int alenIdx = writeAttr(attributeName); ClassSymbol enclClass = c.owner.enclClass(); MethodSymbol enclMethod = - (c.owner.type == null // local to init block + ((c.owner.flags() & BLOCK) != 0 // local to init block || c.owner.kind != MTH) // or member init ? null : ((MethodSymbol)c.owner).originalEnclosingMethod(); diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java index 6f3b8b5d8aaca..6041da6723aa4 100644 --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java @@ -807,6 +807,8 @@ public JCFunctionalExpression() { /** list of target types inferred for this functional expression. */ public Type target; + /** The owner of this functional expression. */ + public Symbol owner; public Type getDescriptorType(Types types) { return target != null ? 
types.findDescriptorType(target) : types.createErrorType(null);
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeScanner.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeScanner.java
index 0336f3c4191ac..b9ae35da9df1f 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeScanner.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeScanner.java
@@ -273,8 +273,8 @@ public void visitNewArray(JCNewArray tree) {
     }
 
     public void visitLambda(JCLambda tree) {
-        scan(tree.body);
         scan(tree.params);
+        scan(tree.body);
     }
 
     public void visitParens(JCParens tree) {
diff --git a/src/jdk.hotspot.agent/share/man/jhsdb.1 b/src/jdk.hotspot.agent/share/man/jhsdb.1
index 5b65f7eafb465..93cc1fedb158a 100644
--- a/src/jdk.hotspot.agent/share/man/jhsdb.1
+++ b/src/jdk.hotspot.agent/share/man/jhsdb.1
@@ -1,4 +1,4 @@
-.\" Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,10 @@ analyze the content of a core dump from a crashed Java Virtual Machine (JVM)
 .SH SYNOPSIS
 .PP
+\f[B]WARNING:\f[R] The \f[V]debugd\f[R] subcommand and
+\f[V]--connect\f[R] options are deprecated.
+They will be removed in a future release.
+.PP
 \f[V]jhsdb\f[R] \f[V]clhsdb\f[R] [\f[V]--pid\f[R] \f[I]pid\f[R] |
 \f[V]--exe\f[R] \f[I]executable\f[R]
 \f[V]--core\f[R] \f[I]coredump\f[R]]
diff --git a/src/jdk.jpackage/share/man/jpackage.1 b/src/jdk.jpackage/share/man/jpackage.1
index 13d9c41c31d6a..09d340ec03399 100644
--- a/src/jdk.jpackage/share/man/jpackage.1
+++ b/src/jdk.jpackage/share/man/jpackage.1
@@ -111,6 +111,16 @@ Path where generated output file is placed
 Defaults to the current working directory.
 .RE
 .TP
+\f[V]--resource-dir\f[R] \f[I]path\f[R]
+Path to override jpackage resources
+.RS
+.PP
+(absolute path or relative to the current directory)
+.PP
+Icons, template files, and other resources of jpackage can be
+overridden by adding replacement resources to this directory.
+.RE
+.TP
 \f[V]--temp\f[R] \f[I]directory\f[R]
 Path of a new or empty directory used to create temporary files
 .RS
@@ -207,9 +217,9 @@ of key, value pairs
 .PP
 The keys \[dq]module\[dq], \[dq]main-jar\[dq], \[dq]main-class\[dq],
 \[dq]description\[dq], \[dq]arguments\[dq], \[dq]java-options\[dq],
-\[dq]app-version\[dq], \[dq]icon\[dq], \[dq]launcher-as-service\[dq],
-\[dq]win-console\[dq], \[dq]win-shortcut\[dq], \[dq]win-menu\[dq],
-\[dq]linux-app-category\[dq], and \[dq]linux-shortcut\[dq] can be used.
+\[dq]icon\[dq], \[dq]launcher-as-service\[dq], \[dq]win-console\[dq],
+\[dq]win-shortcut\[dq], \[dq]win-menu\[dq], and \[dq]linux-shortcut\[dq]
+can be used.
 .PP
 These options are added to, or used to overwrite, the original command
 line options to build an additional alternative launcher.
@@ -260,7 +270,7 @@ When this option is specified, the main module will be linked in the
 Java runtime image.
 Either --module or --main-jar option can be specified but not both.
.RE -.SS Platform dependent option for creating the application launcher: +.SS Platform dependent options for creating the application launcher: .SS Windows platform options (available only when running on Windows): .TP \f[V]--win-console\f[R] @@ -357,16 +367,6 @@ Path to the license file (absolute path or relative to the current directory) .RE .TP -\f[V]--resource-dir\f[R] \f[I]path\f[R] -Path to override jpackage resources -.RS -.PP -(absolute path or relative to the current directory) -.PP -Icons, template files, and other resources of jpackage can be -over-ridden by adding replacement resources to this directory. -.RE -.TP \f[V]--runtime-image\f[R] \f[I]path\f[R] Path of the predefined runtime image to install .RS diff --git a/test/hotspot/jtreg/compiler/floatingpoint/TestRoundFloatAll.java b/test/hotspot/jtreg/compiler/floatingpoint/TestRoundFloatAll.java new file mode 100644 index 0000000000000..2ba60fd8e319f --- /dev/null +++ b/test/hotspot/jtreg/compiler/floatingpoint/TestRoundFloatAll.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * @test + * @bug 8321010 + * @summary Test intrinsic for Math.round(float) in full 32 bits range + * + * @library /test/lib / + * @modules java.base/jdk.internal.math + * @requires os.arch == "riscv64" + * @run main/othervm -XX:-TieredCompilation -Xbatch -XX:CompileThresholdScaling=0.3 -XX:-UseSuperWord + * -XX:CompileCommand=compileonly,compiler.floatingpoint.TestRoundFloatAll::test* + * compiler.floatingpoint.TestRoundFloatAll + */ + +package compiler.floatingpoint; + +import static compiler.lib.golden.GoldenRound.golden_round; + +public class TestRoundFloatAll { + + public static void main(String args[]) { + test(); + } + + // return true when test fails + static boolean test(int n, float f) { + int actual = Math.round(f); + int expected = golden_round(f); + if (actual != expected) { + System.err.println("round error, input: " + f + ", res: " + actual + "expected: " + expected + ", input hex: " + n); + return true; + } + return false; + } + + static void test() { + final int ITERS = 11000; + boolean fail = false; + + // Warmup + System.out.println("Warmup"); + for (int i=0; i> (FloatConsts.SIGNIFICAND_WIDTH - 1); + int shift = (FloatConsts.SIGNIFICAND_WIDTH - 2 + + FloatConsts.EXP_BIAS) - biasedExp; + if ((shift & -32) == 0) { // shift >= 0 && shift < 32 + // a is a finite number such that pow(2,-32) <= ulp(a) < 1 + int r = ((intBits & FloatConsts.SIGNIF_BIT_MASK) + | (FloatConsts.SIGNIF_BIT_MASK + 1)); + if (intBits < 0) { + r = -r; + } + // In the comments below each Java expression evaluates to the value + // the corresponding mathematical expression: + // (r) evaluates to a / ulp(a) + // (r >> shift) evaluates to floor(a * 2) + // ((r >> shift) + 1) evaluates to floor((a + 1/2) * 2) + // (((r >> shift) + 1) >> 1) evaluates to floor(a + 1/2) + return ((r >> shift) + 1) >> 1; + } else { + // a is either + // - a finite number with abs(a) < exp(2,FloatConsts.SIGNIFICAND_WIDTH-32) < 1/2 + // - a finite number with ulp(a) >= 1 and hence a is a mathematical integer + // - an infinity or NaN + return (int) a; + } + } + + + public static long golden_round(double a) { + // below code is copied from java.base/share/classes/java/lang/Math.java + // public static int round(double a) { ... 
+
+  public static long golden_round(double a) {
+    // below code is copied from java.base/share/classes/java/lang/Math.java
+    // public static long round(double a) { ... }
+    long longBits = Double.doubleToRawLongBits(a);
+    long biasedExp = (longBits & DoubleConsts.EXP_BIT_MASK)
+        >> (DoubleConsts.SIGNIFICAND_WIDTH - 1);
+    long shift = (DoubleConsts.SIGNIFICAND_WIDTH - 2
+        + DoubleConsts.EXP_BIAS) - biasedExp;
+    if ((shift & -64) == 0) { // shift >= 0 && shift < 64
+      // a is a finite number such that pow(2,-64) <= ulp(a) < 1
+      long r = ((longBits & DoubleConsts.SIGNIF_BIT_MASK)
+          | (DoubleConsts.SIGNIF_BIT_MASK + 1));
+      if (longBits < 0) {
+        r = -r;
+      }
+      // In the comments below each Java expression evaluates to the value of
+      // the corresponding mathematical expression:
+      // (r) evaluates to a / ulp(a)
+      // (r >> shift) evaluates to floor(a * 2)
+      // ((r >> shift) + 1) evaluates to floor((a + 1/2) * 2)
+      // (((r >> shift) + 1) >> 1) evaluates to floor(a + 1/2)
+      return ((r >> shift) + 1) >> 1;
+    } else {
+      // a is either
+      // - a finite number with abs(a) < exp(2,DoubleConsts.SIGNIFICAND_WIDTH-64) < 1/2
+      // - a finite number with ulp(a) >= 1 and hence a is a mathematical integer
+      // - an infinity or NaN
+      return (long) a;
+    }
+  }
+}
diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
index 162f384dc07b2..24fbe7e64afa0 100644
--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
+++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
@@ -276,25 +276,25 @@ public class IRNode {
     public static final String ALLOC = PREFIX + "ALLOC" + POSTFIX;
     static {
-        String optoRegex = "(.*precise .*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*)\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END;
+        String optoRegex = "(.*precise .*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*)\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_instance" + END;
         allocNodes(ALLOC, "Allocate", optoRegex);
     }
 
     public static final String ALLOC_OF = COMPOSITE_PREFIX + "ALLOC_OF" + POSTFIX;
     static {
-        String regex = "(.*precise .*" + IS_REPLACED + ":.*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*)\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END;
+        String regex = "(.*precise .*" + IS_REPLACED + ":.*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*)\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_instance" + END;
         optoOnly(ALLOC_OF, regex);
     }
 
     public static final String ALLOC_ARRAY = PREFIX + "ALLOC_ARRAY" + POSTFIX;
     static {
-        String optoRegex = "(.*precise \[.*\R((.*(?i:mov|mv|xor|nop|spill).*|\s*|.*(LGHI|LI).*)\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END;
+        String optoRegex = "(.*precise \[.*\R((.*(?i:mov|mv|xor|nop|spill).*|\s*|.*(LGHI|LI).*)\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_array" + END;
         allocNodes(ALLOC_ARRAY, "AllocateArray", optoRegex);
     }
 
     public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "ALLOC_ARRAY_OF" + POSTFIX;
     static {
-        String regex = "(.*precise \[.*" + IS_REPLACED + ":.*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*|.*(LGHI|LI).*)\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END;
+        String regex = "(.*precise \[.*" + IS_REPLACED + ":.*\R((.*(?i:mov|mv|xorl|nop|spill).*|\s*|.*(LGHI|LI).*)\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_array" + END;
         optoOnly(ALLOC_ARRAY_OF, regex);
     }
diff --git a/test/hotspot/jtreg/compiler/vectorization/TestRoundVectRiscv64.java b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectRiscv64.java
new file mode 100644
index 0000000000000..530befbe2bcb5
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectRiscv64.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2024, Oracle
and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 1234567 + * @summary Auto-vectorize Math.round API + * @requires vm.compiler2.enabled + * @requires os.arch == "riscv64" + * @requires vm.cpu.features ~= ".*rvv.*" + * @library /test/lib / + * @run driver compiler.vectorization.TestRoundVectRiscv64 + */ + +package compiler.vectorization; + +import compiler.lib.ir_framework.*; + +public class TestRoundVectRiscv64 { + private static final int ARRLEN = 1024; + private static final int ITERS = 11000; + + private static double [] dinp; + private static long [] lout; + private static float [] finp; + private static int [] iout; + + public static void main(String args[]) { + TestFramework.runWithFlags("-XX:-TieredCompilation", + "-XX:CompileThresholdScaling=0.3"); + System.out.println("PASSED"); + } + + @Test + @IR(counts = {IRNode.ROUND_VD, "> 0"}) + public void test_round_double(long[] lout, double[] dinp) { + for (int i = 0; i < lout.length; i+=1) { + lout[i] = Math.round(dinp[i]); + } + } + + @Run(test = {"test_round_double"}, mode = RunMode.STANDALONE) + public void kernel_test_round_double() { + dinp = new double[ARRLEN]; + lout = new long[ARRLEN]; + for (int i = 0 ; i < ARRLEN; i++) { + dinp[i] = (double)i*1.4; + } + for (int i = 0; i < ITERS; i++) { + test_round_double(lout , dinp); + } + } + + @Test + @IR(counts = {IRNode.ROUND_VF, "> 0"}) + public void test_round_float(int[] iout, float[] finp) { + for (int i = 0; i < finp.length; i+=1) { + iout[i] = Math.round(finp[i]); + } + } + + @Run(test = {"test_round_float"}, mode = RunMode.STANDALONE) + public void kernel_test_round() { + finp = new float[ARRLEN]; + iout = new int[ARRLEN]; + for (int i = 0 ; i < ARRLEN; i++) { + finp[i] = (float)i*1.4f; + } + for (int i = 0; i < ITERS; i++) { + test_round_float(iout , finp); + } + } +} diff --git a/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorDoubleRandom.java b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorDoubleRandom.java new file mode 100644 index 0000000000000..cd1a59c6d24e0 --- /dev/null +++ b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorDoubleRandom.java @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @key randomness
+ * @bug 8321011
+ * @summary Test vector intrinsic for Math.round(double) with random input in 64 bits range, verify IR at the same time.
+ *
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.math
+ * @requires os.arch == "riscv64" & vm.cpu.features ~= ".*rvv.*"
+ * @run main compiler.vectorization.TestRoundVectorDoubleRandom
+ */
+
+package compiler.vectorization;
+
+import java.util.Random;
+import static compiler.lib.golden.GoldenRound.golden_round;
+import compiler.lib.ir_framework.IR;
+import compiler.lib.ir_framework.IRNode;
+import compiler.lib.ir_framework.Run;
+import compiler.lib.ir_framework.RunInfo;
+import compiler.lib.ir_framework.Test;
+import compiler.lib.ir_framework.TestFramework;
+import compiler.lib.ir_framework.Warmup;
+
+public class TestRoundVectorDoubleRandom {
+  private static final Random rand = new Random();
+
+  private static final int ITERS = 11000;
+  private static final int ARRLEN = rand.nextInt(4096-997) + 997;
+  private static final double ADD_INIT = -7500.;
+
+  private static final double[] input = new double[ARRLEN];
+  private static final long[] res = new long[ARRLEN];
+
+  public static void main(String args[]) {
+    TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3");
+    TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=8");
+    TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=16");
+    TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=32");
+  }
+
+  @Test
+  @IR(counts = {IRNode.ROUND_VD, "> 0"},
+      applyIf = {"MaxVectorSize", ">= 64"})
+  static void test_round(long[] a0, double[] a1) {
+    for (int i = 0; i < a0.length; i+=1) {
+      a0[i] = Math.round(a1[i]);
+    }
+  }
+
+  @Run(test = "test_round")
+  @Warmup(ITERS)
+  static void test_rounds(RunInfo runInfo) {
+    // Initialize
+    for (int i = 0; i < ARRLEN; i++) {
+      double val = ADD_INIT + (double) i;
+      input[i] = val;
+    }
+
+    test_round(res, input);
+    // skip test/verify when warming up
+    if (runInfo.isWarmUp()) {
+      return;
+    }
+
+    int errn = 0;
+    // a double precision floating point value is composed of 3 parts: sign/exponent/significand
+    // exponent part of a double value
+    final int exponentShift = 52;
+    final int exponentWidth = 11;
+    final int exponentBound = 1 << exponentWidth;
+    // significand part of a double value
+    final int signicandWidth = exponentShift;
+    final long signicandBound = 1L << signicandWidth;
+    final int signicandNum = 256;
+
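+    // Each test input below is assembled bitwise as sign | exponent<<52 | significand,
+    // so the three fields can be exercised independently of one another.
+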
+    // prepare for data of significand part
+    long signicandValues[] = new long[signicandNum];
+    int signicandIdx = 0;
+    for (; signicandIdx < signicandWidth; signicandIdx++) {
+      signicandValues[signicandIdx] = 1L << signicandIdx;
+    }
+    for (; signicandIdx < signicandNum; signicandIdx++) {
+      signicandValues[signicandIdx] = rand.nextLong(signicandBound);
+    }
+    signicandValues[rand.nextInt(signicandNum)] = 0;
+
+    // generate input arrays for testing, then run tests & verify results
+
+    // generate input arrays by combining different parts
+    for (long sv : signicandValues) {
+      // generate test input by combining different parts:
+      //   previously generated significand values,
+      //   random values in the exponent range,
+      //   both positive and negative of the combined values (exponent+significand)
+      final int exponentStart = rand.nextInt(9);
+      final int exponentStep = (1 << 3) + rand.nextInt(3);
+      // Here, we could have iterated the whole range of exponent values, but it would
+      // take more time to run the test, so just randomly choose some of the exponent values.
+      int ev = exponentStart;
+      int inputIdx = 0;
+      for (; ev < exponentBound; ev += exponentStep) {
+        inputIdx = ev/exponentStep;
+        // combine exponent and significand
+        long bits = ((long)ev << exponentShift) + sv;
+        // combine sign(+/-) with exponent and significand
+        // positive values
+        input[inputIdx*2] = Double.longBitsToDouble(bits);
+        // negative values
+        bits = bits | (1L << 63);
+        input[inputIdx*2+1] = Double.longBitsToDouble(bits);
+      }
+      // add specific test cases that look like this in binary format:
+      //   s111 1111 1111 xxxx xxxx xxxx xxxx xxxx ...
+      // these are the NaN and Inf patterns.
+      inputIdx = inputIdx*2+2;
+      long bits = (1L << exponentWidth) - 1L;
+      bits <<= exponentShift;
+      input[inputIdx++] = Double.longBitsToDouble(bits);
+      bits = bits | (1L << 63);
+      input[inputIdx] = Double.longBitsToDouble(bits);
+
+      // run tests
+      test_round(res, input);
+
+      // verify results
+      ev = exponentStart;
+      for (; ev < exponentBound; ev += exponentStep) {
+        inputIdx = ev/exponentStep;
+        for (int sign = 0; sign < 2; sign++) {
+          int idx = inputIdx * 2 + sign;
+          if (res[idx] != golden_round(input[idx])) {
+            errn++;
+            System.err.println("round error, input: " + input[idx] +
+                ", res: " + res[idx] + ", expected: " + golden_round(input[idx]) +
+                ", input hex: " + Double.doubleToLongBits(input[idx]) +
+                ", fi: " + sv + ", ei: " + ev + ", sign: " + sign);
+          }
+        }
+      }
+    }
+
+    // generate pure random input arrays, which do not depend on significand/exponent values
+    for (int i = 0; i < 128; i++) {
+      for (int j = 0; j < ARRLEN; j++) {
+        input[j] = rand.nextDouble();
+      }
+
+      // run tests
+      test_round(res, input);
+
+      // verify results
+      for (int j = 0; j < ARRLEN; j++) {
+        if (res[j] != golden_round(input[j])) {
+          errn++;
+          System.err.println("round error, input: " + input[j] +
+              ", res: " + res[j] + ", expected: " + golden_round(input[j]) +
+              ", input hex: " + Double.doubleToLongBits(input[j]));
+        }
+      }
+    }
+
+    // test cases for NaN, Inf, subnormal, and so on
+    {
+      Double[] dv = new Double[] {
+        +0.0,
+        -0.0,
+        Double.MAX_VALUE,
+        Double.MIN_VALUE,
+        Double.NEGATIVE_INFINITY,
+        Double.POSITIVE_INFINITY,
+        Double.NaN,
+        Double.longBitsToDouble(0x7ff0000000000001L), // another NaN
+        Double.MIN_NORMAL,
+        0x0.fffffffffffffp-1022, // maximum subnormal value
+        1.5,
+        100.5,
+        10000.5,
+        -1.5,
+        -100.5,
+        -10000.5
+      };
+      for (int j = 0; j < ARRLEN; j++) {
+        input[j] = dv[rand.nextInt(dv.length)];
+      }
+
+      // run tests
+      test_round(res, input);
+
+      // verify results
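+      // (each element must match the scalar golden_round reference value)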
j++) { + if (res[j] != golden_round(input[j])) { + errn++; + System.err.println("round error, input: " + input[j] + + ", res: " + res[j] + "expected: " + golden_round(input[j]) + + ", input hex: " + Double.doubleToLongBits(input[j])); + } + } + } + + if (errn > 0) { + throw new RuntimeException("There are some round error detected!"); + } + } +} diff --git a/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java new file mode 100644 index 0000000000000..a85ac3eaa22e0 --- /dev/null +++ b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
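The bit-level composition above is easier to follow with a tiny standalone probe. The sketch below (an illustrative class, not part of the patch) builds a double from explicit exponent and significand fields exactly the way the test does, then feeds it to Math.round:

public class DoubleBitsDemo {
    public static void main(String[] args) {
        final int exponentShift = 52;               // significand width of a double
        long bits = (1023L << exponentShift)        // biased exponent encoding 2^0
                  | (1L << 51);                     // top significand bit = 0.5
        double d = Double.longBitsToDouble(bits);
        System.out.println(d);                      // prints 1.5
        System.out.println(Math.round(d));          // prints 2 (ties round toward +infinity)
        bits |= 1L << 63;                           // set the sign bit
        System.out.println(Math.round(Double.longBitsToDouble(bits))); // prints -1
    }
}

Setting all exponent bits instead, as the NaN/Inf cases above do, yields infinities when the significand is zero and NaNs otherwise.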
diff --git a/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java new file mode 100644 index 0000000000000..a85ac3eaa22e0 --- /dev/null +++ b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatAll.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8321010 + * @summary Test vector intrinsic for Math.round(float) in full 32 bits range + * + * @library /test/lib / + * @modules java.base/jdk.internal.math + * @requires os.arch == "riscv64" & vm.cpu.features ~= ".*rvv.*" + * @run main/othervm -XX:-TieredCompilation -XX:CompileThresholdScaling=0.3 -XX:+UseSuperWord -XX:CompileCommand=compileonly,compiler.vectorization.TestRoundVectorFloatAll::test* compiler.vectorization.TestRoundVectorFloatAll + * @run main/othervm -XX:-TieredCompilation -XX:CompileThresholdScaling=0.3 -XX:MaxVectorSize=32 -XX:+UseSuperWord -XX:CompileCommand=compileonly,compiler.vectorization.TestRoundVectorFloatAll::test* compiler.vectorization.TestRoundVectorFloatAll + */ + +package compiler.vectorization; + +import java.util.Random; +import static compiler.lib.golden.GoldenRound.golden_round; + +public class TestRoundVectorFloatAll { + private static final Random rand = new Random(); + + private static final int ITERS = 11000; + private static final int ARRLEN = rand.nextInt(4096-997) + 997; + private static final float ADD_INIT = -7500.f; + + public static void main(String args[]) { + test(); + } + + static void test() { + float[] input = new float[ARRLEN]; + int [] res = new int[ARRLEN]; + + // Initialize + for (int i = 0; i < ARRLEN; i++) { + input[i] = ADD_INIT + (float)i; + } + + // Warmup + System.out.println("Warmup"); + for (int i = 0; i < ITERS; i++) { + test_round(res, input); + } + + // Test and verify results: sweep the whole 32-bit pattern space in ARRLEN-sized chunks + System.out.println("Verification"); + int errn = 0; + for (long bits = Integer.MIN_VALUE; bits <= Integer.MAX_VALUE; bits += ARRLEN) { + for (int i = 0; i < ARRLEN; i++) { + input[i] = Float.intBitsToFloat((int)Math.min(bits + i, Integer.MAX_VALUE)); + } + test_round(res, input); + for (int i = 0; i < ARRLEN; i++) { + if (res[i] != golden_round(input[i])) { + errn++; + System.err.println("round error, input: " + input[i] + + ", res: " + res[i] + ", expected: " + golden_round(input[i]) + + ", input hex: " + Float.floatToIntBits(input[i])); + } + } + } + + if (errn > 0) { + throw new RuntimeException("Some rounding errors were detected!"); + } + } + + static void test_round(int[] a0, float[] a1) { + for (int i = 0; i < a0.length; i+=1) { + a0[i] = Math.round(a1[i]); + } + } +}
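The scalar contract being vectorized is worth pinning down before trusting a full 2^32 sweep. Here is a quick self-contained check of the Math.round(float) corner cases these tests lean on (an illustrative class, not part of the patch):

public class RoundContractDemo {
    static void check(boolean ok, String what) {
        if (!ok) throw new AssertionError(what);
    }
    public static void main(String[] args) {
        check(Math.round(100.5f) == 101, "ties round toward +infinity");
        check(Math.round(-100.5f) == -100, "negative ties round toward +infinity too");
        check(Math.round(Float.NaN) == 0, "NaN rounds to 0");
        check(Math.round(Float.POSITIVE_INFINITY) == Integer.MAX_VALUE, "+Inf saturates");
        check(Math.round(Float.NEGATIVE_INFINITY) == Integer.MIN_VALUE, "-Inf saturates");
        check(Math.round(-0.0f) == 0, "-0.0 rounds to 0");
        System.out.println("Math.round(float) contract holds");
    }
}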
diff --git a/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatRandom.java b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatRandom.java new file mode 100644 index 0000000000000..7288e4d9f6396 --- /dev/null +++ b/test/hotspot/jtreg/compiler/vectorization/TestRoundVectorFloatRandom.java @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Rivos Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @key randomness + * @bug 8321010 + * @summary Test vector intrinsic for Math.round(float) with random input in the 32-bit range, verifying the IR at the same time. + * + * @library /test/lib / + * @modules java.base/jdk.internal.math + * @requires os.arch == "riscv64" & vm.cpu.features ~= ".*rvv.*" + * @run main compiler.vectorization.TestRoundVectorFloatRandom + */ + +package compiler.vectorization; + +import java.util.Random; +import static compiler.lib.golden.GoldenRound.golden_round; +import compiler.lib.ir_framework.IR; +import compiler.lib.ir_framework.IRNode; +import compiler.lib.ir_framework.Run; +import compiler.lib.ir_framework.RunInfo; +import compiler.lib.ir_framework.Test; +import compiler.lib.ir_framework.TestFramework; +import compiler.lib.ir_framework.Warmup; +
+public class TestRoundVectorFloatRandom { + private static final Random rand = new Random(); + + private static final int ITERS = 11000; + private static final int ARRLEN = rand.nextInt(4096-997) + 997; + private static final float ADD_INIT = -7500.f; + + private static final float[] input = new float[ARRLEN]; + private static final int[] res = new int[ARRLEN]; + + public static void main(String args[]) { + TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3"); + TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=8"); + TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=16"); + TestFramework.runWithFlags("-XX:-TieredCompilation", "-XX:CompileThresholdScaling=0.3", "-XX:MaxVectorSize=32"); + } + + @Test + @IR(counts = {IRNode.ROUND_VF, "> 0"}, + applyIf = {"MaxVectorSize", ">= 32"}) + static void test_round(int[] a0, float[] a1) { + for (int i = 0; i < a0.length; i+=1) { + a0[i] = Math.round(a1[i]); + } + } + + @Run(test = "test_round") + @Warmup(ITERS) + static void test_rounds(RunInfo runInfo) { + // Initialize + for (int i = 0; i < ARRLEN; i++) { + float val = ADD_INIT+(float)i; + input[i] = val; + } + + test_round(res, input); + // skip test/verify when warming up + if (runInfo.isWarmUp()) { + return; + } + + int errn = 0; + // a single-precision float is composed of 3 parts: sign/exponent/significand + // exponent part of a float value + final int exponentStart = 0; + final int exponentShift = 23; + final int exponentWidth = 8; + final int exponentBound = 1 << exponentWidth; + // significand part of a float value + final int signicandWidth = exponentShift; + final int signicandBound = 1 << signicandWidth; + final int signicandNum = 128; + + // prepare data for the significand part + int signicandValues[] = new int[signicandNum]; + int signicandIdx = 0; + for (; signicandIdx < signicandWidth; signicandIdx++) { + signicandValues[signicandIdx] = 1 << signicandIdx; + } + for (; signicandIdx < signicandNum; signicandIdx++) { + signicandValues[signicandIdx] = rand.nextInt(signicandBound); + } + signicandValues[rand.nextInt(signicandNum)] = 0; + + // generate input arrays for testing, then run tests & verify results + + // generate input arrays by combining different parts + for (int sv : signicandValues) { + // generate test input by combining different parts: + // previously generated significand values, + // all values in the exponent range, + // both positive and negative of previous combined values (exponent+significand)
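+ // Note: with exponentStart == 0 the loop below sweeps every exponent
+ // pattern: ev == 0 yields zeros and subnormals, ev == 254 the largest
+ // finite magnitudes, and ev == 255 the Inf/NaN encodings.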
+ for (int ev = exponentStart; ev < exponentBound; ev++) { + // combine exponent and significand + int bits = (ev << exponentShift) + sv; + // combine sign(+/-) with exponent and significand + // positive values + input[ev*2] = Float.intBitsToFloat(bits); + // negative values + bits = bits | (1 << 31); + input[ev*2+1] = Float.intBitsToFloat(bits); + } + + // run tests + test_round(res, input); + + // verify results + for (int ev = exponentStart; ev < exponentBound; ev++) { + for (int sign = 0; sign < 2; sign++) { + int idx = ev * 2 + sign; + if (res[idx] != golden_round(input[idx])) { + errn++; + System.err.println("round error, input: " + input[idx] + + ", res: " + res[idx] + ", expected: " + golden_round(input[idx]) + + ", input hex: " + Float.floatToIntBits(input[idx]) + + ", fi: " + sv + ", ei: " + ev + ", sign: " + sign); + } + } + } + } + + // generate purely random input arrays, which do not depend on significand/exponent values + for (int i = 0; i < 128; i++) { + for (int j = 0; j < ARRLEN; j++) { + input[j] = rand.nextFloat(); + } + + // run tests + test_round(res, input); + + // verify results + for (int j = 0; j < ARRLEN; j++) { + if (res[j] != golden_round(input[j])) { + errn++; + System.err.println("round error, input: " + input[j] + + ", res: " + res[j] + ", expected: " + golden_round(input[j]) + + ", input hex: " + Float.floatToIntBits(input[j])); + } + } + } + + // test cases for NaN, Inf, subnormal, and so on + { + Float[] dv = new Float[] { + +0.0f, + -0.0f, + Float.MAX_VALUE, + Float.MIN_VALUE, + Float.NEGATIVE_INFINITY, + Float.POSITIVE_INFINITY, + Float.NaN, + Float.intBitsToFloat(0x7f800001), // another NaN + Float.MIN_NORMAL, + 0x0.fffffep-126f, // maximum subnormal value + 1.5f, + 100.5f, + 10000.5f, + -1.5f, + -100.5f, + -10000.5f + }; + for (int j = 0; j < ARRLEN; j++) { + input[j] = dv[rand.nextInt(dv.length)]; + } + + // run tests + test_round(res, input); + + // verify results + for (int j = 0; j < ARRLEN; j++) { + if (res[j] != golden_round(input[j])) { + errn++; + System.err.println("round error, input: " + input[j] + + ", res: " + res[j] + ", expected: " + golden_round(input[j]) + + ", input hex: " + Float.floatToIntBits(input[j])); + } + } + } + + if (errn > 0) { + throw new RuntimeException("Some rounding errors were detected!"); + } + } +} diff --git a/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java b/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java index 3bf83d9076864..bacd7a2078e97 100644 --- a/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java +++ b/test/hotspot/jtreg/gc/x/TestAllocateHeapAt.java @@ -41,7 +41,7 @@ public static void main(String[] args) throws Exception { final String heapBackingFile = "Heap Backing File: " + directory; final String failedToCreateFile = "Failed to create file " + directory; - ProcessTools.executeLimitedTestJava( +
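The gc/x and gc/z hunks that follow are mechanical. The relevant difference, stated here as an assumption about the jdk.test.lib helpers, is that executeTestJava also forwards the jtreg-supplied test VM options to the child JVM, while the removed executeLimitedTestJava passed only the flags given at the call site. A minimal sketch of the launch-and-check pattern these tests use:

import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class LaunchSketch {
    public static void main(String[] args) throws Exception {
        // Launches a child JVM, inheriting the jtreg-supplied test options.
        OutputAnalyzer out = ProcessTools.executeTestJava(
                "-XX:+UseZGC", "-Xlog:gc*", "-version");
        out.shouldHaveExitValue(0);
    }
}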
ProcessTools.executeTestJava( "-XX:+UseZGC", "-XX:-ZGenerational", "-Xlog:gc,gc+init,gc+reloc,gc+heap", diff --git a/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java b/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java index 9f47c4b60d3cb..28d2ebd6aea01 100644 --- a/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java +++ b/test/hotspot/jtreg/gc/z/TestAllocateHeapAt.java @@ -41,7 +41,7 @@ public static void main(String[] args) throws Exception { final String heapBackingFile = "Heap Backing File: " + directory; final String failedToCreateFile = "Failed to create file " + directory; - ProcessTools.executeLimitedTestJava( + ProcessTools.executeTestJava( "-XX:+UseZGC", "-XX:+ZGenerational", "-Xlog:gc*", diff --git a/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java b/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java index 387053b580ad8..3b666ddc2c8a6 100644 --- a/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java +++ b/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java @@ -68,7 +68,7 @@ public static void main(String[] args) throws Exception { } public static void main(String[] args) throws Exception { - ProcessTools.executeLimitedTestJava( + ProcessTools.executeTestJava( "-XX:+UseZGC", "-XX:+ZGenerational", "-Xms128M", diff --git a/test/hotspot/jtreg/gc/z/TestSmallHeap.java b/test/hotspot/jtreg/gc/z/TestSmallHeap.java index 354cd4164f1fd..67d9d33d2815e 100644 --- a/test/hotspot/jtreg/gc/z/TestSmallHeap.java +++ b/test/hotspot/jtreg/gc/z/TestSmallHeap.java @@ -53,7 +53,7 @@ public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception { for (var maxCapacity: args) { - ProcessTools.executeLimitedTestJava( + ProcessTools.executeTestJava( "-XX:+UseZGC", "-XX:+ZGenerational", "-Xlog:gc,gc+init,gc+reloc,gc+heap", diff --git a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java index f77be989421b2..f45f708070ab0 100644 --- a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java +++ b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ public static void main(String[] args) { runCheck(new String[] {"-XX:TLABRefillWasteFraction=50", "-XX:+UsePerfData", "-XX:+UseTLAB"}, BadFailOnConstraint.create(AndOr1.class, "test1(int)", 1, "CallStaticJava")); runCheck(new String[] {"-XX:TLABRefillWasteFraction=50", "-XX:-UsePerfData", "-XX:+UseTLAB"}, BadFailOnConstraint.create(AndOr1.class, "test2()", 1, "CallStaticJava")); - String[] allocMatches = { "MyClass", "wrapper for: _new_instance_Java" }; + String[] allocMatches = { "MyClass", "wrapper for: C2 Runtime new_instance" }; runCheck(BadFailOnConstraint.create(MultipleFailOnBad.class, "fail1()", 1, 1, "Store"), BadFailOnConstraint.create(MultipleFailOnBad.class, "fail1()", 1, 3, "Store"), GoodFailOnRegexConstraint.create(MultipleFailOnBad.class, "fail1()", 1, 2, 4), @@ -114,7 +114,7 @@ public static void main(String[] args) { GoodRuleConstraint.create(Calls.class, "calls()", 3) ); - String[] allocArrayMatches = { "MyClass", "wrapper for: _new_array_Java"}; + String[] allocArrayMatches = { "MyClass", "wrapper for: C2 Runtime new_array"}; runCheck(BadFailOnConstraint.create(AllocArray.class, "allocArray()", 1, allocArrayMatches), BadFailOnConstraint.create(AllocArray.class, "allocArray()", 2, allocArrayMatches), GoodFailOnConstraint.create(AllocArray.class, "allocArray()", 3), diff --git a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/ThreadController.java b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/ThreadController.java index ea15f3d0d35bc..9d121b9a94f79 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/ThreadController.java +++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/ThreadController.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -663,6 +663,9 @@ public SleepingThread(ThreadController controller, String name, Log log, Threads expectedMethods.add(Thread.class.getName() + ".currentCarrierThread"); expectedMethods.add(Thread.class.getName() + ".currentThread"); // jdk.internal.event.ThreadSleepEvent not accessible + expectedMethods.add("java.lang.Object.<init>"); + expectedMethods.add("jdk.internal.event.Event.<init>"); + expectedMethods.add("jdk.internal.event.ThreadSleepEvent.<clinit>"); expectedMethods.add("jdk.internal.event.ThreadSleepEvent.<init>"); expectedMethods.add("jdk.internal.event.ThreadSleepEvent.isEnabled"); expectedMethods.add(SleepingThread.class.getName() + ".run");
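The new entries account for the constructor frames that can be observed while a thread parks in Thread.sleep once the JFR sleep-event object is being created; the angle-bracket <init>/<clinit> spellings are an inference. The way an expected-methods list like this is typically consumed is sketched below (illustrative, not the harness's exact code):

import java.util.List;

public class StackCheckSketch {
    // Every frame of the sleeping thread must match one expected entry.
    static void checkStack(Thread sleeper, List<String> expectedMethods) {
        for (StackTraceElement e : sleeper.getStackTrace()) {
            String frame = e.getClassName() + "." + e.getMethodName();
            if (expectedMethods.stream().noneMatch(frame::startsWith)) {
                throw new RuntimeException("unexpected frame: " + frame);
            }
        }
    }
}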
diff --git a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SleepingThread.java b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SleepingThread.java index ed2a1a60c5088..217c2cdfdc82b 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SleepingThread.java +++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/share/thread/SleepingThread.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,9 @@ public class SleepingThread extends RecursiveMonitoringThread { "java.lang.Thread.beforeSleep", "java.lang.Thread.afterSleep", "java.util.concurrent.TimeUnit.toNanos", + "java.lang.Object.<init>", + "jdk.internal.event.Event.<init>", + "jdk.internal.event.ThreadSleepEvent.<clinit>", "jdk.internal.event.ThreadSleepEvent.<init>", "jdk.internal.event.ThreadSleepEvent.isEnabled", "nsk.monitoring.share.thread.SleepingThread.runInside" diff --git a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java index 03d2066cf3289..0700032f6a7c2 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java +++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -150,6 +150,8 @@ private static boolean fillTrace() { "java.lang.Thread.currentThread", "java.util.concurrent.TimeUnit.toNanos", "jdk.internal.event.ThreadSleepEvent.<clinit>", + "java.lang.Object.<init>", + "jdk.internal.event.Event.<init>", "jdk.internal.event.ThreadSleepEvent.<init>", "jdk.internal.event.ThreadSleepEvent.isEnabled" }; diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt index 47a2e13b460cf..58949cf728256 100644 --- a/test/jdk/ProblemList.txt +++ b/test/jdk/ProblemList.txt @@ -133,6 +133,9 @@ java/awt/Focus/MouseClickRequestFocusRaceTest/MouseClickRequestFocusRaceTest.jav java/awt/Focus/NoAutotransferToDisabledCompTest/NoAutotransferToDisabledCompTest.java 7152980 macosx-all java/awt/Focus/ToFrontFocusTest/ToFrontFocus.java 7156130 linux-all java/awt/Focus/WrongKeyTypedConsumedTest/WrongKeyTypedConsumedTest.java 8169096 macosx-all +java/awt/Focus/TestDisabledAutoTransfer.java 8159871 macosx-all,windows-all +java/awt/Focus/TestDisabledAutoTransferSwing.java 6962362 windows-all +java/awt/Focus/ActivateOnProperAppContextTest.java 8136516 macosx-all java/awt/EventQueue/6980209/bug6980209.java 8198615 macosx-all java/awt/grab/EmbeddedFrameTest1/EmbeddedFrameTest1.java 7080150 macosx-all java/awt/event/InputEvent/EventWhenTest/EventWhenTest.java 8168646 generic-all diff --git a/test/jdk/java/awt/Choice/ChoiceInsertTest.java b/test/jdk/java/awt/Choice/ChoiceInsertTest.java new file mode 100644 index 0000000000000..5eafa83a13991 --- /dev/null +++ b/test/jdk/java/awt/Choice/ChoiceInsertTest.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Choice; +import java.awt.EventQueue; +import java.awt.Frame; +import java.awt.GridLayout; +import java.awt.Label; +import java.awt.Robot; + +/* + * @test + * @bug 4082078 + * @summary Test for bug(s): 4082078, Multiple calls to Choice.insert cause core dump + * @key headful + * @run main ChoiceInsertTest + */ + +public class ChoiceInsertTest extends Frame { + Choice c; + Label l; + + private static ChoiceInsertTest choiceInsertTest; + + public ChoiceInsertTest() { + c = new Choice(); + l = new Label("If you see this, the choice insert bug is fixed!"); + c.add("Initial choice"); + add(c); + } + + public void testInsertion() { + // inserting 30 or so items aborts Solaris VM + // in JDK's before 1.1.5 + for (int nchoice = 0; nchoice < 30; nchoice++) { + c.insert("new choice", 0); + } + // if you made it to here the bug is not there anymore... + remove(l); + add(l); + validate(); + } + + public static void main(String[] args) throws Exception { + Robot robot = new Robot(); + try { + EventQueue.invokeAndWait(() ->{ + choiceInsertTest = new ChoiceInsertTest(); + choiceInsertTest.setTitle("ChoiceInsertTest"); + choiceInsertTest.setLocationRelativeTo(null); + choiceInsertTest.setSize(500, 300); + choiceInsertTest.setLayout(new GridLayout()); + choiceInsertTest.setVisible(true); + }); + robot.waitForIdle(); + robot.delay(500); + EventQueue.invokeAndWait(choiceInsertTest::testInsertion); + robot.delay(1000); + } finally { + EventQueue.invokeAndWait(() -> { + if (choiceInsertTest != null) { + choiceInsertTest.dispose(); + } + }); + } + + System.err.println("ChoiceInsertTest: Didn't abort VM inserting 30 items, so we passed!"); + } +} diff --git a/test/jdk/java/awt/Choice/ChoiceMouseDragTest.java b/test/jdk/java/awt/Choice/ChoiceMouseDragTest.java new file mode 100644 index 0000000000000..0c0c059032dda --- /dev/null +++ b/test/jdk/java/awt/Choice/ChoiceMouseDragTest.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + +import java.awt.BorderLayout; +import java.awt.Choice; +import java.awt.EventQueue; +import java.awt.Frame; +import java.awt.Point; +import java.awt.event.InputEvent; +import java.awt.event.MouseAdapter; +import java.awt.event.MouseEvent; + +/* + * @test + * @bug 4328557 + * @summary Tests that MouseDragged and MouseReleased are triggered on choice + * @library /lib/client + * @build ExtendedRobot + * @key headful + * @run main ChoiceMouseDragTest + */ + + +public class ChoiceMouseDragTest extends Frame { + private static final Choice choice = new Choice(); + + private static ExtendedRobot robot; + private volatile boolean isDragged; + private volatile boolean isReleased; + + private static volatile ChoiceMouseDragTest choiceMouseDragTest; + + public ChoiceMouseDragTest() { + super("ChoiceMouseDragTest"); + this.setLayout(new BorderLayout()); + choice.add("item-1"); + choice.add("item-2"); + choice.add("item-3"); + choice.add("item-4"); + add("Center", choice); + choice.addMouseListener(new MouseEventHandler()); + choice.addMouseMotionListener(new MouseMotionEventHandler()); + setSize(400, 200); + setLocationRelativeTo(null); + setVisible(true); + } + + public static void main(String[] args) throws Exception { + try { + EventQueue.invokeAndWait(() -> + choiceMouseDragTest = new ChoiceMouseDragTest()); + + robot = new ExtendedRobot(); + robot.waitForIdle(); + robot.delay(500); + + Point pointToDrag = choice.getLocationOnScreen(); + pointToDrag.x += choice.getWidth() - 10; + pointToDrag.y += choice.getHeight() / 2 ; + + choiceMouseDragTest.test(InputEvent.BUTTON3_DOWN_MASK, pointToDrag); + choiceMouseDragTest.test(InputEvent.BUTTON1_DOWN_MASK, pointToDrag); + } finally { + EventQueue.invokeAndWait(() -> { + if (choiceMouseDragTest != null) { + choiceMouseDragTest.dispose(); + } + }); + } + } + + void test(int buttonToTest, Point pointToDrag) { + isDragged = false; + isReleased = false; + + robot.mouseMove(pointToDrag.x, pointToDrag.y); + robot.waitForIdle(); + + robot.mousePress(buttonToTest); + + robot.glide(pointToDrag.x + 100, pointToDrag.y); + robot.waitForIdle(); + + robot.mouseRelease(buttonToTest); + robot.waitForIdle(); + + if (!isReleased || !isDragged) { + throw new RuntimeException(("Test failed: button %d dragged(received %b) or " + + "released(received %b)") + .formatted(buttonToTest, isDragged, isReleased)); + } + + robot.delay(500); + } + + class MouseEventHandler extends MouseAdapter { + public void mousePressed(MouseEvent me) { + System.out.println(me.paramString()); + } + + public void mouseReleased(MouseEvent me) { + System.out.println(me.paramString()); + isReleased = true; + } + + public void mouseClicked(MouseEvent me) { + System.out.println(me.paramString()); + } + } + + class MouseMotionEventHandler extends MouseAdapter { + public void mouseDragged(MouseEvent me) { + System.out.println(me.paramString()); + isDragged = true; + } + } +} diff --git a/test/jdk/java/awt/Choice/WheelEventsConsumed.java b/test/jdk/java/awt/Choice/WheelEventsConsumed.java new file mode 100644 index 0000000000000..37200214734bb --- /dev/null +++ b/test/jdk/java/awt/Choice/WheelEventsConsumed.java @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.awt.Choice; +import java.awt.EventQueue; +import java.awt.FlowLayout; +import java.awt.Frame; +import java.awt.Point; +import java.awt.Robot; +import java.awt.Toolkit; +import java.awt.event.InputEvent; +import java.awt.event.KeyEvent; +import java.awt.event.MouseWheelEvent; +import java.awt.event.MouseWheelListener; + +/* + * @test + * @bug 6253211 + * @summary PIT: MouseWheel events not triggered for Choice drop down in XAWT + * @requires (os.family == "linux") + * @key headful + * @run main WheelEventsConsumed + */ + +public class WheelEventsConsumed extends Frame implements MouseWheelListener +{ + Robot robot; + Choice choice1 = new Choice(); + Point pt; + final static int delay = 100; + boolean mouseWheeled = false; + final static int OUTSIDE_CHOICE = 1; + final static int INSIDE_LIST_OF_CHOICE = 2; + final static int INSIDE_CHOICE_COMPONENT = 3; + static String toolkit; + + private static volatile WheelEventsConsumed frame = null; + + public static void main(String[] args) throws Exception { + toolkit = Toolkit.getDefaultToolkit().getClass().getName(); + try { + EventQueue.invokeAndWait(() -> { + frame = new WheelEventsConsumed(); + frame.initAndShow(); + }); + frame.test(); + } finally { + EventQueue.invokeAndWait(() -> { + if (frame != null) { + frame.dispose(); + } + }); + } + } + + public void mouseWheelMoved(MouseWheelEvent me) { + mouseWheeled = true; + System.out.println(me); + } + + public void initAndShow() { + setTitle("WheelEventsConsumed test"); + for (int i = 1; i < 10; i++) { + choice1.add("item-0" + i); + } + + choice1.addMouseWheelListener(this); + add(choice1); + setLayout(new FlowLayout()); + setSize(200, 200); + setLocationRelativeTo(null); + setVisible(true); + validate(); + } + + public void test() { + try { + robot = new Robot(); + robot.setAutoWaitForIdle(true); + robot.setAutoDelay(50); + robot.waitForIdle(); + robot.delay(delay * 5); + testMouseWheel(1, OUTSIDE_CHOICE); + robot.delay(delay); + testMouseWheel(-1, INSIDE_LIST_OF_CHOICE); + robot.delay(delay); + testMouseWheel(1, INSIDE_CHOICE_COMPONENT); + robot.delay(delay); + } catch (Throwable e) { + throw new RuntimeException("Test failed. 
Exception thrown: " + e); + } + } + + public void testMouseWheel(int amt, int mousePosition) { + pt = choice1.getLocationOnScreen(); + robot.mouseMove(pt.x + choice1.getWidth() / 2, pt.y + choice1.getHeight() / 2); + + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.delay(50); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + robot.delay(50); + + switch (mousePosition) { + case OUTSIDE_CHOICE: + robot.mouseMove(pt.x + choice1.getWidth() * 3 / 2, pt.y + choice1.getHeight() / 2); + break; + case INSIDE_LIST_OF_CHOICE: + robot.mouseMove(pt.x + choice1.getWidth() / 2, pt.y + choice1.getHeight() * 4); + break; + case INSIDE_CHOICE_COMPONENT: + robot.mouseMove(pt.x + choice1.getWidth() / 2, pt.y + choice1.getHeight() / 2); + break; + } + + robot.delay(delay); + for (int i = 0; i < 10; i++) { + robot.mouseWheel(amt); + robot.delay(delay); + } + + if (!mouseWheeled) { + if (toolkit.equals("sun.awt.windows.WToolkit") && mousePosition == OUTSIDE_CHOICE) { + System.out.println("Passed. Separate case on Win32. Choice generated MouseWheel events" + mousePosition); + } else { + throw new RuntimeException("Test failed. Choice should generate MOUSE_WHEEL events." + mousePosition); + } + } else { + System.out.println("Passed. Choice generated MouseWheel events" + mousePosition); + } + robot.keyPress(KeyEvent.VK_ESCAPE); + robot.delay(10); + robot.keyRelease(KeyEvent.VK_ESCAPE); + robot.delay(200); + mouseWheeled = false; + } +} diff --git a/test/jdk/java/awt/Focus/ActivateOnProperAppContextTest.java b/test/jdk/java/awt/Focus/ActivateOnProperAppContextTest.java new file mode 100644 index 0000000000000..ab08398332147 --- /dev/null +++ b/test/jdk/java/awt/Focus/ActivateOnProperAppContextTest.java @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2006, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +/* +* @test +* @bug 6385277 +* @key headful +* @summary Tests that activation happens on correct AppContext. 
+* @modules java.desktop/sun.awt +* @run main ActivateOnProperAppContextTest +*/ + +import sun.awt.AppContext; +import sun.awt.SunToolkit; + +import java.awt.Button; +import java.awt.Component; +import java.awt.Container; +import java.awt.Cursor; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.Frame; +import java.awt.Label; +import java.awt.Point; +import java.awt.Robot; +import java.awt.Toolkit; +import java.awt.Window; +import java.awt.event.InputEvent; +import java.util.concurrent.atomic.AtomicBoolean; + +public class ActivateOnProperAppContextTest { + static Robot robot; + SunToolkit toolkit; + + ThreadGroup threadGroup = new ThreadGroup("Test_Thread_Group"); + AppContext appContext; + Frame frame; + volatile boolean passed = true; + AtomicBoolean cond = new AtomicBoolean(false); + + public static void main(String[] args) throws Exception { + ActivateOnProperAppContextTest app = new ActivateOnProperAppContextTest(); + robot = new Robot(); + app.start(); + } + + public void start() { + toolkit = (SunToolkit)Toolkit.getDefaultToolkit(); + + Runnable runnable = new Runnable() { + public void run() { + test(); + + synchronized (cond) { + cond.set(true); + cond.notifyAll(); + } + } + }; + + Thread thread = new Thread(threadGroup, runnable, "Test Thread"); + + synchronized (cond) { + + thread.start(); + + while (!cond.get()) { + try { + cond.wait(); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + } + } + + if (passed) { + System.out.println("Test passed."); + } else { + throw new TestFailedException("Test failed!"); + } + } + + void test() { + appContext = SunToolkit.createNewAppContext(); + System.out.println("Created new AppContext: " + appContext); + + frame = new Frame("ActivateOnProperAppContextTest Frame") { + public boolean isActive() { + verifyAppContext("Frame.isActive()"); + return super.isActive(); + } + public boolean isFocused() { + verifyAppContext("Frame.isFocused()"); + return super.isFocused(); + } + public boolean isFocusable() { + verifyAppContext("Frame.isFocusable()"); + return super.isFocusable(); + } + public Window getOwner() { + verifyAppContext("Frame.getOwner()"); + return super.getOwner(); + } + public boolean isEnabled() { + verifyAppContext("Frame.isEnabled()"); + return super.isEnabled(); + } + public boolean isVisible() { + verifyAppContext("Frame.isVisible()"); + return super.isVisible(); + } + public Container getParent() { + verifyAppContext("Frame.getParent()"); + return super.getParent(); + } + public Cursor getCursor() { + verifyAppContext("Frame.getCursor()"); + return super.getCursor(); + } + public Point getLocation() { + verifyAppContext("Frame.getLocation()"); + return super.getLocation(); + } + public Point getLocationOnScreen() { + verifyAppContext("Frame.getLocationOnScreen()"); + return super.getLocationOnScreen(); + } + }; + Window window = new Window(frame) { + public boolean isFocused() { + verifyAppContext("Window.isFocused()"); + return super.isFocused(); + } + public boolean isFocusable() { + verifyAppContext("Window.isFocusable()"); + return super.isFocusable(); + } + public Window getOwner() { + verifyAppContext("Window.getOwner()"); + return super.getOwner(); + } + public boolean isEnabled() { + verifyAppContext("Window.isEnabled()"); + return super.isEnabled(); + } + public boolean isVisible() { + verifyAppContext("Window.isVisible()"); + return super.isVisible(); + } + public Container getParent() { + verifyAppContext("Window.getParent()"); + return super.getParent(); + } + public Cursor 
getCursor() { + verifyAppContext("Window.getCursor()"); + return super.getCursor(); + } + public Point getLocation() { + verifyAppContext("Window.getLocation()"); + return super.getLocation(); + } + public Point getLocationOnScreen() { + verifyAppContext("Window.getLocationOnScreen()"); + return super.getLocationOnScreen(); + } + }; + Button button = new Button("button"); + Label label = new Label("label"); + + window.setLayout(new FlowLayout()); + window.add(button); + window.add(label); + window.setLocation(800, 0); + window.pack(); + window.setVisible(true); + + frame.setBounds(800, 100, 100, 50); + frame.setVisible(true); + + toolkit.realSync(); + + /* + * When the label is clicked in the window some of + * the owner's public methods get called. + */ + clickOn(label); + } + + void verifyAppContext(String methodName) { + AppContext ac = AppContext.getAppContext(); + println(methodName + " called on AppContext: " + ac); + + if (ac != appContext) { + passed = false; + System.err.println("Test failed: " + methodName + " is called on the wrong AppContext!"); + Thread.dumpStack(); + } + } + + void clickOn(Component c) { + Point p = c.getLocationOnScreen(); + Dimension d = c.getSize(); + + robot.mouseMove(p.x + (int)(d.getWidth()/2), p.y + (int)(d.getHeight()/2)); + + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.delay(20); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + + toolkit.realSync(); + } + + void println(final String msg) { + SunToolkit.executeOnEventHandlerThread(frame, new Runnable() { + public void run() { + System.out.println(msg); + } + }); + } +} + +class TestFailedException extends RuntimeException { + TestFailedException(String msg) { + super(msg); + } +} diff --git a/test/jdk/java/awt/Focus/KillFocusTest.java b/test/jdk/java/awt/Focus/KillFocusTest.java new file mode 100644 index 0000000000000..f61c18971f6f9 --- /dev/null +++ b/test/jdk/java/awt/Focus/KillFocusTest.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +/* + * @test + * @bug 4402942 + * @summary After deactivation and activation of frame, focus should be restored correctly + * @library /java/awt/regtesthelpers + * @build PassFailJFrame + * @run main/manual KillFocusTest +*/ + +import java.awt.Frame; +import java.awt.TextField; +import java.awt.event.FocusEvent; +import java.awt.event.FocusListener; + +public class KillFocusTest { + + private static final String INSTRUCTIONS = """ + After starting the test you should see the \"KillFocusTest Frame\" + with the \"Click me\" text field. 
+ Click on this text field and try to type something in it. + Make sure that the field receives focus and you can enter text in it. + Click on any non-java window. + Click on \"Click me\" text field to return focus to it + If the caret is in the text field and you are able to type + in it then press pass else press fail."""; + + public static void main(String[] args) throws Exception { + PassFailJFrame.builder() + .title("KillFocusTest Instructions") + .instructions(INSTRUCTIONS) + .rows((int) INSTRUCTIONS.lines().count() + 2) + .columns(35) + .testUI(KillFocusTest::createTestUI) + .logArea() + .build() + .awaitAndCheck(); + } + + private static Frame createTestUI() { + + Frame frame = new Frame("KillFocusTest Frame"); + TextField textField = new TextField("Click me", 10); + textField.addFocusListener(new FocusListener() { + public void focusGained(FocusEvent fe) { + PassFailJFrame.log("Focus gained"); + } + public void focusLost(FocusEvent fe) { + PassFailJFrame.log("Focus lost"); + } + }); + frame.add(textField); + frame.setSize(200, 100); + return frame; + } + + +} + diff --git a/test/jdk/java/awt/Focus/TestDisabledAutoTransfer.java b/test/jdk/java/awt/Focus/TestDisabledAutoTransfer.java new file mode 100644 index 0000000000000..d7928b7db1c70 --- /dev/null +++ b/test/jdk/java/awt/Focus/TestDisabledAutoTransfer.java @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +/* + * @test + * @bug 6180261 + * @summary Test that auto-transfer doesn't happen when there are pending focus requests + * @key headful + * @run main TestDisabledAutoTransfer +*/ + +import java.awt.Button; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.Frame; +import java.awt.Point; +import java.awt.Robot; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.FocusAdapter; +import java.awt.event.FocusEvent; +import java.awt.event.FocusListener; +import java.awt.event.InputEvent; +import java.util.concurrent.atomic.AtomicBoolean; + +public class TestDisabledAutoTransfer { + static Frame frame; + static Robot robot; + Button b1; + Button desired; + AtomicBoolean focused = new AtomicBoolean(); + ActionListener mover; + volatile Point loc; + volatile Dimension dim; + + public static void main(String[] args) throws Exception { + robot = new Robot(); + try { + TestDisabledAutoTransfer test = new TestDisabledAutoTransfer(); + test.createTestUI(); + robot.waitForIdle(); + robot.delay(1000); + test.doTest(); + } finally { + if (frame != null) { + frame.dispose(); + } + } + } + + public void createTestUI() { + frame = new Frame("TestDisabledAutoTransfer"); + frame.setLayout(new FlowLayout()); + desired = new Button("Desired"); + FocusAdapter watcher = new FocusAdapter() { + public void focusGained(FocusEvent e) { + synchronized(focused) { + focused.set(true); + } + } + }; + b1 = new Button("Press to disable"); + mover = new ActionListener() { + public void actionPerformed(ActionEvent e) { + desired.requestFocus(); + ((Component)e.getSource()).setEnabled(false); + } + }; + b1.addFocusListener(watcher); + desired.addFocusListener(watcher); + frame.add(b1); + Button misc = new Button("Next"); + frame.add(misc); + misc.addFocusListener(watcher); + frame.add(desired); + frame.setSize(200, 200); + frame.setLocationRelativeTo(null); + frame.setVisible(true); + frame.validate(); + + } + + public void doTest() { + + loc = b1.getLocationOnScreen(); + dim = b1.getSize(); + robot.mouseMove(loc.x + dim.width / 2, loc.y + dim.height / 2); + robot.waitForIdle(); + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + robot.waitForIdle(); + b1.requestFocus(); + + try { + synchronized(focused) { + if (!focused.get()) { + focused.wait(1000); + } + } + } catch (InterruptedException ie) { + throw new RuntimeException("Test was interrupted"); + } + + if (!focused.get()) { + throw new RuntimeException("b1 didn't get focus"); + } + focused.set(false); + + b1.addActionListener(mover); + robot.mouseMove(loc.x + dim.width / 2, loc.y + dim.height / 2); + robot.waitForIdle(); + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + robot.waitForIdle(); + + try { + synchronized(focused) { + if (!focused.get()) { + focused.wait(1000); + } + } + } catch (InterruptedException ie) { + throw new RuntimeException("Test was interrupted"); + } + + if (!focused.get()) { + throw new RuntimeException("none got focus"); + } + + if (!desired.isFocusOwner()) { + throw new RuntimeException("desired didn't get focus"); + } + } + +} + diff --git a/test/jdk/java/awt/Focus/TestDisabledAutoTransferSwing.java b/test/jdk/java/awt/Focus/TestDisabledAutoTransferSwing.java new file mode 100644 index 0000000000000..0793316a989cd --- /dev/null +++ b/test/jdk/java/awt/Focus/TestDisabledAutoTransferSwing.java @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2004, 
2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +/* + * @test + * @bug 6180261 + * @summary Test that auto-transfer doesn't happen when there are pending focus requests + * @key headful + * @run main TestDisabledAutoTransferSwing +*/ + +import java.awt.Component; +import java.awt.Dimension; +import java.awt.FlowLayout; +import java.awt.Point; +import java.awt.Robot; +import javax.swing.JButton; +import javax.swing.JFrame; +import javax.swing.SwingUtilities; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.InputEvent; +import java.awt.event.FocusAdapter; +import java.awt.event.FocusEvent; +import java.util.concurrent.atomic.AtomicBoolean; + +public class TestDisabledAutoTransferSwing { + static JFrame frame; + static Robot robot; + JButton b1; + JButton desired; + AtomicBoolean focused = new AtomicBoolean(); + ActionListener mover; + volatile Point loc; + volatile Dimension dim; + + public static void main(String[] args) throws Exception { + robot = new Robot(); + try { + TestDisabledAutoTransferSwing test = new TestDisabledAutoTransferSwing(); + SwingUtilities.invokeAndWait(() -> { + test.createTestUI(); + }); + robot.waitForIdle(); + robot.delay(1000); + test.doTest(); + } finally { + SwingUtilities.invokeAndWait(() -> { + if (frame != null) { + frame.dispose(); + } + }); + } + } + + public void createTestUI() { + frame = new JFrame("TestDisabledAutoTransferSwing"); + frame.setLayout (new FlowLayout ()); + desired = new JButton("Desired"); + FocusAdapter watcher = new FocusAdapter() { + public void focusGained(FocusEvent e) { + synchronized(focused) { + focused.set(true); + } + } + }; + b1 = new JButton("Press to disable"); + mover = new ActionListener() { + public void actionPerformed(ActionEvent e) { + desired.requestFocus(); + ((Component)e.getSource()).setEnabled(false); + } + }; + b1.addFocusListener(watcher); + desired.addFocusListener(watcher); + frame.add(b1); + JButton misc = new JButton("Next"); + frame.add(misc); + misc.addFocusListener(watcher); + frame.add(desired); + frame.setSize(200, 200); + frame.setLocationRelativeTo(null); + frame.setVisible(true); + frame.validate(); + + } + + public void doTest() throws Exception { + + SwingUtilities.invokeAndWait(() -> { + loc = b1.getLocationOnScreen(); + dim = b1.getSize(); + }); + robot.mouseMove(loc.x + dim.width / 2, loc.y + dim.height / 2); + robot.waitForIdle(); + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + robot.waitForIdle(); + 
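+ // The click above should have focused b1; request focus explicitly as
+ // well, then wait below until the focus-gained event actually arrives.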
SwingUtilities.invokeAndWait(() -> { + b1.requestFocus(); + }); + + try { + synchronized(focused) { + if (!focused.get()) { + focused.wait(2000); + } + } + } catch (InterruptedException ie) { + throw new RuntimeException("Test was interrupted"); + } + + if (!focused.get()) { + throw new RuntimeException("b1 didn't get focus"); + } + focused.set(false); + + SwingUtilities.invokeAndWait(() -> { + b1.addActionListener(mover); + }); + robot.mouseMove(loc.x + dim.width / 2, loc.y + dim.height / 2); + robot.waitForIdle(); + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + robot.waitForIdle(); + + try { + synchronized(focused) { + if (!focused.get()) { + focused.wait(2000); + } + } + } catch (InterruptedException ie) { + throw new RuntimeException("Test was interrupted"); + } + + if (!focused.get()) { + throw new RuntimeException("none got focus"); + } + + if (!desired.isFocusOwner()) { + throw new RuntimeException("desired didn't get focus"); + } + } + +} diff --git a/test/jdk/java/foreign/TestMismatch.java b/test/jdk/java/foreign/TestMismatch.java index 9549b2508ff64..f50621e34154c 100644 --- a/test/jdk/java/foreign/TestMismatch.java +++ b/test/jdk/java/foreign/TestMismatch.java @@ -29,7 +29,9 @@ import java.lang.foreign.Arena; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Random; import java.util.concurrent.atomic.AtomicReference; import java.lang.foreign.MemorySegment; @@ -122,6 +124,68 @@ public void testSameValuesStatic(SliceOffsetAndSize ss1, SliceOffsetAndSize ss2) } } + @Test + public void random() { + try (var arena = Arena.ofConfined()) { + var rnd = new Random(42); + for (int size = 1; size < 64; size++) { + // Repeat a fair number of rounds + for (int i = 0; i < 147; i++) { + var src = arena.allocate(size); + // The dst segment might be zero to eight bytes longer + var dst = arena.allocate(size + rnd.nextInt(8 + 1)); + // Fill the src with random data + for (int j = 0; j < size; j++) { + src.set(ValueLayout.JAVA_BYTE, j, randomByte(rnd)); + } + // copy the random data from src to dst + dst.copyFrom(src); + // Fill the rest (if any) of the dst with random data + for (long j = src.byteSize(); j < dst.byteSize(); j++) { + dst.set(ValueLayout.JAVA_BYTE, j, randomByte(rnd)); + } + + if (rnd.nextBoolean()) { + // In this branch, we inject one or more deviating bytes + int beginDiff = rnd.nextInt(size); + int endDiff = rnd.nextInt(beginDiff, size); + for (int d = beginDiff; d <= endDiff; d++) { + byte existing = dst.get(ValueLayout.JAVA_BYTE, d); + // Make sure we never get back the same value + byte mutatedValue; + do { + mutatedValue = randomByte(rnd); + } while (existing == mutatedValue); + dst.set(ValueLayout.JAVA_BYTE, d, mutatedValue); + } + + // They are not equal and differ at position beginDiff + assertEquals(src.mismatch(dst), beginDiff); + assertEquals(dst.mismatch(src), beginDiff); + } else { + // In this branch, there is no injection + + if (src.byteSize() == dst.byteSize()) { + // The content matches and they are of equal size + assertEquals(src.mismatch(dst), -1); + assertEquals(dst.mismatch(src), -1); + } else { + // The content matches but they are of different length + // Remember, the size of src is always smaller than or equal + // to the size of dst. 
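+ // (mismatch mirrors Arrays.mismatch: -1 means equal size and content;
+ // when the shorter segment is a proper prefix of the longer one, the
+ // result is the shorter segment's size, as asserted next.)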
+ assertEquals(src.mismatch(dst), src.byteSize()); + assertEquals(dst.mismatch(src), src.byteSize()); + } + } + } + } + } + } + + static byte randomByte(Random rnd) { + return (byte) rnd.nextInt(Byte.MIN_VALUE, Byte.MAX_VALUE + 1); + } + @Test(dataProvider = "slices") public void testDifferentValues(MemorySegment s1, MemorySegment s2) { out.format("testDifferentValues s1:%s, s2:%s\n", s1, s2); diff --git a/test/jdk/jdk/internal/vm/Continuation/Scoped.java b/test/jdk/jdk/internal/vm/Continuation/Scoped.java index b034243d596de..1bad8e4aebd00 100644 --- a/test/jdk/jdk/internal/vm/Continuation/Scoped.java +++ b/test/jdk/jdk/internal/vm/Continuation/Scoped.java @@ -74,27 +74,27 @@ public void test1() { frames = cont.stackWalker().walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList())); System.out.println("No scope: " + frames); - assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter")); + assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter")); frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), A).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList())); System.out.println("A: " + frames); - assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter")); + assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter")); frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), B).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList())); System.out.println("B: " + frames); - assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter")); + assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter")); frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), C).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList())); System.out.println("C: " + frames); - assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter")); + assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter")); frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), K).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList())); System.out.println("K: " + frames); - assertEquals(frames, cont.isDone() ? 
diff --git a/test/jdk/jdk/internal/vm/Continuation/Scoped.java b/test/jdk/jdk/internal/vm/Continuation/Scoped.java
index b034243d596de..1bad8e4aebd00 100644
--- a/test/jdk/jdk/internal/vm/Continuation/Scoped.java
+++ b/test/jdk/jdk/internal/vm/Continuation/Scoped.java
@@ -74,27 +74,27 @@ public void test1() {
             frames = cont.stackWalker().walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
 
             System.out.println("No scope: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
 
             frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), A).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
             System.out.println("A: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
 
             frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), B).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
             System.out.println("B: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter"));
 
             frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), C).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
             System.out.println("C: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter"));
 
             frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), K).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
             System.out.println("K: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
 
             frames = cont.stackWalker(EnumSet.noneOf(StackWalker.Option.class), null).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
             System.out.println("null: " + frames);
-            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$14", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
+            assertEquals(frames, cont.isDone() ? List.of() : Arrays.asList("yield0", "yield", "lambda$bar$0", "run", "enter0", "enter", "yield0", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "yield0", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
         }
 
         assertEquals(res.get(), 2);
     }
@@ -119,23 +119,23 @@ static String bar(long b) {
 
         List<String> frames = StackWalker.getInstance().walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
 
-        assertEquals(frames.subList(0, 18), Arrays.asList("lambda$bar$14", "run", "enter0", "enter", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter", "run", "test1"));
+        assertEquals(frames.subList(0, 18), Arrays.asList("lambda$bar$0", "run", "enter0", "enter", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter", "run", "test1"));
 
         frames = StackWalkerHelper.getInstance(C).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
-        assertEquals(frames, Arrays.asList("lambda$bar$14", "run", "enter0", "enter"));
+        assertEquals(frames, Arrays.asList("lambda$bar$0", "run", "enter0", "enter"));
 
         frames = StackWalkerHelper.getInstance(B).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
-        assertEquals(frames, Arrays.asList("lambda$bar$14", "run", "enter0", "enter", "run", "bar", "lambda$foo$8", "run", "enter0", "enter"));
+        assertEquals(frames, Arrays.asList("lambda$bar$0", "run", "enter0", "enter", "run", "bar", "lambda$foo$0", "run", "enter0", "enter"));
 
         frames = StackWalkerHelper.getInstance(A).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
-        assertEquals(frames, Arrays.asList("lambda$bar$14", "run", "enter0", "enter", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
+        assertEquals(frames, Arrays.asList("lambda$bar$0", "run", "enter0", "enter", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter"));
 
         frames = StackWalkerHelper.getInstance(K).walk(fs -> fs.map(StackWalker.StackFrame::getMethodName).collect(Collectors.toList()));
-        assertEquals(frames.subList(0, 18), Arrays.asList("lambda$bar$14", "run", "enter0", "enter", "run", "bar", "lambda$foo$8", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter", "run", "test1"));
+        assertEquals(frames.subList(0, 18), Arrays.asList("lambda$bar$0", "run", "enter0", "enter", "run", "bar", "lambda$foo$0", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter", "run", "test1"));
 
         long r = b+1;
     });
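The updated frame names (lambda$bar$0 and lambda$foo$0 instead of lambda$bar$14 and lambda$foo$8) track a change in how javac numbers the synthetic lambda$... methods it generates. Those names are observable with plain reflection; a small sketch, using an illustrative class that is not part of this test:

    import java.lang.reflect.Method;

    class LambdaNames {
        static final Runnable R1 = () -> System.out.println("first");
        static final Runnable R2 = () -> System.out.println("second");

        public static void main(String[] args) {
            for (Method m : LambdaNames.class.getDeclaredMethods()) {
                if (m.isSynthetic()) {
                    // e.g. lambda$static$0, lambda$static$1
                    System.out.println(m.getName());
                }
            }
        }
    }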
"bar", "lambda$foo$0", "run", "enter0", "enter", "run", "foo", "lambda$test1$0", "run", "enter0", "enter", "run", "test1")); long r = b+1; }); diff --git a/test/langtools/tools/javac/MethodParameters/LambdaTest.out b/test/langtools/tools/javac/MethodParameters/LambdaTest.out index b03cc76a97be9..36b91b5e36de1 100644 --- a/test/langtools/tools/javac/MethodParameters/LambdaTest.out +++ b/test/langtools/tools/javac/MethodParameters/LambdaTest.out @@ -1,7 +1,7 @@ class LambdaTest -- LambdaTest.() LambdaTest.foo(i) -LambdaTest.lambda$static$1(arg0)/*synthetic*/ -LambdaTest.lambda$static$0(arg0, arg1)/*synthetic*/ +LambdaTest.lambda$static$0(arg0)/*synthetic*/ +LambdaTest.lambda$static$1(arg0, arg1)/*synthetic*/ static interface LambdaTest$I -- inner LambdaTest$I.m(x) diff --git a/test/langtools/tools/javac/MethodParameters/LocalClassTest.out b/test/langtools/tools/javac/MethodParameters/LocalClassTest.out index 3b95739e74a5e..7b0028e1fb0cf 100644 --- a/test/langtools/tools/javac/MethodParameters/LocalClassTest.out +++ b/test/langtools/tools/javac/MethodParameters/LocalClassTest.out @@ -1,7 +1,7 @@ class LocalClassTest$1 -- anon LocalClassTest$1.(final this$0/*implicit*/, final j, final val$i/*synthetic*/) class LocalClassTest$1CapturingLocal$1 -- anon -LocalClassTest$1CapturingLocal$1.(final this$0/*implicit*/, final val$val$i/*synthetic*/) +LocalClassTest$1CapturingLocal$1.(final this$0/*implicit*/, final val$i/*synthetic*/) LocalClassTest$1CapturingLocal$1.test() class LocalClassTest$1CapturingLocal -- inner LocalClassTest$1CapturingLocal.(final this$0/*implicit*/, final j, final val$i/*synthetic*/) diff --git a/test/langtools/tools/javac/T8019486/WrongLNTForLambdaTest.java b/test/langtools/tools/javac/T8019486/WrongLNTForLambdaTest.java index 24c9b81c96471..4ddc40fa231a2 100644 --- a/test/langtools/tools/javac/T8019486/WrongLNTForLambdaTest.java +++ b/test/langtools/tools/javac/T8019486/WrongLNTForLambdaTest.java @@ -136,15 +136,15 @@ void run() throws Exception { checkClassFile(new File(Paths.get(System.getProperty("user.dir"), "Foo.class").toUri()), "lambda$bar$0", simpleLambdaExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), - "Foo.class").toUri()), "lambda$variablesInLambdas$1", lambdaWithVarsExpectedLNT); + "Foo.class").toUri()), "lambda$variablesInLambdas$0", lambdaWithVarsExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), "Foo$1FooBar.class").toUri()), "run", insideLambdaWithVarsExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), - "Foo.class").toUri()), "lambda$variablesInLambdas$2", lambdaVoid2VoidExpectedLNT); + "Foo.class").toUri()), "lambda$variablesInLambdas$1", lambdaVoid2VoidExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), "Foo.class").toUri()), "$deserializeLambda$", deserializeExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), - "Foo.class").toUri()), "lambda$variablesInLambdas$3", lambdaBridgeExpectedLNT); + "Foo.class").toUri()), "lambda$variablesInLambdas$2", lambdaBridgeExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), "Foo.class").toUri()), "assignLambda", assignmentExpectedLNT); checkClassFile(new File(Paths.get(System.getProperty("user.dir"), diff --git a/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java b/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java index b555014bebe67..f15771840f59a 100644 --- 
diff --git a/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java b/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java
index b555014bebe67..f15771840f59a 100644
--- a/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java
+++ b/test/langtools/tools/javac/classfiles/attributes/EnclosingMethod/EnclosingMethodTest.java
@@ -245,7 +245,6 @@ class EnclosingInitialization {
     // anonymous and local classes in lambda
     @ExpectedEnclosingMethod(
             info = "EnclosingLambda in EnclosingMethodTest",
-            enclosingMethod = "",
             enclosingClazz = EnclosingMethodTest.class
     )
     class EnclosingLambda {
@@ -325,7 +324,6 @@ class EnclosingInitialization {
     // anonymous and local classes in lambda
     @ExpectedEnclosingMethod(
             info = "EnclosingLambda in notEnclosing01",
-            enclosingMethod = "",
             enclosingClazz = notEnclosing01.class
     )
     class EnclosingLambda {
@@ -382,7 +380,6 @@ public interface notEnclosing02 {
     // anonymous and local classes in lambda
     @ExpectedEnclosingMethod(
             info = "EnclosingLambda in notEnclosing02",
-            enclosingMethod = "",
             enclosingClazz = notEnclosing02.class
     )
     class EnclosingLambda {
@@ -460,7 +457,6 @@ class EnclosingInitialization {
     // anonymous and local classes in lambda
     @ExpectedEnclosingMethod(
             info = "EnclosingLambda in notEnclosing03",
-            enclosingMethod = "",
             enclosingClazz = notEnclosing03.class
     )
     class EnclosingLambda {
@@ -517,7 +513,6 @@ static class VariableInitializer {
     // anonymous and local classes in lambda
     @ExpectedEnclosingMethod(
             info = "EnclosingLambda in notEnclosing04",
-            enclosingMethod = "",
             enclosingClazz = notEnclosing04.class
     )
     class EnclosingLambda {
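The removed enclosingMethod = "" lines update the expected EnclosingMethod attribute for classes declared inside lambda bodies. That attribute can be probed at runtime via core reflection; a quick sketch (the printed values depend on the compiler version, so nothing here is asserted):

    class EnclosingProbe {
        public static void main(String[] args) {
            Runnable r = () -> {
                class Local {}
                // Enclosing class of a local class declared in a lambda body
                System.out.println(Local.class.getEnclosingClass());
                // The synthetic lambda method, if one is recorded; may be null
                System.out.println(Local.class.getEnclosingMethod());
            };
            r.run();
        }
    }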
diff --git a/test/langtools/tools/javac/lambda/CaptureVarOrder.java b/test/langtools/tools/javac/lambda/CaptureVarOrder.java
new file mode 100644
index 0000000000000..0bfa061cb4f32
--- /dev/null
+++ b/test/langtools/tools/javac/lambda/CaptureVarOrder.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8336492
+ * @summary Regression in lambda serialization
+ */
+
+public class CaptureVarOrder {
+    static Object m(String s, int i, Object o) {
+        return new Object() {
+            final byte B = 0;
+            void g() { System.out.println(s + i + B + o); }
+        };
+    }
+
+    static Runnable r(String s, int i, Object o) {
+        final byte B = 0;
+        return () -> System.out.println(s + i + B + o);
+    }
+
+    public static void main(String[] args) throws ReflectiveOperationException {
+        CaptureVarOrder.class.getDeclaredMethod("lambda$r$0", String.class, int.class, Object.class);
+        m("", 1, null).getClass().getDeclaredConstructor(String.class, int.class, Object.class);
+    }
+}
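CaptureVarOrder pins down the parameter order of the generated lambda$r$0 method: captured values appear as leading parameters in capture order (the constant B is a compile-time constant and is inlined rather than captured). The same shape can be dumped with reflection; an illustrative sketch, not part of the test:

    import java.lang.reflect.Method;
    import java.util.Arrays;

    class CaptureShape {
        static Runnable r(String s, int i, Object o) {
            return () -> System.out.println(s + i + o);
        }

        public static void main(String[] args) {
            for (Method m : CaptureShape.class.getDeclaredMethods()) {
                if (m.getName().startsWith("lambda$")) {
                    // Typically: lambda$r$0 [class java.lang.String, int, class java.lang.Object]
                    System.out.println(m.getName() + " " + Arrays.toString(m.getParameterTypes()));
                }
            }
        }
    }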
diff --git a/test/langtools/tools/javac/lambda/SerializedLambdaInLocalClass.java b/test/langtools/tools/javac/lambda/SerializedLambdaInLocalClass.java
new file mode 100644
index 0000000000000..c015182f44956
--- /dev/null
+++ b/test/langtools/tools/javac/lambda/SerializedLambdaInLocalClass.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8336492
+ * @summary Regression in lambda serialization
+ */
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.function.*;
+
+public class SerializedLambdaInLocalClass {
+
+    public static void main(String[] args) {
+        SerializedLambdaInLocalClass s = new SerializedLambdaInLocalClass();
+        s.test(s::f_lambda_in_anon);
+        s.test(s::f_lambda_in_local);
+        s.test(s::f_lambda_in_lambda);
+    }
+
+    void test(IntFunction<Supplier<F>> fSupplier) {
+        try {
+            F f = fSupplier.apply(42).get();
+            var baos = new ByteArrayOutputStream();
+            // write
+            try (var oos = new ObjectOutputStream(baos)) {
+                oos.writeObject(f);
+            }
+            byte[] bytes = baos.toByteArray();
+            var bais = new ByteArrayInputStream(bytes);
+            // read
+            try (var ois = new ObjectInputStream(bais)) {
+                F f2 = (F) ois.readObject();
+                if (f2.getValue() != f.getValue()) {
+                    throw new AssertionError(String.format("Found: %d, expected %d", f2.getValue(), f.getValue()));
+                }
+            }
+        } catch (IOException | ClassNotFoundException ex) {
+            throw new AssertionError(ex);
+        }
+    }
+
+    interface F extends Serializable {
+        int getValue();
+    }
+
+    Supplier<F> f_lambda_in_anon(int x) {
+        return new Supplier<F>() {
+            @Override
+            public F get() {
+                return () -> x;
+            }
+        };
+    }
+
+    Supplier<F> f_lambda_in_local(int x) {
+        class FSupplier implements Supplier<F> {
+            @Override
+            public F get() {
+                return () -> x;
+            }
+        }
+        return new FSupplier();
+    }
+
+    Supplier<F> f_lambda_in_lambda(int x) {
+        return () -> () -> x;
+    }
+}
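When chasing lambda-serialization regressions like the one above, it can help to look at the serialized form directly: the class of a serializable lambda defines a synthetic writeReplace method returning java.lang.invoke.SerializedLambda, which records the implementation method name and the captured arguments in capture order. A minimal sketch:

    import java.io.Serializable;
    import java.lang.invoke.SerializedLambda;
    import java.lang.reflect.Method;

    class Inspect {
        interface SerRunnable extends Runnable, Serializable {}

        public static void main(String[] args) throws ReflectiveOperationException {
            int x = 42;
            SerRunnable r = () -> System.out.println(x);

            Method writeReplace = r.getClass().getDeclaredMethod("writeReplace");
            writeReplace.setAccessible(true);
            SerializedLambda sl = (SerializedLambda) writeReplace.invoke(r);

            System.out.println(sl.getImplMethodName());    // e.g. lambda$main$0
            System.out.println(sl.getCapturedArgCount());  // 1
            System.out.println(sl.getCapturedArg(0));      // 42
        }
    }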
= {"-Djava.lang.foreign.native.threshold.power.copy=0"}) @Benchmark - public void native_segment_copy5Arg() { - MemorySegment.copy(nativeSrcSegment, 0, nativeDstSegment, 0, ELEM_SIZE); + public void heapSegmentCopyUnsafe() { + MemorySegment.copy(heapSrcSegment, 0, heapDstSegment, 0, ELEM_SIZE); } + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.copy=31"}) @Benchmark - public void heap_segment_copy7arg() { - MemorySegment.copy(heapSrcSegment, JAVA_BYTE, 0, heapDstSegment, JAVA_BYTE, 0, ELEM_SIZE); + public void nativeSegmentCopyJava() { + MemorySegment.copy(nativeSrcSegment, 0, nativeDstSegment, 0, ELEM_SIZE); } + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.copy=0"}) @Benchmark - public void buffer_copy() { - dstBuffer.put(srcBuffer); + public void nativeSegmentCopyUnsafe() { + MemorySegment.copy(nativeSrcSegment, 0, nativeDstSegment, 0, ELEM_SIZE); } } diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/TestFill.java b/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkFill.java similarity index 69% rename from test/micro/org/openjdk/bench/java/lang/foreign/TestFill.java rename to test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkFill.java index 78719f03bc377..95ca722896944 100644 --- a/test/micro/org/openjdk/bench/java/lang/foreign/TestFill.java +++ b/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkFill.java @@ -48,13 +48,10 @@ @State(Scope.Thread) @OutputTimeUnit(TimeUnit.NANOSECONDS) @Fork(value = 3) -public class TestFill { +public class SegmentBulkFill { - @Param({"0", "1", "2", "3", "4", "5", "6", "7", - "8", "9", "10", "11", "12", "13", "14", "15", - "16", "17", "18", "19", "20", "21", "22", "23", - "24", "25", "26", "27", "28", "29", "30", "31", - "32", "128", "256", "384", "511", "512"}) + @Param({"2", "3", "4", "5", "6", "7", "8", "64", "512", + "4096", "32768", "262144", "2097152", "16777216", "134217728"}) public int ELEM_SIZE; byte[] array; @@ -73,22 +70,43 @@ public void setup() { } @Benchmark - public void arrays_fill() { + public void arraysFill() { Arrays.fill(array, (byte) 0); } + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=31"}) @Benchmark - public void heap_segment_fill() { + public void heapSegmentFillJava() { heapSegment.fill((byte) 0); } + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=0"}) @Benchmark - public void native_segment_fill() { + public void heapSegmentFillUnsafe() { + heapSegment.fill((byte) 0); + } + + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=31"}) + @Benchmark + public void nativeSegmentFillJava() { + nativeSegment.fill((byte) 0); + } + + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=0"}) + @Benchmark + public void nativeSegmentFillUnsafe() { nativeSegment.fill((byte) 0); } + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=31"}) + @Benchmark + public void unalignedSegmentFillJava() { + unalignedSegment.fill((byte) 0); + } + + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.fill=0"}) @Benchmark - public void unaligned_segment_fill() { + public void unalignedSegmentFillUnsafe() { unalignedSegment.fill((byte) 0); } diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkMismatch.java b/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkMismatch.java new file mode 100644 index 0000000000000..5656b2f6b9f74 --- /dev/null +++ 
diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkMismatch.java b/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkMismatch.java
new file mode 100644
index 0000000000000..5656b2f6b9f74
--- /dev/null
+++ b/test/micro/org/openjdk/bench/java/lang/foreign/SegmentBulkMismatch.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package org.openjdk.bench.java.lang.foreign;
+
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.lang.foreign.Arena;
+import java.lang.foreign.MemorySegment;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static java.lang.foreign.ValueLayout.*;
+
+@BenchmarkMode(Mode.AverageTime)
+@Warmup(iterations = 5, time = 500, timeUnit = TimeUnit.MILLISECONDS)
+@Measurement(iterations = 10, time = 500, timeUnit = TimeUnit.MILLISECONDS)
+@State(Scope.Thread)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@Fork(value = 3)
+public class SegmentBulkMismatch {
+
+    @Param({"2", "3", "4", "5", "6", "7", "8", "64", "512",
+            "4096", "32768", "262144", "2097152", "16777216", "134217728"})
+    public int ELEM_SIZE;
+
+    MemorySegment srcNative;
+    MemorySegment dstNative;
+    byte[] srcArray;
+    byte[] dstArray;
+    MemorySegment srcHeap;
+    MemorySegment dstHeap;
+
+    @Setup
+    public void setup() {
+        // Always use the same alignment regardless of size
+        srcNative = Arena.ofAuto().allocate(ELEM_SIZE, 16);
+        dstNative = Arena.ofAuto().allocate(ELEM_SIZE, 16);
+        var rnd = new Random(42);
+        for (int i = 0; i < ELEM_SIZE; i++) {
+            srcNative.set(JAVA_BYTE, i, (byte) rnd.nextInt(Byte.MIN_VALUE, Byte.MAX_VALUE));
+        }
+        dstNative.copyFrom(srcNative);
+        srcArray = srcNative.toArray(JAVA_BYTE);
+        dstArray = dstNative.toArray(JAVA_BYTE);
+        srcHeap = MemorySegment.ofArray(srcArray);
+        dstHeap = MemorySegment.ofArray(dstArray);
+    }
+
+    @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.mismatch=31"})
+    @Benchmark
+    public long nativeSegmentJava() {
+        return srcNative.mismatch(dstNative);
+    }
+
+    @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.mismatch=31"})
+    @Benchmark
+    public long heapSegmentJava() {
+        return srcHeap.mismatch(dstHeap);
+    }
+
{"-Djava.lang.foreign.native.threshold.power.mismatch=0"}) + @Benchmark + public long nativeSegmentUnsafe() { + return srcNative.mismatch(dstNative); + } + + @Fork(value = 3, jvmArgsAppend = {"-Djava.lang.foreign.native.threshold.power.mismatch=0"}) + @Benchmark + public long heapSegmentUnsafe() { + return srcHeap.mismatch(dstHeap); + } + + @Benchmark + public long array() { + return Arrays.mismatch(srcArray, dstArray); + } + +} +