diff --git a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp
index 6713a9c755a66..753f075efb244 100644
--- a/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp
+++ b/src/hotspot/cpu/riscv/downcallLinker_riscv.cpp
@@ -37,68 +37,6 @@
 #define __ _masm->
 
-class DowncallStubGenerator : public StubCodeGenerator {
-  BasicType* _signature;
-  int _num_args;
-  BasicType _ret_bt;
-
-  const ABIDescriptor& _abi;
-  const GrowableArray<VMStorage>& _input_registers;
-  const GrowableArray<VMStorage>& _output_registers;
-
-  bool _needs_return_buffer;
-  int _captured_state_mask;
-  bool _needs_transition;
-
-  int _frame_complete;
-  int _frame_size_slots;
-  OopMapSet* _oop_maps;
-public:
-  DowncallStubGenerator(CodeBuffer* buffer,
-                        BasicType* signature,
-                        int num_args,
-                        BasicType ret_bt,
-                        const ABIDescriptor& abi,
-                        const GrowableArray<VMStorage>& input_registers,
-                        const GrowableArray<VMStorage>& output_registers,
-                        bool needs_return_buffer,
-                        int captured_state_mask,
-                        bool needs_transition)
-   : StubCodeGenerator(buffer, PrintMethodHandleStubs),
-     _signature(signature),
-     _num_args(num_args),
-     _ret_bt(ret_bt),
-     _abi(abi),
-     _input_registers(input_registers),
-     _output_registers(output_registers),
-     _needs_return_buffer(needs_return_buffer),
-     _captured_state_mask(captured_state_mask),
-     _needs_transition(needs_transition),
-     _frame_complete(0),
-     _frame_size_slots(0),
-     _oop_maps(nullptr) {
-  }
-
-  void generate();
-
-  int frame_complete() const {
-    return _frame_complete;
-  }
-
-  int framesize() const {
-    return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
-  }
-
-  OopMapSet* oop_maps() const {
-    return _oop_maps;
-  }
-};
-
-void DowncallLinker::StubGenerator::pd_add_offset_to_oop(VMStorage reg_oop, VMStorage reg_offset,
-                                                         VMStorage tmp1, VMStorage tmp2) const {
-  Unimplemented();
-}
-
 static const int native_invoker_code_base_size = 256;
 static const int native_invoker_size_per_arg = 8;
 
@@ -114,10 +52,10 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
   int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_arg);
   int locs_size = 1; // must be non-zero
   CodeBuffer code("nep_invoker_blob", code_size, locs_size);
-  DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
-                          input_registers, output_registers,
-                          needs_return_buffer, captured_state_mask,
-                          needs_transition);
+  StubGenerator g(&code, signature, num_args, ret_bt, abi,
+                  input_registers, output_registers,
+                  needs_return_buffer, captured_state_mask,
+                  needs_transition);
   g.generate();
   code.log_section_sizes("nep_invoker_blob");
 
@@ -140,6 +78,35 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
   return stub;
 }
 
+static constexpr int FP_BIAS = 0; // sender_sp_offset is 0 on RISCV
+
+void DowncallLinker::StubGenerator::pd_add_offset_to_oop(VMStorage reg_oop, VMStorage reg_offset, VMStorage tmp1, VMStorage tmp2) const {
+  Register r_tmp1 = as_Register(tmp1);
+  Register r_tmp2 = as_Register(tmp2);
+  if (reg_oop.is_reg()) {
+    assert(reg_oop.type() == StorageType::INTEGER, "expected");
+    Register reg_oop_reg = as_Register(reg_oop);
+    if (reg_offset.is_reg()) {
+      assert(reg_offset.type() == StorageType::INTEGER, "expected");
+      __ add(reg_oop_reg, reg_oop_reg, as_Register(reg_offset));
+    } else {
+      assert(reg_offset.is_stack(), "expected");
+      assert(reg_offset.stack_size() == 8, "expected long");
+      __ ld(r_tmp1, Address(fp, FP_BIAS + reg_offset.offset()));
+      __ add(reg_oop_reg, reg_oop_reg, r_tmp1);
+    }
+  } else {
+    assert(reg_oop.is_stack(), "expected");
+    assert(reg_oop.stack_size() == 8, "expected long");
+    assert(reg_offset.is_stack(), "expected");
+    assert(reg_offset.stack_size() == 8, "expected long");
+    __ ld(r_tmp1, Address(fp, FP_BIAS + reg_offset.offset()));
+    __ ld(r_tmp2, Address(fp, FP_BIAS + reg_oop.offset()));
+    __ add(r_tmp1, r_tmp2, r_tmp1);
+    __ sd(r_tmp1, Address(fp, FP_BIAS + reg_oop.offset()));
+  }
+}
+
 void DowncallStubGenerator::generate() {
   enum layout {
     fp_off,
@@ -152,6 +119,13 @@ void DowncallStubGenerator::generate() {
     // out arg area (e.g. for stack args)
   };
 
+  GrowableArray<VMStorage> java_regs;
+  ForeignGlobals::java_calling_convention(_signature, _num_args, java_regs);
+  bool has_objects = false;
+  GrowableArray<VMStorage> filtered_java_regs = ForeignGlobals::downcall_filter_offset_regs(java_regs, _signature,
+                                                                                            _num_args, has_objects);
+  assert(!(_needs_transition && has_objects), "can not pass objects when doing transition");
+
   int allocated_frame_size = 0;
   assert(_abi._shadow_space_bytes == 0, "not expecting shadow space on RISCV64");
   allocated_frame_size += ForeignGlobals::compute_out_arg_bytes(_input_registers);
@@ -181,9 +155,21 @@ void DowncallStubGenerator::generate() {
     allocated_frame_size += BytesPerWord;
   }
 
+  // The space we have allocated will look like:
+  //
+  // FP-> |                     |
+  //      |---------------------| = frame_bottom_offset = frame_size
+  //      | (optional)          |
+  //      | capture state buf   |
+  //      |---------------------| = StubLocations::CAPTURED_STATE_BUFFER
+  //      | (optional)          |
+  //      | return buffer       |
+  //      |---------------------| = StubLocations::RETURN_BUFFER
+  // SP-> | out/stack args      | or | out_reg_spiller area |
+  //
+  // Note how the last chunk can be shared, since the 3 uses occur at different times.
+
   VMStorage shuffle_reg = as_VMStorage(x9);
-  GrowableArray<VMStorage> java_regs;
-  ForeignGlobals::java_calling_convention(_signature, _num_args, java_regs);
   GrowableArray<VMStorage> out_regs = ForeignGlobals::replace_place_holders(_input_registers, locs);
   ArgumentShuffle arg_shuffle(java_regs, out_regs, shuffle_reg);
@@ -225,6 +211,10 @@ void DowncallStubGenerator::generate() {
     __ block_comment("} thread java2native");
   }
 
+  if (has_objects) {
+    add_offset_to_oops(java_regs, as_VMStorage(t0), as_VMStorage(t1));
+  }
+
   __ block_comment("{ argument shuffle");
   arg_shuffle.generate(_masm, shuffle_reg, 0, _abi._shadow_space_bytes);
   __ block_comment("} argument shuffle");
diff --git a/src/java.base/share/classes/jdk/internal/foreign/abi/riscv64/linux/LinuxRISCV64CallArranger.java b/src/java.base/share/classes/jdk/internal/foreign/abi/riscv64/linux/LinuxRISCV64CallArranger.java
index 8da7a124abc17..1bfa43402defd 100644
--- a/src/java.base/share/classes/jdk/internal/foreign/abi/riscv64/linux/LinuxRISCV64CallArranger.java
+++ b/src/java.base/share/classes/jdk/internal/foreign/abi/riscv64/linux/LinuxRISCV64CallArranger.java
@@ -87,8 +87,8 @@ public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, bool
     public static Bindings getBindings(MethodType mt, FunctionDescriptor cDesc, boolean forUpcall, LinkerOptions options) {
         CallingSequenceBuilder csb = new CallingSequenceBuilder(CLinux, forUpcall, options);
 
-        BindingCalculator argCalc = forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true);
-        BindingCalculator retCalc = forUpcall ? new UnboxBindingCalculator(false) : new BoxBindingCalculator(false);
+        BindingCalculator argCalc = forUpcall ? new BoxBindingCalculator(true) : new UnboxBindingCalculator(true, options.allowsHeapAccess());
+        BindingCalculator retCalc = forUpcall ? new UnboxBindingCalculator(false, false) : new BoxBindingCalculator(false);
 
         boolean returnInMemory = isInMemoryReturn(cDesc.returnLayout());
         if (returnInMemory) {
@@ -254,11 +254,13 @@ protected BindingCalculator(boolean forArguments) {
     }
 
     static class UnboxBindingCalculator extends BindingCalculator {
-        boolean forArguments;
+        protected final boolean forArguments;
+        private final boolean useAddressPairs;
 
-        UnboxBindingCalculator(boolean forArguments) {
+        UnboxBindingCalculator(boolean forArguments, boolean useAddressPairs) {
             super(forArguments);
             this.forArguments = forArguments;
+            this.useAddressPairs = useAddressPairs;
         }
 
         @Override
@@ -282,9 +284,17 @@ List<Binding> getBindings(Class<?> carrier, MemoryLayout layout, TypeClass argum
                     bindings.vmStore(storage, carrier);
                 }
                 case POINTER -> {
-                    bindings.unboxAddress();
                     VMStorage storage = storageCalculator.getStorage(StorageType.INTEGER);
-                    bindings.vmStore(storage, long.class);
+                    if (useAddressPairs) {
+                        bindings.dup()
+                                .segmentBase()
+                                .vmStore(storage, Object.class)
+                                .segmentOffsetAllowHeap()
+                                .vmStore(null, long.class);
+                    } else {
+                        bindings.unboxAddress();
+                        bindings.vmStore(storage, long.class);
+                    }
                 }
                 case STRUCT_REGISTER_X -> {
                     assert carrier == MemorySegment.class;
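
Note: the new address-pair unboxing path above is only taken when LinkerOptions.allowsHeapAccess() is true, i.e. when a downcall handle is linked with Linker.Option.critical(true). The sketch below is a hypothetical caller-side example of the kind of call this patch enables on RISC-V: the native function sum_ints and the library name "sum" are made up for illustration, but the FFM API calls are the standard JDK 22+ ones. Passing a heap-backed segment like this is what ends up in the dup/segmentBase/segmentOffsetAllowHeap bindings and, on the VM side, in pd_add_offset_to_oop.

import java.lang.foreign.*;
import java.lang.invoke.MethodHandle;

public class CriticalHeapDowncall {
    public static void main(String[] args) throws Throwable {
        // Hypothetical native library providing: int sum_ints(const int *arr, int len)
        System.loadLibrary("sum");

        Linker linker = Linker.nativeLinker();
        MemorySegment sumInts = SymbolLookup.loaderLookup()
                .find("sum_ints")
                .orElseThrow();

        // critical(true) sets allowsHeapAccess(), which selects the
        // address-pair (Object base + long offset) binding path added above.
        MethodHandle handle = linker.downcallHandle(
                sumInts,
                FunctionDescriptor.of(ValueLayout.JAVA_INT,
                                      ValueLayout.ADDRESS, ValueLayout.JAVA_INT),
                Linker.Option.critical(true));

        // A heap segment backed by an int[]; without critical(true) this
        // argument would have to be a native (off-heap) segment.
        int[] values = {1, 2, 3, 4};
        MemorySegment heapSegment = MemorySegment.ofArray(values);

        int sum = (int) handle.invokeExact(heapSegment, values.length);
        System.out.println(sum);
    }
}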