YJIT: Fallback send instructions to vm_sendish (ruby#8106)
k0kubun authored Jul 24, 2023
1 parent c4e893c commit cef60e9
Showing 7 changed files with 178 additions and 7 deletions.
6 changes: 3 additions & 3 deletions test/ruby/test_yjit.rb
@@ -548,7 +548,7 @@ def foo &blk

def test_getblockparamproxy
# Currently two side exits as OPTIMIZED_METHOD_TYPE_CALL is unimplemented
- assert_compiles(<<~'RUBY', insns: [:getblockparamproxy], exits: { opt_send_without_block: 2 })
+ assert_compiles(<<~'RUBY', insns: [:getblockparamproxy])
def foo &blk
p blk.call
p blk.call
@@ -607,7 +607,7 @@ def jit_method

def test_send_kwargs
# For now, this side-exits when calls include keyword args
- assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A", exits: {opt_send_without_block: 1})
+ assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A")
def internal_method(**kw)
"#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
end
@@ -647,7 +647,7 @@ def jit_method

def test_send_kwargs_splat
# For now, this side-exits when calling with a splat
- assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B", exits: {opt_send_without_block: 1})
+ assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B")
def internal_method(**kw)
"#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
end
10 changes: 10 additions & 0 deletions vm_exec.h
@@ -169,10 +169,20 @@ default: \
#define THROW_EXCEPTION(exc) return (VALUE)(exc)
#endif

// Run the interpreter from the JIT
#define VM_EXEC(ec, val) do { \
if (val == Qundef) { \
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); \
val = vm_exec(ec); \
} \
} while (0)

// Run the JIT from the interpreter
#define JIT_EXEC(ec, val) do { \
rb_jit_func_t func; \
if (val == Qundef && (func = jit_compile(ec))) { \
val = func(ec, ec->cfp); \
RESTORE_REGS(); /* fix cfp for tailcall */ \
if (ec->tag->state) THROW_EXCEPTION(val); \
} \
} while (0)
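The two macros are mirror images of each other; below is a minimal sketch of the intended pairing, assuming only what the macros above and the new rb_vm_* wrappers in vm_insnhelper.c show.

/*
 * Sketch only -- not part of this diff. vm_sendish() returns Qundef when it
 * pushes an ISEQ frame instead of directly calling a C function; the caller
 * then decides who runs that frame:
 *
 *   // interpreter side (a send-ish handler, per the macro comment above)
 *   val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
 *   JIT_EXEC(ec, val);   // enter compiled code for the new frame, if any
 *
 *   // JIT fallback side (rb_vm_send() and friends added in vm_insnhelper.c)
 *   val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
 *   VM_EXEC(ec, val);    // run the new frame to completion in vm_exec()
 *
 * VM_EXEC() sets VM_FRAME_FLAG_FINISH on the pushed frame so vm_exec()
 * returns once that frame pops, handing a concrete return value back to the
 * JIT-generated caller instead of Qundef.
 */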
36 changes: 36 additions & 0 deletions vm_insnhelper.c
@@ -5528,6 +5528,42 @@ vm_sendish(
return val;
}

VALUE
rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
VM_EXEC(ec, val);
return val;
}

VALUE
rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
VALUE bh = VM_BLOCK_HANDLER_NONE;
VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
VM_EXEC(ec, val);
return val;
}

VALUE
rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
{
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
VM_EXEC(ec, val);
return val;
}

VALUE
rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
{
VALUE bh = VM_BLOCK_HANDLER_NONE;
VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
VM_EXEC(ec, val);
return val;
}

/* object.c */
VALUE rb_nil_to_s(VALUE);
VALUE rb_true_to_s(VALUE);
14 changes: 14 additions & 0 deletions yjit.c
@@ -1122,6 +1122,20 @@ rb_yjit_assert_holding_vm_lock(void)
ASSERT_vm_locking();
}

// The number of stack slots that vm_sendish() pops for send and invokesuper.
size_t
rb_yjit_sendish_sp_pops(const struct rb_callinfo *ci)
{
return 1 - sp_inc_of_sendish(ci); // + 1 to ignore return value push
}

// The number of stack slots that vm_sendish() pops for invokeblock.
size_t
rb_yjit_invokeblock_sp_pops(const struct rb_callinfo *ci)
{
return 1 - sp_inc_of_invokeblock(ci); // + 1 to ignore return value push
}
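A worked example of the arithmetic, assuming insns.def's sp_inc_of_sendish() counts the receiver, positional arguments, and any block-handler argument popped against one return value pushed (and sp_inc_of_invokeblock() the same minus the receiver slot):

/*
 * Hypothetical call sites, under the assumption above:
 *
 *   recv.m(a, b)   -- send: argc = 2, no block arg
 *     sp_inc_of_sendish        = 1 - (2 + 1) = -2
 *     rb_yjit_sendish_sp_pops  = 1 - (-2)    =  3   // receiver + two args
 *
 *   yield(a, b)    -- invokeblock: argc = 2, no receiver slot
 *     sp_inc_of_invokeblock        = 1 - 2    = -1
 *     rb_yjit_invokeblock_sp_pops  = 1 - (-1) =  2  // two args
 *
 * The leading "1 -" cancels the return-value push, leaving only the slots
 * the call consumes -- which is what gen_send_dynamic() pops before the call.
 */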

// Primitives used by yjit.rb
VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
2 changes: 2 additions & 0 deletions yjit/bindgen/src/main.rs
@@ -325,6 +325,8 @@ fn main() {
.allowlist_function("rb_yjit_icache_invalidate")
.allowlist_function("rb_optimized_call")
.allowlist_function("rb_yjit_assert_holding_vm_lock")
.allowlist_function("rb_yjit_sendish_sp_pops")
.allowlist_function("rb_yjit_invokeblock_sp_pops")

// from vm_sync.h
.allowlist_function("rb_vm_barrier")
115 changes: 111 additions & 4 deletions yjit/src/codegen.rs
@@ -6428,6 +6428,38 @@ fn gen_struct_aset(
Some(EndBlock)
}

// Generate code that calls a method with dynamic dispatch
fn gen_send_dynamic<F: Fn(&mut Assembler) -> Opnd>(
jit: &mut JITState,
asm: &mut Assembler,
cd: *const rb_call_data,
sp_pops: usize,
vm_sendish: F,
) -> Option<CodegenStatus> {
// Our frame handling is not compatible with tailcall
if unsafe { vm_ci_flag((*cd).ci) } & VM_CALL_TAILCALL != 0 {
return None;
}

// Save PC and SP to prepare for dynamic dispatch
jit_prepare_routine_call(jit, asm);

// Pop arguments and a receiver
asm.stack_pop(sp_pops);

// Dispatch a method
let ret = vm_sendish(asm);

// Push the return value
let stack_ret = asm.stack_push(Type::Unknown);
asm.mov(stack_ret, ret);

// Fix the interpreter SP deviated by vm_sendish
asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), SP);

Some(KeepCompiling)
}

fn gen_send_general(
jit: &mut JITState,
asm: &mut Assembler,
@@ -6909,33 +6941,84 @@ fn gen_opt_send_without_block(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
// Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
if let Some(status) = gen_send_general(jit, asm, ocb, cd, None) {
return Some(status);
}

- gen_send_general(jit, asm, ocb, cd, None)
// Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
extern "C" {
fn rb_vm_opt_send_without_block(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
}
asm.ccall(
rb_vm_opt_send_without_block as *const u8,
vec![EC, CFP, (cd as usize).into()],
)
})
}

fn gen_send(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
// Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
let block = jit.get_arg(1).as_optional_ptr();
- return gen_send_general(jit, asm, ocb, cd, block);
if let Some(status) = gen_send_general(jit, asm, ocb, cd, block) {
return Some(status);
}

// Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
let blockiseq = jit.get_arg(1).as_iseq();
gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
extern "C" {
fn rb_vm_send(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
}
asm.ccall(
rb_vm_send as *const u8,
vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
)
})
}

fn gen_invokeblock(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
// Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
if let Some(status) = gen_invokeblock_specialized(jit, asm, ocb, cd) {
return Some(status);
}

// Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_invokeblock_sp_pops((*cd).ci) }, |asm| {
extern "C" {
fn rb_vm_invokeblock(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
}
asm.ccall(
rb_vm_invokeblock as *const u8,
vec![EC, CFP, (cd as usize).into()],
)
})
}

fn gen_invokeblock_specialized(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cd: *const rb_call_data,
) -> Option<CodegenStatus> {
if !jit.at_current_insn() {
defer_compilation(jit, asm, ocb);
return Some(EndBlock);
}

// Get call info
- let cd = jit.get_arg(0).as_ptr();
let ci = unsafe { get_call_data_ci(cd) };
let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
let flags = unsafe { vm_ci_flag(ci) };
@@ -7065,7 +7148,31 @@ fn gen_invokesuper(
asm: &mut Assembler,
ocb: &mut OutlinedCb,
) -> Option<CodegenStatus> {
- let cd: *const rb_call_data = jit.get_arg(0).as_ptr();
// Generate specialized code if possible
let cd = jit.get_arg(0).as_ptr();
if let Some(status) = gen_invokesuper_specialized(jit, asm, ocb, cd) {
return Some(status);
}

// Otherwise, fall back to dynamic dispatch using the interpreter's implementation of send
let blockiseq = jit.get_arg(1).as_iseq();
gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
extern "C" {
fn rb_vm_invokesuper(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
}
asm.ccall(
rb_vm_invokesuper as *const u8,
vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
)
})
}

fn gen_invokesuper_specialized(
jit: &mut JITState,
asm: &mut Assembler,
ocb: &mut OutlinedCb,
cd: *const rb_call_data,
) -> Option<CodegenStatus> {
let block: Option<IseqPtr> = jit.get_arg(1).as_optional_ptr();

// Defer compilation so we can specialize on class of receiver
2 changes: 2 additions & 0 deletions yjit/src/cruby_bindings.inc.rs
@@ -1336,4 +1336,6 @@ extern "C" {
line: ::std::os::raw::c_int,
);
pub fn rb_yjit_assert_holding_vm_lock();
pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
}
