From e5ef64f710e7993ad4af99f2279bf7d02eef393f Mon Sep 17 00:00:00 2001
From: Jean Boussier
Date: Tue, 25 Jan 2022 16:01:09 +0100
Subject: [PATCH] Call the gvl hooks

---
 .../gvl/call_without_gvl/call_without_gvl.c |  6 +--
 include/ruby/internal/event.h               | 28 +++++++------
 include/ruby/thread_native.h                | 16 ++++----
 thread_pthread.c                            | 41 ++++++++++++-------
 thread_pthread.h                            |  1 +
 5 files changed, 53 insertions(+), 39 deletions(-)

diff --git a/ext/-test-/gvl/call_without_gvl/call_without_gvl.c b/ext/-test-/gvl/call_without_gvl/call_without_gvl.c
index dd861ac4516402..cd3260ece8c1d7 100644
--- a/ext/-test-/gvl/call_without_gvl/call_without_gvl.c
+++ b/ext/-test-/gvl/call_without_gvl/call_without_gvl.c
@@ -78,7 +78,7 @@ static gvl_hook_t * single_hook = NULL;
 
 static VALUE
 thread_register_gvl_callback(VALUE thread) {
-    single_hook = rb_gvl_event_new(*ex_callback, 0x12);
+    single_hook = rb_gvl_event_new(*ex_callback, RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER);
     return Qnil;
 }
 
@@ -95,7 +95,7 @@ thread_unregister_gvl_callback(VALUE thread) {
 
 static VALUE
 thread_call_gvl_callback(VALUE thread) {
-    rb_gvl_execute_hooks(0x12);
+    rb_gvl_execute_hooks(RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER, 1);
     return Qnil;
 }
 
@@ -103,7 +103,7 @@ static VALUE
 thread_register_and_unregister_gvl_callback(VALUE thread) {
     gvl_hook_t * hooks[5];
     for (int i = 0; i < 5; i++) {
-        hooks[i] = rb_gvl_event_new(*ex_callback, 0x12);
+        hooks[i] = rb_gvl_event_new(*ex_callback, RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER);
     }
 
     if (!rb_gvl_event_delete(hooks[4])) return Qfalse;
diff --git a/include/ruby/internal/event.h b/include/ruby/internal/event.h
index 04b137a1939e85..633d17fa9e0541 100644
--- a/include/ruby/internal/event.h
+++ b/include/ruby/internal/event.h
@@ -82,18 +82,22 @@
  *
  * @{
  */
-#define RUBY_INTERNAL_EVENT_SWITCH          0x040000 /**< Thread switched. */
-#define RUBY_EVENT_SWITCH                   0x040000 /**< @old{RUBY_INTERNAL_EVENT_SWITCH} */
-                                         /* 0x080000 */
-#define RUBY_INTERNAL_EVENT_NEWOBJ         0x100000 /**< Object allocated. */
-#define RUBY_INTERNAL_EVENT_FREEOBJ        0x200000 /**< Object swept. */
-#define RUBY_INTERNAL_EVENT_GC_START       0x400000 /**< GC started. */
-#define RUBY_INTERNAL_EVENT_GC_END_MARK    0x800000 /**< GC ended mark phase. */
-#define RUBY_INTERNAL_EVENT_GC_END_SWEEP  0x1000000 /**< GC ended sweep phase. */
-#define RUBY_INTERNAL_EVENT_GC_ENTER      0x2000000 /**< `gc_enter()` is called. */
-#define RUBY_INTERNAL_EVENT_GC_EXIT       0x4000000 /**< `gc_exit()` is called. */
-#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK 0x7f00000 /**< Bitmask of GC events. */
-#define RUBY_INTERNAL_EVENT_MASK         0xffff0000 /**< Bitmask of internal events. */
+#define RUBY_INTERNAL_EVENT_SWITCH            0x00040000 /**< Thread switched. */
+#define RUBY_EVENT_SWITCH                     0x00040000 /**< @old{RUBY_INTERNAL_EVENT_SWITCH} */
+                                            /* 0x00080000 */
+#define RUBY_INTERNAL_EVENT_NEWOBJ            0x00100000 /**< Object allocated. */
+#define RUBY_INTERNAL_EVENT_FREEOBJ           0x00200000 /**< Object swept. */
+#define RUBY_INTERNAL_EVENT_GC_START          0x00400000 /**< GC started. */
+#define RUBY_INTERNAL_EVENT_GC_END_MARK       0x00800000 /**< GC ended mark phase. */
+#define RUBY_INTERNAL_EVENT_GC_END_SWEEP      0x01000000 /**< GC ended sweep phase. */
+#define RUBY_INTERNAL_EVENT_GC_ENTER          0x02000000 /**< `gc_enter()` is called. */
+#define RUBY_INTERNAL_EVENT_GC_EXIT           0x04000000 /**< `gc_exit()` is called. */
+#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK     0x07f00000 /**< Bitmask of GC events. */
+#define RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER 0x10000000 /**< `gvl_acquire()` is called. */
+#define RUBY_INTERNAL_EVENT_GVL_ACQUIRE_EXIT  0x20000000 /**< `gvl_acquire()` is exiting successfully. */
+#define RUBY_INTERNAL_EVENT_GVL_RELEASE       0x40000000 /**< `gvl_release()` is called. */
+#define RUBY_INTERNAL_EVENT_GVL_MASK          0x70000000 /**< Bitmask of GVL events. */
+#define RUBY_INTERNAL_EVENT_MASK              0xffff0000 /**< Bitmask of internal events. */
 
 /** @} */
 
diff --git a/include/ruby/thread_native.h b/include/ruby/thread_native.h
index d722013ea0f470..84a208ce3ba64e 100644
--- a/include/ruby/thread_native.h
+++ b/include/ruby/thread_native.h
@@ -201,26 +201,24 @@ void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
  */
 void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
 
-#include
-struct gvl_hook_event_args {
-    //
-};
-#include
+typedef struct gvl_hook_event_args {
+    unsigned long waiting;
+} gvl_hook_event_args_t;
 
-typedef void (*rb_gvl_callback)(uint32_t event, struct gvl_hook_event_args args);
+typedef void (*rb_gvl_callback)(uint32_t event, gvl_hook_event_args_t args);
 
 // TODO: this is going to be the same on Windows so move it somewhere sensible
 typedef struct gvl_hook {
     rb_gvl_callback callback;
-    uint32_t event;
+    rb_event_flag_t event;
     struct gvl_hook *next;
 } gvl_hook_t;
 
 #include "ruby/internal/memory.h"
-gvl_hook_t * rb_gvl_event_new(void *callback, uint32_t event);
+gvl_hook_t * rb_gvl_event_new(void *callback, rb_event_flag_t event);
 bool rb_gvl_event_delete(gvl_hook_t * hook);
-void rb_gvl_execute_hooks(uint32_t event);
+void rb_gvl_execute_hooks(rb_event_flag_t event, unsigned long waiting);
 
 RBIMPL_SYMBOL_EXPORT_END()
 
 #endif
diff --git a/thread_pthread.c b/thread_pthread.c
index 6f3d8b00da1224..1c6648a19b56eb 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -105,7 +105,7 @@ static gvl_hook_t * rb_gvl_hooks = NULL;
 static pthread_rwlock_t rb_gvl_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
 
 gvl_hook_t *
-rb_gvl_event_new(void *callback, uint32_t event) {
+rb_gvl_event_new(void *callback, rb_event_flag_t event) {
     gvl_hook_t *hook = ALLOC_N(gvl_hook_t, 1);
     hook->callback = callback;
     hook->event = event;
@@ -155,25 +155,21 @@ rb_gvl_event_delete(gvl_hook_t * hook) {
 }
 
 void
-rb_gvl_execute_hooks(uint32_t event) {
-    if (!rb_gvl_hooks) {
-        return;
-    }
-
+rb_gvl_execute_hooks(rb_event_flag_t event, unsigned long waiting) {
     if (pthread_rwlock_rdlock(&rb_gvl_hooks_rw_lock)) {
         // TODO: better way to deal with error?
         return;
     }
 
-    gvl_hook_t *h = rb_gvl_hooks;
-    struct gvl_hook_event_args args = {};
-
-    do {
-        if (h->event & event) {
-            (*h->callback)(event, args);
-        }
-    } while((h = h->next));
-
+    if (rb_gvl_hooks) {
+        gvl_hook_t *h = rb_gvl_hooks;
+        gvl_hook_event_args_t args = { .waiting = waiting };
+        do {
+            if (h->event & event) {
+                (*h->callback)(event, args);
+            }
+        } while((h = h->next));
+    }
     pthread_rwlock_unlock(&rb_gvl_hooks_rw_lock);
 }
 
@@ -366,6 +362,10 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
                   "we must not be in ubf_list and GVL waitq at the same time");
 
         list_add_tail(&gvl->waitq, &nd->node.gvl);
+        gvl->waiting++;
+        if (rb_gvl_hooks) {
+            rb_gvl_execute_hooks(RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER, gvl->waiting);
+        }
 
         do {
             if (!gvl->timer) {
@@ -377,6 +377,7 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
         } while (gvl->owner);
 
         list_del_init(&nd->node.gvl);
+        gvl->waiting--;
 
         if (gvl->need_yield) {
             gvl->need_yield = 0;
@@ -387,6 +388,11 @@ gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
        gvl->timer_err = ETIMEDOUT;
    }
    gvl->owner = th;
+
+    if (rb_gvl_hooks) {
+        rb_gvl_execute_hooks(RUBY_INTERNAL_EVENT_GVL_ACQUIRE_EXIT, gvl->waiting);
+    }
+
    if (!gvl->timer) {
        if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
            rb_thread_wakeup_timer_thread(-1);
@@ -405,6 +411,10 @@ gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
 static const native_thread_data_t *
 gvl_release_common(rb_global_vm_lock_t *gvl)
 {
+    if (rb_gvl_hooks) {
+        rb_gvl_execute_hooks(RUBY_INTERNAL_EVENT_GVL_RELEASE, gvl->waiting);
+    }
+
     native_thread_data_t *next;
     gvl->owner = 0;
     next = list_top(&gvl->waitq, native_thread_data_t, node.ubf);
@@ -466,6 +476,7 @@ rb_gvl_init(rb_global_vm_lock_t *gvl)
     rb_native_cond_initialize(&gvl->switch_wait_cond);
     list_head_init(&gvl->waitq);
     gvl->owner = 0;
+    gvl->waiting = 0;
     gvl->timer = 0;
     gvl->timer_err = ETIMEDOUT;
     gvl->need_yield = 0;
diff --git a/thread_pthread.h b/thread_pthread.h
index 2ac354046c010b..9830a428162a81 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -59,6 +59,7 @@ typedef struct rb_global_vm_lock_struct {
      * timer. */
     struct list_head waitq; /* <=> native_thread_data_t.node.ubf */
+    volatile unsigned long waiting;
     const struct rb_thread_struct *timer;
     int timer_err;
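
For reference, here is a minimal sketch of how a native extension could consume the API this patch introduces, assuming the declarations in ruby/thread_native.h above are available to extensions as-is. The extension name, the max_waiting counter, and Init_gvl_counter are illustrative assumptions, not part of the patch.

/* Illustrative sketch only: register a GVL hook through the API declared in
 * ruby/thread_native.h above.  Names not defined by the patch are hypothetical. */
#include <stdint.h>

#include "ruby.h"
#include "ruby/thread_native.h"

static gvl_hook_t *gvl_hook;
static unsigned long max_waiting; /* deepest GVL wait queue observed so far */

static void
gvl_counter_callback(uint32_t event, gvl_hook_event_args_t args)
{
    /* args.waiting carries gvl->waiting: how many threads were queued for the
     * GVL when rb_gvl_execute_hooks() fired this event.  No synchronization
     * here; a real hook should be thread-safe. */
    if (event == RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER && args.waiting > max_waiting) {
        max_waiting = args.waiting;
    }
}

void
Init_gvl_counter(void)
{
    /* rb_gvl_execute_hooks() matches hooks with `hook->event & event`, so
     * several event flags can be OR-ed into a single registration. */
    gvl_hook = rb_gvl_event_new((void *)gvl_counter_callback,
                                RUBY_INTERNAL_EVENT_GVL_ACQUIRE_ENTER |
                                RUBY_INTERNAL_EVENT_GVL_ACQUIRE_EXIT);
}

The hook can later be removed with rb_gvl_event_delete(gvl_hook).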