Skip to content
Snippets Groups Projects
Commit 242e3726 authored by Yorick Peterse's avatar Yorick Peterse
Browse files

Removed usage of Ruby mutexes

In rare circumstances, locking or unlocking a Mutex may trigger an object
allocation, which in turn crashes CRuby if it happens during a GC
run. Since the GIL already prevents multiple Ruby threads from running
in parallel, we can safely remove the explicit mutexes without
breaking anything.
parent 39a8eb7d
No related branches found
No related tags found
No related merge requests found
Pipeline #
Loading
Loading
@@ -2,8 +2,6 @@
 
st_table *object_counts;
 
VALUE mutex;
VALUE allocation_tracer;
VALUE free_tracer;
 
Loading
Loading
@@ -25,12 +23,8 @@ void newobj_callback(VALUE tracepoint, void* data) {
return;
}
 
rb_mutex_lock(mutex);
st_lookup(object_counts, (st_data_t) klass, &count);
st_insert(object_counts, (st_data_t) klass, count + 1);
rb_mutex_unlock(mutex);
}
 
/**
Loading
Loading
@@ -47,8 +41,6 @@ void freeobj_callback(VALUE tracepoint, void* data) {
VALUE obj = rb_tracearg_object(trace_arg);
VALUE klass = RBASIC_CLASS(obj);
 
rb_mutex_lock(mutex);
if ( st_lookup(object_counts, (st_data_t) klass, &count) ) {
if ( count > 0 && (count - 1) > 0) {
st_insert(object_counts, (st_data_t) klass, count - 1);
Loading
Loading
@@ -58,8 +50,6 @@ void freeobj_callback(VALUE tracepoint, void* data) {
st_delete(object_counts, (st_data_t*) &klass, NULL);
}
}
rb_mutex_unlock(mutex);
}
 
/**
Loading
Loading
@@ -84,18 +74,12 @@ VALUE allocations_to_hash(VALUE self) {
st_table *local_counts;
VALUE hash;
 
rb_mutex_lock(mutex);
if ( !object_counts ) {
rb_mutex_unlock(mutex);
return rb_hash_new();
}
 
local_counts = st_copy(object_counts);
 
rb_mutex_unlock(mutex);
hash = rb_hash_new();
 
st_foreach(local_counts, each_count, (st_data_t) hash);
Loading
Loading
@@ -112,11 +96,7 @@ VALUE allocations_to_hash(VALUE self) {
* Allocations.start -> nil
*/
VALUE allocations_start(VALUE self) {
rb_mutex_lock(mutex);
if ( rb_ivar_get(self, id_enabled) == Qtrue ) {
rb_mutex_unlock(mutex);
return Qnil;
}
 
Loading
Loading
@@ -124,8 +104,6 @@ VALUE allocations_start(VALUE self) {
 
rb_ivar_set(self, id_enabled, Qtrue);
 
rb_mutex_unlock(mutex);
rb_tracepoint_enable(allocation_tracer);
rb_tracepoint_enable(free_tracer);
 
Loading
Loading
@@ -139,11 +117,7 @@ VALUE allocations_start(VALUE self) {
* Allocations.stop -> nil
*/
VALUE allocations_stop(VALUE self) {
rb_mutex_lock(mutex);
if ( rb_ivar_get(self, id_enabled) != Qtrue ) {
rb_mutex_unlock(mutex);
return Qnil;
}
 
Loading
Loading
@@ -158,8 +132,6 @@ VALUE allocations_stop(VALUE self) {
 
rb_ivar_set(self, id_enabled, Qfalse);
 
rb_mutex_unlock(mutex);
return Qnil;
}
 
Loading
Loading
@@ -172,14 +144,10 @@ VALUE allocations_stop(VALUE self) {
/**
 * Returns whether allocation tracking is currently enabled.
 *
 * Reads the "enabled" instance variable under the module mutex and
 * reports it as a Ruby boolean.
 *
 * call-seq:
 *   Allocations.enabled? -> true or false
 */
VALUE allocations_enabled_p(VALUE self) {
    VALUE state;

    rb_mutex_lock(mutex);

    /* Normalize the ivar to exactly Qtrue/Qfalse (ivar may be unset). */
    state = (rb_ivar_get(self, id_enabled) == Qtrue) ? Qtrue : Qfalse;

    rb_mutex_unlock(mutex);

    return state;
}
 
Loading
Loading
@@ -192,8 +160,6 @@ void Init_liballocations() {
free_tracer = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_FREEOBJ,
freeobj_callback, NULL);
 
mutex = rb_mutex_new();
id_enabled = rb_intern("enabled");
 
rb_define_singleton_method(mAllocations, "to_hash", allocations_to_hash, 0);
Loading
Loading
@@ -201,7 +167,6 @@ void Init_liballocations() {
rb_define_singleton_method(mAllocations, "stop", allocations_stop, 0);
rb_define_singleton_method(mAllocations, "enabled?", allocations_enabled_p, 0);
 
rb_define_const(mAllocations, "MUTEX", mutex);
rb_define_const(mAllocations, "ALLOCATION_TRACER", allocation_tracer);
rb_define_const(mAllocations, "FREE_TRACER", free_tracer);
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment