
Remove BDW usage from gc.c

* libguile/atomics-internal.h (scm_atomic_subtract_size): New helper.
* libguile/gc.c (scm_gc_register_allocation): Rework to use atomics.
(scm_gc_event_listener_restarting_mutators): Reset the
off_heap_allocation_countdown to the heap size after GC.
(scm_gc_disable, scm_gc_enable): Remove these.  Unclear what they mean
exactly!  Perhaps if there is a meaning we can look at it later.
(scm_i_gc):
(scm_storage_prehistory):
(scm_init_gc): Update.
Author: Andy Wingo
Date:   2025-05-15 15:53:34 +02:00
Commit: f71775f396 (parent: d560676572)

3 changed files with 31 additions and 62 deletions
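Before the per-file hunks, the shape of the change: scm_gc_register_allocation used to track an allocation budget under a mutex and call BDW's GC_gcollect when the budget ran out; it now decrements an atomic countdown and asks the collector for a collection via gc_collect when the countdown crosses zero, with the post-GC event listener restoring the countdown to the current heap size. Below is a minimal standalone sketch of that pattern using C11 <stdatomic.h> directly; fake_heap_size and trigger_collection are hypothetical stand-ins for the heap-size statistic and the gc_collect entry point, so this illustrates the scheme rather than reproducing Guile's code.

/* Sketch of the atomic allocation-countdown pattern (hypothetical names). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_size_t countdown;
static size_t fake_heap_size = 1024 * 1024;  /* stand-in for stats.heap_size */

static void
trigger_collection (void)
{
  /* In gc.c this is gc_collect (mutator, GC_COLLECTION_ANY); the
     restarting-mutators listener then resets the countdown.  */
  printf ("collecting...\n");
  atomic_store (&countdown, fake_heap_size);
}

static void
register_allocation (size_t size)
{
  /* fetch_sub returns the value held *before* subtracting, so a
     previous value smaller than `size' means this allocation made
     the countdown cross zero.  */
  size_t prev = atomic_fetch_sub (&countdown, size);
  if (prev < size)
    trigger_collection ();
}

int
main (void)
{
  atomic_store (&countdown, fake_heap_size);
  for (int i = 0; i < 3000; i++)
    register_allocation (1024);   /* ~3 MiB of off-heap allocation */
  return 0;
}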

libguile/atomics-internal.h

@@ -45,6 +45,12 @@ scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
   atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
   return atomic_compare_exchange_weak (a_loc, expected, desired);
 }
+static inline size_t
+scm_atomic_subtract_size (size_t *loc, size_t arg)
+{
+  atomic_size_t *a_loc = (atomic_size_t *) loc;
+  return atomic_fetch_sub (a_loc, arg);
+}
 static inline void
 scm_atomic_set_pointer (void **loc, void *val)
 {
@@ -131,6 +137,17 @@ scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
   return ret;
 }
 
+static inline size_t
+scm_atomic_subtract_size (size_t *loc, size_t arg)
+{
+  size_t ret;
+  scm_i_pthread_mutex_lock (&atomics_lock);
+  ret = *loc;
+  *loc -= arg;
+  scm_i_pthread_mutex_unlock (&atomics_lock);
+  return ret;
+}
+
 static inline void
 scm_atomic_set_pointer (void **loc, void *val)
 {

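Both definitions above implement the same contract: atomically subtract arg from *loc and return the value *loc held beforehand. The first variant gets this directly from C11's atomic_fetch_sub; the second reproduces it under a global mutex for toolchains without C11 atomics. A hypothetical self-check of that contract, using plain C11 atomics (not part of the commit):

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

int
main (void)
{
  atomic_size_t loc = 10;
  size_t prev = atomic_fetch_sub (&loc, 3);  /* returns the old value */
  assert (prev == 10);
  assert (atomic_load (&loc) == 7);          /* 10 - 3 is stored */
  return 0;
}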
libguile/gc.c

@@ -32,7 +32,7 @@
 #include "arrays.h"
 #include "async.h"
-#include "bdw-gc.h"
+#include "atomics-internal.h"
 #include "deprecation.h"
 #include "dynwind.h"
 #include "eval.h"
@@ -61,10 +61,6 @@
 #include "gc-basic-stats.h"
 
-/* For GC_set_start_callback. */
-#include <gc/gc_mark.h>
-
 struct scm_gc_event_listener {
@@ -114,6 +110,10 @@ SCM scm_after_gc_hook;
 static SCM after_gc_async_cell;
 
+/* This counter is decremented at each off-heap allocation. When it
+   crosses zero, trigger a manual collection. */
+static size_t off_heap_allocation_countdown = DEFAULT_INITIAL_HEAP_SIZE;
+
@@ -201,6 +201,9 @@ scm_gc_event_listener_restarting_mutators (void *data)
       SCM_SETCDR (after_gc_async_cell, t->pending_asyncs);
       t->pending_asyncs = after_gc_async_cell;
     }
+
+  /* Reset the off-heap allocation counter. */
+  off_heap_allocation_countdown = scm_listener->stats.heap_size;
 }
 
 static inline void*
@@ -301,15 +304,6 @@ scm_oom_fn (struct gc_heap *heap, size_t nbytes)
   return NULL;
 }
 
-/* Called within GC -- cannot allocate GC memory. */
-static void
-scm_gc_warn_proc (char *fmt, GC_word arg)
-{
-  /* avoid scm_current_warning_port() b/c the GC lock is already taken
-     and the fluid ref might require it */
-  fprintf (stderr, fmt, arg);
-}
-
 void
 scm_gc_after_nonlocal_exit (struct scm_thread *thread)
 {
@@ -410,29 +404,6 @@ SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
 #undef FUNC_NAME
 
-SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
-            (),
-            "Disables the garbage collector. Nested calls are permitted. "
-            "GC is re-enabled once @code{gc-enable} has been called the "
-            "same number of times @code{gc-disable} was called.")
-#define FUNC_NAME s_scm_gc_disable
-{
-  GC_disable ();
-  return SCM_UNSPECIFIED;
-}
-#undef FUNC_NAME
-
-SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
-            (),
-            "Enables the garbage collector.")
-#define FUNC_NAME s_scm_gc_enable
-{
-  GC_enable ();
-  return SCM_UNSPECIFIED;
-}
-#undef FUNC_NAME
-
 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
             (),
             "Scans all of SCM objects and reclaims for further use those that are\n"
@@ -450,7 +421,7 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
 void
 scm_i_gc (const char *what)
 {
-  GC_gcollect ();
+  gc_collect (SCM_I_CURRENT_THREAD->mutator, GC_COLLECTION_COMPACTING);
 }
@@ -665,11 +636,7 @@ scm_storage_prehistory (struct gc_stack_addr base)
   // We need to set roots so that scm_trace_loader_conservative_roots
   // gets called.
-  gc_heap_set_roots(the_gc_heap, &heap_roots);
-
-  /* Sanity check. */
-  if (!GC_is_visible (&scm_protects))
-    abort ();
+  gc_heap_set_roots (the_gc_heap, &heap_roots);
 
   scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
   scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
@@ -689,24 +656,12 @@ scm_init_gc_protect_object ()
 
-static size_t bytes_until_gc = DEFAULT_INITIAL_HEAP_SIZE;
-static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
-
 void
 scm_gc_register_allocation (size_t size)
 {
-  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
-  if (size > bytes_until_gc)
-    {
-      bytes_until_gc = GC_get_heap_size ();
-      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
-      GC_gcollect ();
-    }
-  else
-    {
-      bytes_until_gc -= size;
-      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
-    }
+  size_t prev = scm_atomic_subtract_size(&off_heap_allocation_countdown, size);
+  if (prev < size)
+    gc_collect (SCM_I_CURRENT_THREAD->mutator, GC_COLLECTION_ANY);
 }
@@ -724,8 +679,6 @@ after_gc_async_thunk (void)
 void
 scm_init_gc ()
 {
-  /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */
-
   scm_after_gc_hook = scm_make_hook (SCM_INUM0);
   scm_c_define ("after-gc-hook", scm_after_gc_hook);
@@ -736,7 +689,6 @@ scm_init_gc ()
                                      SCM_BOOL_F);
 
   gc_heap_set_allocation_failure_handler (the_gc_heap, scm_oom_fn);
-  GC_set_warn_proc (scm_gc_warn_proc);
 
 #include "gc.x"
 }

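One subtlety in the new scm_gc_register_allocation is worth spelling out: off_heap_allocation_countdown is an unsigned size_t, so subtracting past zero wraps around rather than going negative. The crossing is instead detected from the return value: scm_atomic_subtract_size returns the counter's previous value, and prev < size holds exactly when this allocation consumed more budget than remained. A worked example with hypothetical numbers:

/* Worked example of the crossing check (hypothetical values; the real
   countdown starts at DEFAULT_INITIAL_HEAP_SIZE and is reset to
   stats.heap_size by the restarting-mutators listener).  */
#include <stdio.h>

int
main (void)
{
  size_t countdown = 4096;   /* budget remaining before this call */
  size_t size = 8192;        /* incoming off-heap allocation */
  size_t prev = countdown;   /* what the fetch-sub returns */
  countdown -= size;         /* wraps around to SIZE_MAX - 4095 */
  printf ("prev=%zu, size=%zu: %s\n", prev, size,
          prev < size ? "collect" : "keep going");
  /* The post-GC listener resets the wrapped countdown to the live
     heap size, so the momentary wrap is harmless.  */
  return 0;
}

The old code needed bytes_until_gc_lock precisely because the test and the subtraction were separate steps; folding both into a single fetch-and-subtract is what makes the lock unnecessary.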
libguile/loader.c

@@ -125,7 +125,7 @@ scm_trace_loader_conservative_roots (void (*trace_range)(uintptr_t lo,
                                                           struct gc_heap *heap,
                                                           void *trace_data),
                                      struct gc_heap *heap,
-                                      void *trace_data)
+                                     void *trace_data)
 {
   for (size_t i = 0; i < roots_count; i++)
     trace_range(roots[i].lo, roots[i].hi, 0, heap, trace_data);