
More robust vacuuming of in-use weak tables

* libguile/weak-table.c (scm_t_weak_table): Add last_gc_no member.
* libguile/weak-table.c (vacuum_weak_table): Only vacuum if we haven't
  done so since the last GC.
  (scm_c_weak_table_ref, scm_c_weak_table_put_x, scm_c_weak_table_remove_x)
  (scm_c_weak_table_fold): Vacuum the weak table if needed.
  (scm_weak_table_clear_x): Update last_gc_no flag, as no more vacuuming
  will be needed.
Andy Wingo 2017-10-31 08:43:09 +01:00
parent d01addeb1f
commit dc8dda77e0
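
In outline, the change below boils down to one guard: remember the collector's
collection count at the time of the last vacuum, and skip the bucket walk
entirely when no GC has run since, because no weak reference can have been
newly cleared. A minimal sketch of that pattern, assuming a placeholder table
struct and a placeholder vacuum_buckets() helper (not Guile's actual
definitions; GC_get_gc_no() is the real bdwgc call):

    /* Sketch only: struct weak_table and vacuum_buckets() are
       illustrative placeholders.  Header path may be <gc.h> on some
       installs. */
    #include <gc/gc.h>

    struct weak_table {
      GC_word last_gc_no;      /* collection count at last vacuum */
      /* ... buckets, sizes, lock, etc. ... */
    };

    /* Placeholder for the real sweep that drops dead weak entries. */
    static void
    vacuum_buckets (struct weak_table *t)
    {
      (void) t;
    }

    static void
    vacuum_if_needed (struct weak_table *t)
    {
      GC_word gc_no = GC_get_gc_no ();

      /* No collection since the last vacuum: the bucket walk would
         find nothing to remove, so don't bother. */
      if (gc_no == t->last_gc_no)
        return;

      t->last_gc_no = gc_no;
      vacuum_buckets (t);
    }
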


@@ -31,7 +31,6 @@
#include "libguile/hash.h"
#include "libguile/eval.h"
#include "libguile/ports.h"
#include "libguile/validate.h"
#include "libguile/weak-list.h"
#include "libguile/weak-table.h"
@@ -141,6 +140,7 @@ typedef struct {
unsigned long upper; /* when to grow */
int size_index; /* index into hashtable_size */
int min_size_index; /* minimum size_index */
GC_word last_gc_no;
} scm_t_weak_table;
@@ -275,8 +275,14 @@ resize_table (scm_t_weak_table *table)
static void
vacuum_weak_table (scm_t_weak_table *table)
{
GC_word gc_no = GC_get_gc_no ();
unsigned long k;
if (gc_no == table->last_gc_no)
return;
table->last_gc_no = gc_no;
for (k = 0; k < table->n_buckets; k++)
{
scm_t_weak_entry **loc = table->buckets + k;
@@ -427,6 +433,7 @@ make_weak_table (unsigned long k, scm_t_weak_table_kind kind)
table->upper = 9 * n / 10;
table->size_index = i;
table->min_size_index = i;
table->last_gc_no = GC_get_gc_no ();
scm_i_pthread_mutex_init (&table->lock, NULL);
return scm_cell (scm_tc7_weak_table, (scm_t_bits)table);
@@ -456,8 +463,10 @@ do_vacuum_weak_table (SCM table)
custom predicate, or via finalizers run explicitly by (gc) or in an
async (for non-threaded Guile). We add a restriction that
prohibits the first case, by convention. But since we can't
prohibit the second case, here we trylock instead of lock. Not so
nice. */
prohibit the second case, here we trylock instead of lock. In any
case, if the mutex is held by another thread, then the table is in
active use, so the next user of the table will handle the vacuum
for us. */
if (scm_i_pthread_mutex_trylock (&t->lock) == 0)
{
vacuum_weak_table (t);
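
(Aside: in isolation, the trylock pattern the comment above describes amounts
to roughly the following sketch; the type and function names here are
illustrative, not Guile's API. A failed trylock simply means another thread is
using the table, and its next operation will vacuum anyway.)

    #include <pthread.h>

    struct table { pthread_mutex_t lock; /* ... buckets ... */ };

    /* Placeholder for the real bucket sweep. */
    static void vacuum (struct table *t) { (void) t; }

    static void
    maybe_vacuum (struct table *t)
    {
      /* Opportunistic cleanup: only vacuum if the lock is free. */
      if (pthread_mutex_trylock (&t->lock) == 0)
        {
          vacuum (t);
          pthread_mutex_unlock (&t->lock);
        }
    }
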
@@ -513,6 +522,8 @@ scm_c_weak_table_ref (SCM table, unsigned long raw_hash,
scm_i_pthread_mutex_lock (&t->lock);
vacuum_weak_table (t);
ret = weak_table_ref (t, raw_hash, pred, closure, dflt);
scm_i_pthread_mutex_unlock (&t->lock);
@@ -535,6 +546,8 @@ scm_c_weak_table_put_x (SCM table, unsigned long raw_hash,
scm_i_pthread_mutex_lock (&t->lock);
vacuum_weak_table (t);
weak_table_put_x (t, raw_hash, pred, closure, key, value);
scm_i_pthread_mutex_unlock (&t->lock);
@@ -555,6 +568,8 @@ scm_c_weak_table_remove_x (SCM table, unsigned long raw_hash,
scm_i_pthread_mutex_lock (&t->lock);
vacuum_weak_table (t);
weak_table_remove_x (t, raw_hash, pred, closure);
scm_i_pthread_mutex_unlock (&t->lock);
@@ -604,6 +619,8 @@ scm_weak_table_clear_x (SCM table)
scm_i_pthread_mutex_lock (&t->lock);
t->last_gc_no = GC_get_gc_no ();
for (k = 0; k < t->n_buckets; k++)
{
for (entry = t->buckets[k]; entry; entry = entry->next)
@@ -628,6 +645,8 @@ scm_c_weak_table_fold (scm_t_table_fold_fn proc, void *closure,
scm_i_pthread_mutex_lock (&t->lock);
vacuum_weak_table (t);
for (k = 0; k < t->n_buckets; k++)
{
scm_t_weak_entry *entry;