mirror of
https://git.savannah.gnu.org/git/guile.git
synced 2025-05-20 11:40:18 +02:00
More robust vacuuming of in-use weak tables
* libguile/weak-table.c (scm_t_weak_table): Add last_gc_no member. * libguile/weak-table.c (vacuum_weak_table): Only vacuum if we haven't done so since the last GC. (scm_c_weak_table_ref, scm_c_weak_table_put_x, scm_c_weak_table_remove_x) (scm_c_weak_table_fold): Vacuum the weak table if needed. (scm_weak_table_clear_x): Update last_gc_no flag, as no more vacuuming will be needed.
This commit is contained in:
parent
d01addeb1f
commit
dc8dda77e0
1 changed file with 22 additions and 3 deletions
|
@ -31,7 +31,6 @@
|
||||||
#include "libguile/hash.h"
|
#include "libguile/hash.h"
|
||||||
#include "libguile/eval.h"
|
#include "libguile/eval.h"
|
||||||
#include "libguile/ports.h"
|
#include "libguile/ports.h"
|
||||||
|
|
||||||
#include "libguile/validate.h"
|
#include "libguile/validate.h"
|
||||||
#include "libguile/weak-list.h"
|
#include "libguile/weak-list.h"
|
||||||
#include "libguile/weak-table.h"
|
#include "libguile/weak-table.h"
|
||||||
|
@ -141,6 +140,7 @@ typedef struct {
|
||||||
unsigned long upper; /* when to grow */
|
unsigned long upper; /* when to grow */
|
||||||
int size_index; /* index into hashtable_size */
|
int size_index; /* index into hashtable_size */
|
||||||
int min_size_index; /* minimum size_index */
|
int min_size_index; /* minimum size_index */
|
||||||
|
GC_word last_gc_no;
|
||||||
} scm_t_weak_table;
|
} scm_t_weak_table;
|
||||||
|
|
||||||
|
|
||||||
|
@ -275,8 +275,14 @@ resize_table (scm_t_weak_table *table)
|
||||||
static void
|
static void
|
||||||
vacuum_weak_table (scm_t_weak_table *table)
|
vacuum_weak_table (scm_t_weak_table *table)
|
||||||
{
|
{
|
||||||
|
GC_word gc_no = GC_get_gc_no ();
|
||||||
unsigned long k;
|
unsigned long k;
|
||||||
|
|
||||||
|
if (gc_no == table->last_gc_no)
|
||||||
|
return;
|
||||||
|
|
||||||
|
table->last_gc_no = gc_no;
|
||||||
|
|
||||||
for (k = 0; k < table->n_buckets; k++)
|
for (k = 0; k < table->n_buckets; k++)
|
||||||
{
|
{
|
||||||
scm_t_weak_entry **loc = table->buckets + k;
|
scm_t_weak_entry **loc = table->buckets + k;
|
||||||
|
@ -427,6 +433,7 @@ make_weak_table (unsigned long k, scm_t_weak_table_kind kind)
|
||||||
table->upper = 9 * n / 10;
|
table->upper = 9 * n / 10;
|
||||||
table->size_index = i;
|
table->size_index = i;
|
||||||
table->min_size_index = i;
|
table->min_size_index = i;
|
||||||
|
table->last_gc_no = GC_get_gc_no ();
|
||||||
scm_i_pthread_mutex_init (&table->lock, NULL);
|
scm_i_pthread_mutex_init (&table->lock, NULL);
|
||||||
|
|
||||||
return scm_cell (scm_tc7_weak_table, (scm_t_bits)table);
|
return scm_cell (scm_tc7_weak_table, (scm_t_bits)table);
|
||||||
|
@ -456,8 +463,10 @@ do_vacuum_weak_table (SCM table)
|
||||||
custom predicate, or via finalizers run explicitly by (gc) or in an
|
custom predicate, or via finalizers run explicitly by (gc) or in an
|
||||||
async (for non-threaded Guile). We add a restriction that
|
async (for non-threaded Guile). We add a restriction that
|
||||||
prohibits the first case, by convention. But since we can't
|
prohibits the first case, by convention. But since we can't
|
||||||
prohibit the second case, here we trylock instead of lock. Not so
|
prohibit the second case, here we trylock instead of lock. In any
|
||||||
nice. */
|
case, if the mutex is held by another thread, then the table is in
|
||||||
|
active use, so the next user of the table will handle the vacuum
|
||||||
|
for us. */
|
||||||
if (scm_i_pthread_mutex_trylock (&t->lock) == 0)
|
if (scm_i_pthread_mutex_trylock (&t->lock) == 0)
|
||||||
{
|
{
|
||||||
vacuum_weak_table (t);
|
vacuum_weak_table (t);
|
||||||
|
@ -513,6 +522,8 @@ scm_c_weak_table_ref (SCM table, unsigned long raw_hash,
|
||||||
|
|
||||||
scm_i_pthread_mutex_lock (&t->lock);
|
scm_i_pthread_mutex_lock (&t->lock);
|
||||||
|
|
||||||
|
vacuum_weak_table (t);
|
||||||
|
|
||||||
ret = weak_table_ref (t, raw_hash, pred, closure, dflt);
|
ret = weak_table_ref (t, raw_hash, pred, closure, dflt);
|
||||||
|
|
||||||
scm_i_pthread_mutex_unlock (&t->lock);
|
scm_i_pthread_mutex_unlock (&t->lock);
|
||||||
|
@ -535,6 +546,8 @@ scm_c_weak_table_put_x (SCM table, unsigned long raw_hash,
|
||||||
|
|
||||||
scm_i_pthread_mutex_lock (&t->lock);
|
scm_i_pthread_mutex_lock (&t->lock);
|
||||||
|
|
||||||
|
vacuum_weak_table (t);
|
||||||
|
|
||||||
weak_table_put_x (t, raw_hash, pred, closure, key, value);
|
weak_table_put_x (t, raw_hash, pred, closure, key, value);
|
||||||
|
|
||||||
scm_i_pthread_mutex_unlock (&t->lock);
|
scm_i_pthread_mutex_unlock (&t->lock);
|
||||||
|
@ -555,6 +568,8 @@ scm_c_weak_table_remove_x (SCM table, unsigned long raw_hash,
|
||||||
|
|
||||||
scm_i_pthread_mutex_lock (&t->lock);
|
scm_i_pthread_mutex_lock (&t->lock);
|
||||||
|
|
||||||
|
vacuum_weak_table (t);
|
||||||
|
|
||||||
weak_table_remove_x (t, raw_hash, pred, closure);
|
weak_table_remove_x (t, raw_hash, pred, closure);
|
||||||
|
|
||||||
scm_i_pthread_mutex_unlock (&t->lock);
|
scm_i_pthread_mutex_unlock (&t->lock);
|
||||||
|
@ -604,6 +619,8 @@ scm_weak_table_clear_x (SCM table)
|
||||||
|
|
||||||
scm_i_pthread_mutex_lock (&t->lock);
|
scm_i_pthread_mutex_lock (&t->lock);
|
||||||
|
|
||||||
|
t->last_gc_no = GC_get_gc_no ();
|
||||||
|
|
||||||
for (k = 0; k < t->n_buckets; k++)
|
for (k = 0; k < t->n_buckets; k++)
|
||||||
{
|
{
|
||||||
for (entry = t->buckets[k]; entry; entry = entry->next)
|
for (entry = t->buckets[k]; entry; entry = entry->next)
|
||||||
|
@ -628,6 +645,8 @@ scm_c_weak_table_fold (scm_t_table_fold_fn proc, void *closure,
|
||||||
|
|
||||||
scm_i_pthread_mutex_lock (&t->lock);
|
scm_i_pthread_mutex_lock (&t->lock);
|
||||||
|
|
||||||
|
vacuum_weak_table (t);
|
||||||
|
|
||||||
for (k = 0; k < t->n_buckets; k++)
|
for (k = 0; k < t->n_buckets; k++)
|
||||||
{
|
{
|
||||||
scm_t_weak_entry *entry;
|
scm_t_weak_entry *entry;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue