
better hysteresis in weak-set, weak-table

* libguile/weak-set.c:
* libguile/weak-table.c: Attempt to avoid bouncing between sizes when
  growing the vector causes elements to be removed from the set/table.
Andy Wingo, 2012-02-19 20:34:26 +01:00
commit 7932759fc7, parent 917b0e72f7
2 changed files with 74 additions and 2 deletions
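Not part of the commit, but a minimal standalone model of the check it adds may help. Below, sizes[], compute_index and is_acceptable are illustrative stand-ins for the commit's hashset_size[], compute_size_index and is_acceptable_size_index; the one-fifth floor matches the new_lower computation in the diff, while the nine-tenths grow point is an assumption made only for this sketch.

#include <stdio.h>

static const unsigned long sizes[] = { 100, 400, 1600 };

static int
compute_index (unsigned long n_items)
{
  int i;

  /* Pick the smallest size whose capacity keeps the load at or
     below 90% (an assumed threshold; the real compute_size_index
     has its own bounds).  */
  for (i = 0; i + 1 < (int) (sizeof sizes / sizeof sizes[0])
         && n_items * 10 > sizes[i] * 9; i++)
    ;
  return i;
}

static int
is_acceptable (unsigned long n_items, int allocated)
{
  int computed = compute_index (n_items);

  if (allocated == computed)
    return 1;                 /* Target unchanged; use the new vector.  */
  if (allocated == computed + 1)
    /* Elements vanished while we allocated.  Keep the larger vector
       anyway unless it would sit no more than a fifth full.  */
    return n_items > sizes[allocated] / 5;
  return 0;                   /* Off by too much; recompute and retry.  */
}

int
main (void)
{
  /* We sized for index 2 (1600 slots), then finalizers shrank the
     set to 350 items, so index 1 would now do.  But 350 > 1600/5,
     so the larger vector is kept instead of bouncing back down.  */
  printf ("%d\n", is_acceptable (350, 2));  /* 1: keep it.  */
  printf ("%d\n", is_acceptable (200, 2));  /* 0: reallocate smaller.  */
  return 0;
}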

libguile/weak-set.c

@@ -308,6 +308,42 @@ compute_size_index (scm_t_weak_set *set)
   return i;
 }
 
+static int
+is_acceptable_size_index (scm_t_weak_set *set, int size_index)
+{
+  int computed = compute_size_index (set);
+
+  if (size_index == computed)
+    /* We were going to grow or shrink, and allocating the new vector
+       didn't change the target size.  */
+    return 1;
+
+  if (size_index == computed + 1)
+    {
+      /* We were going to enlarge the set, but allocating the new
+         vector finalized some objects, making an enlargement
+         unnecessary.  It might still be a good idea to use the larger
+         set, though.  (This branch also gets hit if, while allocating
+         the vector, some other thread was actively removing items from
+         the set.  That is less likely, though.)  */
+      unsigned long new_lower = hashset_size[size_index] / 5;
+
+      return set->size > new_lower;
+    }
+
+  if (size_index == computed - 1)
+    {
+      /* We were going to shrink the set, but when we dropped the lock
+         to allocate the new vector, some other thread added elements to
+         the set.  */
+      return 0;
+    }
+
+  /* The computed size differs from our newly allocated size by more
+     than one size index -- recalculate.  */
+  return 0;
+}
+
 static void
 resize_set (scm_t_weak_set *set)
 {
@@ -328,7 +364,7 @@ resize_set (scm_t_weak_set *set)
                                                "weak set");
       scm_i_pthread_mutex_unlock (&set->lock);
     }
-  while (new_size_index != compute_size_index (set));
+  while (!is_acceptable_size_index (set, new_size_index));
 
   old_entries = set->entries;
   old_size = set->size;
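A concrete reading of the new_lower test, with round illustrative numbers rather than Guile's actual hashset_size values: say the vector just allocated holds 5120 entries, so new_lower is 1024. If allocating it ran finalizers that dropped the set far enough that compute_size_index now reports one step smaller, the set keeps the 5120-entry vector as long as more than 1024 items survive; only at 1024 or fewer does resize_set loop around and allocate the smaller vector. That fifth-full floor is the hysteresis band that stops a grow from immediately turning into a shrink.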

libguile/weak-table.c

@@ -431,6 +431,42 @@ compute_size_index (scm_t_weak_table *table)
   return i;
 }
 
+static int
+is_acceptable_size_index (scm_t_weak_table *table, int size_index)
+{
+  int computed = compute_size_index (table);
+
+  if (size_index == computed)
+    /* We were going to grow or shrink, and allocating the new vector
+       didn't change the target size.  */
+    return 1;
+
+  if (size_index == computed + 1)
+    {
+      /* We were going to enlarge the table, but allocating the new
+         vector finalized some objects, making an enlargement
+         unnecessary.  It might still be a good idea to use the larger
+         table, though.  (This branch also gets hit if, while allocating
+         the vector, some other thread was actively removing items from
+         the table.  That is less likely, though.)  */
+      unsigned long new_lower = hashtable_size[size_index] / 5;
+
+      return table->size > new_lower;
+    }
+
+  if (size_index == computed - 1)
+    {
+      /* We were going to shrink the table, but when we dropped the lock
+         to allocate the new vector, some other thread added elements to
+         the table.  */
+      return 0;
+    }
+
+  /* The computed size differs from our newly allocated size by more
+     than one size index -- recalculate.  */
+  return 0;
+}
+
 static void
 resize_table (scm_t_weak_table *table)
 {
@@ -450,7 +486,7 @@ resize_table (scm_t_weak_table *table)
       new_entries = allocate_entries (new_size, table->kind);
       scm_i_pthread_mutex_unlock (&table->lock);
     }
-  while (new_size_index != compute_size_index (table));
+  while (!is_acceptable_size_index (table, new_size_index));
 
   old_entries = table->entries;
   old_size = table->size;
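The weak-table change mirrors the weak-set one because, as the twin hunks show, the two files carry their own copies of this resizing code. In both, the lock is dropped around allocation (allocating can run finalizers, as the new comments note), so the ideal size may have moved by the time the vector comes back; is_acceptable_size_index is what decides whether that vector is still close enough to install rather than looping to allocate again.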