mirror of https://git.savannah.gnu.org/git/guile.git (synced 2025-05-18 18:40:22 +02:00)
Fix race condition in computation of mark-while-stopping
Choose the ragged stop strategy when the GC kind is determined, so that the decision is made with respect to a single measurement of pending unavailable bytes. Also remove the assert in heap_should_mark_while_stopping, as it can be called after stopping too, when evacuation is enabled.
parent 1e3122d054
commit 24bd94d9f7
1 changed file with 23 additions and 16 deletions
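For context on the race being fixed: previously, pending_unavailable_bytes was loaded once in determine_collection_kind to pick the collection kind and loaded again in heap_should_mark_while_stopping to decide whether mutators mark their own stacks during the ragged stop, so the two reads could observe different values. The sketch below is a condensed, simplified illustration of the before/after pattern, using stand-in globals rather than the actual whippet structures: take one snapshot, derive both decisions from it, and publish the ragged-stop choice with a release store.

#include <stdatomic.h>
#include <stdio.h>
#include <sys/types.h>

/* Illustrative stand-ins; the real fields live on struct gc_heap and
   struct mark_space in whippet.c. */
static _Atomic ssize_t pending_unavailable_bytes;
static atomic_int mark_while_stopping;

/* Before: two independent acquire loads of the same counter.  Another
   thread can change it between the loads, so the "evacuating" and the
   "mark while stopping" decisions may be based on different values. */
static void decide_racy(void) {
  int evacuating =
      atomic_load_explicit(&pending_unavailable_bytes, memory_order_acquire) > 0;
  int mark =
      !(atomic_load_explicit(&pending_unavailable_bytes, memory_order_acquire) > 0);
  printf("racy: evacuating=%d mark=%d\n", evacuating, mark);
}

/* After: one snapshot drives both decisions, and the ragged-stop choice
   is published for mutators with a release store.  (The real code also
   keeps marking enabled when the mutator has conservative roots;
   omitted here for brevity.) */
static void decide_fixed(void) {
  ssize_t pending = atomic_load_explicit(&pending_unavailable_bytes,
                                         memory_order_acquire);
  int evacuating = pending > 0;
  atomic_store_explicit(&mark_while_stopping, !evacuating, memory_order_release);
  printf("fixed: evacuating=%d mark=%d\n", evacuating, !evacuating);
}

int main(void) {
  atomic_store(&pending_unavailable_bytes, 4096);
  decide_racy();
  decide_fixed();
  return 0;
}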
whippet.c (39 changed lines: +23 -16)

@@ -310,6 +310,7 @@ struct gc_heap {
   pthread_cond_t mutator_cond;
   size_t size;
   int collecting;
+  int mark_while_stopping;
   enum gc_kind gc_kind;
   int multithreaded;
   size_t active_mutator_count;
@@ -871,20 +872,7 @@ static void enqueue_mutator_for_tracing(struct gc_mutator *mut) {
 }
 
 static int heap_should_mark_while_stopping(struct gc_heap *heap) {
-  // Generally speaking, we allow mutators to mark their own stacks
-  // before pausing. This is a limited form of concurrent marking, as
-  // other mutators might be running, not having received the signal to
-  // stop yet. In a compacting collection, this results in pinned
-  // roots, because we haven't started evacuating yet and instead mark
-  // in place; avoid this pinning only if we're trying to reclaim free
-  // blocks.
-  GC_ASSERT(!heap_mark_space(heap)->evacuating);
-  if ((atomic_load(&heap->gc_kind) & GC_KIND_FLAG_EVACUATING)
-      && atomic_load_explicit(&heap_mark_space(heap)->pending_unavailable_bytes,
-                              memory_order_acquire) > 0)
-    return 0;
-
-  return 1;
+  return atomic_load_explicit(&heap->mark_while_stopping, memory_order_acquire);
 }
 
 static int mutator_should_mark_while_stopping(struct gc_mutator *mut) {
@@ -1245,18 +1233,32 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
   enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
   enum gc_kind gc_kind;
+  int mark_while_stopping = 1;
   double yield = heap_last_gc_yield(heap);
   double fragmentation = heap_fragmentation(heap);
+  ssize_t pending = atomic_load_explicit(&mark_space->pending_unavailable_bytes,
+                                         memory_order_acquire);
 
   if (heap->count == 0) {
     DEBUG("first collection is always major\n");
     gc_kind = GC_KIND_MAJOR_IN_PLACE;
-  } else if (atomic_load_explicit(&mark_space->pending_unavailable_bytes,
-                                  memory_order_acquire) > 0) {
+  } else if (pending > 0) {
+    DEBUG("evacuating due to need to reclaim %zd bytes\n", pending);
     // During the last cycle, a large allocation could not find enough
     // free blocks, and we decided not to expand the heap. Let's do an
     // evacuating major collection to maximize the free block yield.
     gc_kind = GC_KIND_MAJOR_EVACUATING;
+
+    // Generally speaking, we allow mutators to mark their own stacks
+    // before pausing. This is a limited form of concurrent marking, as
+    // other mutators might be running, not having received the signal
+    // to stop yet. In a compacting collection, this results in pinned
+    // roots, because we haven't started evacuating yet and instead mark
+    // in place. However as in this case we are trying to reclaim free
+    // blocks, try to avoid any pinning caused by the ragged-stop
+    // marking. Of course if the mutator has conservative roots we will
+    // have pinning anyway and might as well allow ragged stops.
+    mark_while_stopping = gc_has_conservative_roots();
   } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING
              && fragmentation >= heap->fragmentation_low_threshold) {
     DEBUG("continuing evacuation due to fragmentation %.2f%% > %.2f%%\n",
@@ -1298,6 +1300,7 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
       (gc_kind & GC_KIND_FLAG_EVACUATING)) {
     DEBUG("welp. conservative heap scanning, no evacuation for you\n");
     gc_kind = GC_KIND_MAJOR_IN_PLACE;
+    mark_while_stopping = 1;
   }
 
   // If this is the first in a series of minor collections, reset the
@@ -1311,6 +1314,10 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
     DEBUG("first minor collection at yield %.2f%%, threshold %.2f%%\n",
           yield * 100., clamped * 100.);
   }
+
+  atomic_store_explicit(&heap->mark_while_stopping, mark_while_stopping,
+                        memory_order_release);
+
   atomic_store(&heap->gc_kind, gc_kind);
   return gc_kind;
 }
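A note on the ordering used above: the new flag is written with memory_order_release in determine_collection_kind and read with memory_order_acquire in heap_should_mark_while_stopping. Below is a minimal standalone sketch of that publish/consume handshake between a collector and a mutator thread; the names and the stop mechanism are illustrative, not the whippet API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mark_while_stopping;
static atomic_int stop_requested;

static void *mutator(void *arg) {
  (void)arg;
  /* Run until asked to stop. */
  while (!atomic_load_explicit(&stop_requested, memory_order_acquire))
    ;
  /* Consult the published ragged-stop decision. */
  if (atomic_load_explicit(&mark_while_stopping, memory_order_acquire))
    printf("mutator: marking own stack before pausing\n");
  else
    printf("mutator: pausing without marking (evacuation pending)\n");
  return NULL;
}

int main(void) {
  pthread_t t;
  pthread_create(&t, NULL, mutator, NULL);
  /* Collector side: decide once, publish the decision, then request the stop. */
  atomic_store_explicit(&mark_while_stopping, 0, memory_order_release);
  atomic_store_explicit(&stop_requested, 1, memory_order_release);
  pthread_join(t, NULL);
  return 0;
}

In this sketch the flag is release-stored before the stop is requested, so once the mutator's acquire load observes the stop request it is also guaranteed to observe the flag value chosen for this collection.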