
Allow embedder to request a major GC

Andy Wingo 2023-10-18 10:20:55 +02:00
parent 8aa2036331
commit 514dcc702e
9 changed files with 104 additions and 70 deletions
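For context, here is a minimal sketch of how an embedder might use the entry point this commit changes. Only gc_collect's new signature and the gc_collection_kind values come from the diff below; the header name and the surrounding mutator setup are assumptions for illustration.

#include "gc-api.h"   /* assumed header name */

/* Sketch: an embedder requesting progressively stronger collections. */
static void embedder_gc_examples(struct gc_mutator *mut) {
  gc_collect(mut, GC_COLLECTION_ANY);        /* let the collector decide, as before */
  gc_collect(mut, GC_COLLECTION_MINOR);      /* at least a nursery collection */
  gc_collect(mut, GC_COLLECTION_MAJOR);      /* at least a whole-heap collection */
  gc_collect(mut, GC_COLLECTION_COMPACTING); /* whole-heap, evacuating collection */
}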


@@ -288,15 +288,6 @@ struct mark_space {
uintptr_t fragmentation_granules_since_last_collection; // atomically
};
enum gc_kind {
GC_KIND_FLAG_MINOR = GC_GENERATIONAL, // 0 or 1
GC_KIND_FLAG_EVACUATING = 0x2,
GC_KIND_MINOR_IN_PLACE = GC_KIND_FLAG_MINOR,
GC_KIND_MINOR_EVACUATING = GC_KIND_FLAG_MINOR | GC_KIND_FLAG_EVACUATING,
GC_KIND_MAJOR_IN_PLACE = 0,
GC_KIND_MAJOR_EVACUATING = GC_KIND_FLAG_EVACUATING,
};
struct gc_heap {
struct mark_space mark_space;
struct large_object_space large_object_space;
@@ -310,7 +301,7 @@ struct gc_heap {
int mark_while_stopping;
int check_pending_ephemerons;
struct gc_pending_ephemerons *pending_ephemerons;
enum gc_kind gc_kind;
enum gc_collection_kind gc_kind;
int multithreaded;
size_t active_mutator_count;
size_t mutator_count;
@@ -318,7 +309,7 @@ struct gc_heap {
struct gc_mutator *mutator_trace_list;
long count;
long minor_count;
uint8_t last_collection_was_minor;
enum gc_collection_kind last_collection_kind;
struct gc_mutator *deactivated_mutators;
struct tracer tracer;
double fragmentation_low_threshold;
@@ -380,7 +371,8 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
memset((char*)addr, 0, size);
}
static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
static void collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
GC_ASSERT(((uintptr_t)mark & 7) == 0);
@@ -1416,7 +1408,7 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
static void trace_generational_roots(struct gc_heap *heap) {
// TODO: Add lospace nursery.
if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
if (atomic_load(&heap->gc_kind) == GC_COLLECTION_MINOR) {
mark_space_trace_remembered_set(heap_mark_space(heap), heap);
large_object_space_trace_remembered_set(heap_large_object_space(heap),
enqueue_generational_root,
@@ -1580,10 +1572,12 @@ static double clamp_major_gc_yield_threshold(struct gc_heap *heap,
return threshold;
}
static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
static enum gc_collection_kind
determine_collection_kind(struct gc_heap *heap,
enum gc_collection_kind requested) {
struct mark_space *mark_space = heap_mark_space(heap);
enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
enum gc_kind gc_kind;
enum gc_collection_kind previous_gc_kind = atomic_load(&heap->gc_kind);
enum gc_collection_kind gc_kind;
int mark_while_stopping = 1;
double yield = heap_last_gc_yield(heap);
double fragmentation = heap_fragmentation(heap);
@@ -1592,13 +1586,16 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
if (heap->count == 0) {
DEBUG("first collection is always major\n");
gc_kind = GC_KIND_MAJOR_IN_PLACE;
gc_kind = GC_COLLECTION_MAJOR;
} else if (requested != GC_COLLECTION_ANY) {
DEBUG("user specifically requested collection kind %d\n", (int)requested);
gc_kind = requested;
} else if (pending > 0) {
DEBUG("evacuating due to need to reclaim %zd bytes\n", pending);
// During the last cycle, a large allocation could not find enough
// free blocks, and we decided not to expand the heap. Let's do an
// evacuating major collection to maximize the free block yield.
gc_kind = GC_KIND_MAJOR_EVACUATING;
gc_kind = GC_COLLECTION_COMPACTING;
// Generally speaking, we allow mutators to mark their own stacks
// before pausing. This is a limited form of concurrent marking, as
@@ -1610,7 +1607,7 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
// marking. Of course if the mutator has conservative roots we will
// have pinning anyway and might as well allow ragged stops.
mark_while_stopping = gc_has_conservative_roots();
} else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING
} else if (previous_gc_kind == GC_COLLECTION_COMPACTING
&& fragmentation >= heap->fragmentation_low_threshold) {
DEBUG("continuing evacuation due to fragmentation %.2f%% > %.2f%%\n",
fragmentation * 100.,
@@ -1618,46 +1615,50 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
// For some reason, we already decided to compact in the past,
// and fragmentation hasn't yet fallen below a low-water-mark.
// Keep going.
gc_kind = GC_KIND_MAJOR_EVACUATING;
gc_kind = GC_COLLECTION_COMPACTING;
} else if (fragmentation > heap->fragmentation_high_threshold) {
// Switch to evacuation mode if the heap is too fragmented.
DEBUG("triggering compaction due to fragmentation %.2f%% > %.2f%%\n",
fragmentation * 100.,
heap->fragmentation_high_threshold * 100.);
gc_kind = GC_KIND_MAJOR_EVACUATING;
} else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING) {
gc_kind = GC_COLLECTION_COMPACTING;
} else if (previous_gc_kind == GC_COLLECTION_COMPACTING) {
// We were evacuating, but we're good now. Go back to minor
// collections.
DEBUG("returning to in-place collection, fragmentation %.2f%% < %.2f%%\n",
fragmentation * 100.,
heap->fragmentation_low_threshold * 100.);
gc_kind = GC_KIND_MINOR_IN_PLACE;
} else if (previous_gc_kind != GC_KIND_MINOR_IN_PLACE) {
gc_kind = GC_GENERATIONAL ? GC_COLLECTION_MINOR : GC_COLLECTION_MAJOR;
} else if (!GC_GENERATIONAL) {
DEBUG("keeping on with major in-place GC\n");
GC_ASSERT(previous_gc_kind == GC_COLLECTION_MAJOR);
gc_kind = GC_COLLECTION_MAJOR;
} else if (previous_gc_kind != GC_COLLECTION_MINOR) {
DEBUG("returning to minor collection after major collection\n");
// Go back to minor collections.
gc_kind = GC_KIND_MINOR_IN_PLACE;
gc_kind = GC_COLLECTION_MINOR;
} else if (yield < heap->major_gc_yield_threshold) {
DEBUG("collection yield too low, triggering major collection\n");
// Nursery is getting tight; trigger a major GC.
gc_kind = GC_KIND_MAJOR_IN_PLACE;
gc_kind = GC_COLLECTION_MAJOR;
} else {
DEBUG("keeping on with minor GC\n");
// Nursery has adequate space; keep trucking with minor GCs.
GC_ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE);
gc_kind = GC_KIND_MINOR_IN_PLACE;
GC_ASSERT(previous_gc_kind == GC_COLLECTION_MINOR);
gc_kind = GC_COLLECTION_MINOR;
}
if (gc_has_conservative_intraheap_edges() &&
(gc_kind & GC_KIND_FLAG_EVACUATING)) {
gc_kind == GC_COLLECTION_COMPACTING) {
DEBUG("welp. conservative heap scanning, no evacuation for you\n");
gc_kind = GC_KIND_MAJOR_IN_PLACE;
gc_kind = GC_COLLECTION_MAJOR;
mark_while_stopping = 1;
}
// If this is the first in a series of minor collections, reset the
// threshold at which we should do a major GC.
if ((gc_kind & GC_KIND_FLAG_MINOR) &&
(previous_gc_kind & GC_KIND_FLAG_MINOR) != GC_KIND_FLAG_MINOR) {
if (gc_kind == GC_COLLECTION_MINOR &&
previous_gc_kind != GC_COLLECTION_MINOR) {
double yield = heap_last_gc_yield(heap);
double threshold = yield * heap->minor_gc_yield_threshold;
double clamped = clamp_major_gc_yield_threshold(heap, threshold);
@@ -1687,7 +1688,7 @@ static void release_evacuation_target_blocks(struct mark_space *space) {
static void prepare_for_evacuation(struct gc_heap *heap) {
struct mark_space *space = heap_mark_space(heap);
if ((heap->gc_kind & GC_KIND_FLAG_EVACUATING) == 0) {
if (heap->gc_kind != GC_COLLECTION_COMPACTING) {
space->evacuating = 0;
space->evacuation_reserve = space->evacuation_minimum_reserve;
return;
@@ -1796,7 +1797,7 @@ static void trace_roots_after_stop(struct gc_heap *heap) {
}
static void mark_space_finish_gc(struct mark_space *space,
enum gc_kind gc_kind) {
enum gc_collection_kind gc_kind) {
space->evacuating = 0;
reset_sweeper(space);
update_mark_patterns(space, 0);
@@ -1824,7 +1825,8 @@ static void sweep_ephemerons(struct gc_heap *heap) {
return gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
}
static void collect(struct gc_mutator *mut) {
static void collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
struct mark_space *space = heap_mark_space(heap);
struct large_object_space *lospace = heap_large_object_space(heap);
@@ -1835,12 +1837,12 @@ static void collect(struct gc_mutator *mut) {
}
MUTATOR_EVENT(mut, mutator_cause_gc);
DEBUG("start collect #%ld:\n", heap->count);
enum gc_kind gc_kind = determine_collection_kind(heap);
HEAP_EVENT(heap, prepare_gc, gc_kind & GC_KIND_FLAG_MINOR,
gc_kind & GC_KIND_FLAG_EVACUATING);
update_mark_patterns(space, !(gc_kind & GC_KIND_FLAG_MINOR));
large_object_space_start_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
gc_extern_space_start_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
enum gc_collection_kind gc_kind =
determine_collection_kind(heap, requested_kind);
HEAP_EVENT(heap, prepare_gc, gc_kind);
update_mark_patterns(space, gc_kind != GC_COLLECTION_MINOR);
large_object_space_start_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
gc_extern_space_start_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
resolve_ephemerons_lazily(heap);
tracer_prepare(heap);
HEAP_EVENT(heap, requesting_stop);
@@ -1868,11 +1870,11 @@ static void collect(struct gc_mutator *mut) {
sweep_ephemerons(heap);
tracer_release(heap);
mark_space_finish_gc(space, gc_kind);
large_object_space_finish_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
gc_extern_space_finish_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
large_object_space_finish_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
gc_extern_space_finish_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
heap->count++;
heap->last_collection_was_minor = gc_kind & GC_KIND_FLAG_MINOR;
if (heap->last_collection_was_minor)
heap->last_collection_kind = gc_kind;
if (gc_kind == GC_COLLECTION_MINOR)
heap->minor_count++;
heap_reset_large_object_pages(heap, lospace->live_pages_at_last_collection);
HEAP_EVENT(heap, restarting_mutators);
@@ -2079,7 +2081,7 @@ static size_t next_hole(struct gc_mutator *mut) {
// mark bytes didn't rotate, so we have no cleanup to do; and
// we shouldn't try to allocate into them as it's not worth
// it. Any wasted space is measured as fragmentation.
if (mutator_heap(mut)->last_collection_was_minor)
if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR)
continue;
else
block_summary_clear_flag(summary, BLOCK_VENERABLE);
@@ -2092,7 +2094,7 @@ static size_t next_hole(struct gc_mutator *mut) {
// mostly old data. Sweep any garbage, commit the mark as
// venerable, and avoid allocating into it.
block_summary_clear_flag(summary, BLOCK_VENERABLE_AFTER_SWEEP);
if (mutator_heap(mut)->last_collection_was_minor) {
if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR) {
finish_sweeping_in_block(mut);
block_summary_set_flag(summary, BLOCK_VENERABLE);
continue;
@@ -2153,18 +2155,22 @@ static void finish_sweeping(struct gc_mutator *mut) {
finish_hole(mut);
}
static void trigger_collection(struct gc_mutator *mut) {
static void trigger_collection(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
enum gc_collection_kind last_collection_kind = GC_COLLECTION_ANY;
heap_lock(heap);
if (mutators_are_stopping(heap))
while (mutators_are_stopping(heap)) {
pause_mutator_for_collection_with_lock(mut);
else
collect(mut);
last_collection_kind = heap->last_collection_kind;
}
if (last_collection_kind < requested_kind)
collect(mut, requested_kind);
heap_unlock(heap);
}
void gc_collect(struct gc_mutator *mut) {
trigger_collection(mut);
void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
trigger_collection(mut, kind);
}
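The `last_collection_kind < requested_kind` comparison above relies on the ordering of the new enum, which is defined in one of the other changed files and is not shown in this diff. A plausible reading, consistent with how the values are used here, is that weaker collection kinds compare less than stronger ones:

/* Assumed declaration order; not part of the hunks shown here. */
enum gc_collection_kind {
  GC_COLLECTION_ANY,        /* no preference; collector decides */
  GC_COLLECTION_MINOR,      /* nursery only */
  GC_COLLECTION_MAJOR,      /* whole heap, marking in place */
  GC_COLLECTION_COMPACTING, /* whole heap, evacuating */
};

Under that ordering, a mutator that paused for another thread's minor collection but requested a major one will still run its own collection once it reacquires the heap lock, because the collection it waited out was not strong enough to satisfy the request.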
static void* allocate_large(struct gc_mutator *mut, size_t size) {
@@ -2177,7 +2183,7 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
npages << space->page_size_log2);
while (!sweep_until_memory_released(mut))
trigger_collection(mut);
trigger_collection(mut, GC_COLLECTION_COMPACTING);
atomic_fetch_add(&heap->large_object_pages, npages);
void *ret = large_object_space_alloc(space, npages);
@@ -2215,7 +2221,7 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
break;
}
if (!hole)
trigger_collection(mut);
trigger_collection(mut, GC_COLLECTION_ANY);
}
ret = gc_ref(mut->alloc);
mut->alloc += size;