Mirror of https://git.savannah.gnu.org/git/guile.git

semi: Tail-call an out-of-memory handler if allocation fails

Andy Wingo 2025-04-23 10:47:13 +02:00
parent 31b373b8f2
commit 6032adede9
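
The hook added by this commit has the type void* (*)(struct gc_heap *, size_t). When the semi-space or large-object allocation path still has no room after collecting, it tail-calls heap->allocation_failure(heap, size) and returns whatever the handler returns; the default installed in heap_init keeps the old behaviour of printing a message and calling GC_CRASH(). As a rough sketch (the handler name is illustrative, and this commit adds no public API for installing a replacement), a non-crashing handler could simply report the failure and return NULL, which the allocators then propagate to their callers:

// Sketch of a hypothetical out-of-memory handler matching the new
// field's signature: void* (*allocation_failure)(struct gc_heap *, size_t).
// Returning NULL makes gc_allocate_slow / allocate_large hand NULL back to
// the caller instead of crashing, because they return the handler's result.
#include <stdio.h>
#include <stddef.h>

struct gc_heap;  // opaque here; defined inside the collector

void* report_and_return_null(struct gc_heap *heap, size_t bytes) {
  (void) heap;
  fprintf(stderr, "gc: failed to allocate %zu bytes\n", bytes);
  return NULL;
}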


@@ -52,6 +52,7 @@ struct gc_heap {
   struct gc_heap_sizer sizer;
   struct gc_event_listener event_listener;
   void *event_listener_data;
+  void* (*allocation_failure)(struct gc_heap *, size_t);
 };
 // One mutator per space, can just store the heap in the mutator.
 struct gc_mutator {
@@ -103,9 +104,9 @@ static uintptr_t align_up(uintptr_t addr, size_t align) {
 static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }
 static size_t max_size(size_t a, size_t b) { return a < b ? b : a; }
 
-static void collect(struct gc_mutator *mut, size_t for_alloc) GC_NEVER_INLINE;
-static void collect_for_alloc(struct gc_mutator *mut,
-                              size_t bytes) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
+static int collect_for_alloc(struct gc_mutator *mut,
+                             size_t bytes) GC_NEVER_INLINE;
 
 static void trace(struct gc_edge edge, struct gc_heap *heap, void *visit_data);
@@ -385,7 +386,7 @@ static uintptr_t resolve_finalizers(struct gc_heap *heap, uintptr_t grey) {
   return grey;
 }
 
-static void collect(struct gc_mutator *mut, size_t for_alloc) {
+static void collect(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   int is_minor = 0;
   int is_compacting = 1;
@@ -429,7 +430,6 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
   size_t live_size = semi->live_bytes_at_last_gc;
   live_size += large_object_space_size_at_last_collection(large);
-  live_size += for_alloc;
   uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
   HEAP_EVENT(heap, live_data_size, live_size);
   DEBUG("gc %zu: live size %zu, heap size %zu\n", heap->count, live_size,
@@ -443,30 +443,29 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
 }
 
-static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
-  collect(mut, bytes);
+static int collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
+  collect(mut);
 
   struct semi_space *space = mutator_semi_space(mut);
-  if (bytes < space->limit - space->hp)
-    return;
+  if (bytes <= space->limit - space->hp)
+    return 1;
 
   struct gc_heap *heap = mutator_heap(mut);
   if (heap->options->common.heap_size_policy != GC_HEAP_SIZE_FIXED) {
     // Each collection can potentially resize only the inactive
     // fromspace, so if we really run out of space we will need to
     // collect again in order to resize the other half.
-    collect(mut, bytes);
-    if (bytes < space->limit - space->hp)
-      return;
+    collect(mut);
+    if (bytes <= space->limit - space->hp)
+      return 1;
   }
-  fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
-  GC_CRASH();
+  return 0;
 }
 
 void gc_collect(struct gc_mutator *mut,
                 enum gc_collection_kind requested_kind) {
   // Ignore requested kind, because we always compact.
-  collect(mut, 0);
+  collect(mut);
 }
 
 int gc_object_is_old_generation_slow(struct gc_mutator *mut,
@@ -482,8 +481,11 @@ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
 int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
 void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
 
-static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
-  collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
+static int collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
+  size_t bytes = npages * mutator_semi_space(mut)->page_size;
+  // Large object pages don't need a copy reserve.
+  bytes /= 2;
+  return collect_for_alloc(mut, bytes);
 }
 
 static void* allocate_large(struct gc_mutator *mut, size_t size) {
@@ -492,8 +494,9 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct semi_space *semi_space = heap_semi_space(heap);
   size_t npages = large_object_space_npages(space, size);
 
-  while (!semi_space_steal_pages(semi_space, npages))
-    collect_for_large_alloc(mut, npages);
+  if (!semi_space_steal_pages(semi_space, npages)
+      && !collect_for_large_alloc(mut, npages))
+    return heap->allocation_failure(heap, size);
 
   void *ret = large_object_space_alloc(space, npages, GC_TRACE_PRECISELY);
@@ -522,9 +525,10 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
     uintptr_t addr = space->hp;
     uintptr_t new_hp = align_up (addr + size, GC_ALIGNMENT);
     if (space->limit < new_hp) {
-      // The factor of 2 is for both regions.
-      collect_for_alloc(mut, size * 2);
-      continue;
+      if (collect_for_alloc(mut, size))
+        continue;
+      struct gc_heap *heap = mutator_heap(mut);
+      return heap->allocation_failure(heap, size);
     }
     space->hp = new_hp;
     return (void *)addr;
@@ -626,6 +630,12 @@ static void ignore_async_heap_size_adjustment(struct gc_heap *heap,
                                               size_t size) {
 }
 
+static void* allocation_failure(struct gc_heap *heap, size_t size) {
+  fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
+  GC_CRASH();
+  return NULL;
+}
+
 static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
   heap->extern_space = NULL;
   heap->pending_ephemerons_size_factor = 0.01;
@@ -642,6 +652,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
                                 get_allocation_counter,
                                 ignore_async_heap_size_adjustment,
                                 NULL);
+  heap->allocation_failure = allocation_failure;
 
   return heap_prepare_pending_ephemerons(heap);
 }