mirror of https://git.savannah.gnu.org/git/guile.git
semi: Tail-call an out-of-memory handler if allocation fails
parent 31b373b8f2
commit 6032adede9
1 changed file with 33 additions and 22 deletions
src/semi.c: 55 changed lines
@@ -52,6 +52,7 @@ struct gc_heap {
   struct gc_heap_sizer sizer;
   struct gc_event_listener event_listener;
   void *event_listener_data;
+  void* (*allocation_failure)(struct gc_heap *, size_t);
 };
 // One mutator per space, can just store the heap in the mutator.
 struct gc_mutator {
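
The new allocation_failure member is the heart of the change: allocation paths that used to print and GC_CRASH() in place now tail-call this hook and return whatever it returns. Below is a minimal, self-contained model of that pattern; the toy_* names are invented for illustration and are not Whippet's API:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the real gc_heap; names are illustrative only. */
struct toy_heap {
  size_t size;   /* total budget in bytes */
  size_t used;   /* bytes handed out so far */
  /* Out-of-memory hook, mirroring the new allocation_failure field. */
  void* (*allocation_failure)(struct toy_heap *, size_t);
};

/* Default handler: report and crash, like the one this commit installs. */
static void* toy_default_allocation_failure(struct toy_heap *heap, size_t size) {
  fprintf(stderr, "ran out of space, heap size %zu, requested %zu\n",
          heap->size, size);
  abort();
}

/* Allocation slow path: instead of crashing inline, tail-call the hook. */
static void* toy_allocate(struct toy_heap *heap, size_t size) {
  if (heap->used + size > heap->size)
    return heap->allocation_failure(heap, size);
  heap->used += size;
  return malloc(size);   /* stand-in for bumping the semi-space pointer */
}

int main(void) {
  struct toy_heap heap = { .size = 64, .used = 0,
                           .allocation_failure = toy_default_allocation_failure };
  void *ok = toy_allocate(&heap, 32);    /* fits: returns a pointer */
  void *oom = toy_allocate(&heap, 64);   /* does not fit: handler runs */
  (void) ok; (void) oom;
  return 0;
}

The handler installed by default later in this diff behaves like toy_default_allocation_failure here: it reports the heap size and crashes, so observable behaviour is unchanged until an embedder substitutes its own handler.
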
@@ -103,9 +104,9 @@ static uintptr_t align_up(uintptr_t addr, size_t align) {
 static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }
 static size_t max_size(size_t a, size_t b) { return a < b ? b : a; }

-static void collect(struct gc_mutator *mut, size_t for_alloc) GC_NEVER_INLINE;
-static void collect_for_alloc(struct gc_mutator *mut,
-                              size_t bytes) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
+static int collect_for_alloc(struct gc_mutator *mut,
+                             size_t bytes) GC_NEVER_INLINE;

 static void trace(struct gc_edge edge, struct gc_heap *heap, void *visit_data);

@@ -385,7 +386,7 @@ static uintptr_t resolve_finalizers(struct gc_heap *heap, uintptr_t grey) {
   return grey;
 }

-static void collect(struct gc_mutator *mut, size_t for_alloc) {
+static void collect(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   int is_minor = 0;
   int is_compacting = 1;
@@ -429,7 +430,6 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
   size_t live_size = semi->live_bytes_at_last_gc;
   live_size += large_object_space_size_at_last_collection(large);
-  live_size += for_alloc;
   uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
   HEAP_EVENT(heap, live_data_size, live_size);
   DEBUG("gc %zu: live size %zu, heap size %zu\n", heap->count, live_size,
@@ -443,30 +443,29 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
 }

-static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
-  collect(mut, bytes);
+static int collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
+  collect(mut);

   struct semi_space *space = mutator_semi_space(mut);
-  if (bytes < space->limit - space->hp)
-    return;
+  if (bytes <= space->limit - space->hp)
+    return 1;

   struct gc_heap *heap = mutator_heap(mut);
   if (heap->options->common.heap_size_policy != GC_HEAP_SIZE_FIXED) {
     // Each collection can potentially resize only the inactive
     // fromspace, so if we really run out of space we will need to
     // collect again in order to resize the other half.
-    collect(mut, bytes);
-    if (bytes < space->limit - space->hp)
-      return;
+    collect(mut);
+    if (bytes <= space->limit - space->hp)
+      return 1;
   }
-  fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
-  GC_CRASH();
+  return 0;
 }

 void gc_collect(struct gc_mutator *mut,
                 enum gc_collection_kind requested_kind) {
   // Ignore requested kind, because we always compact.
-  collect(mut, 0);
+  collect(mut);
 }

 int gc_object_is_old_generation_slow(struct gc_mutator *mut,
@@ -482,8 +481,11 @@ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
 int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
 void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }

-static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
-  collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
+static int collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
+  size_t bytes = npages * mutator_semi_space(mut)->page_size;
+  // Large object pages don't need a copy reserve.
+  bytes /= 2;
+  return collect_for_alloc(mut, bytes);
 }

 static void* allocate_large(struct gc_mutator *mut, size_t size) {
@@ -492,8 +494,9 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct semi_space *semi_space = heap_semi_space(heap);

   size_t npages = large_object_space_npages(space, size);
-  while (!semi_space_steal_pages(semi_space, npages))
-    collect_for_large_alloc(mut, npages);
+  if (!semi_space_steal_pages(semi_space, npages)
+      && !collect_for_large_alloc(mut, npages))
+    return heap->allocation_failure(heap, size);

   void *ret = large_object_space_alloc(space, npages, GC_TRACE_PRECISELY);

@@ -522,9 +525,10 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
     uintptr_t addr = space->hp;
     uintptr_t new_hp = align_up (addr + size, GC_ALIGNMENT);
     if (space->limit < new_hp) {
-      // The factor of 2 is for both regions.
-      collect_for_alloc(mut, size * 2);
-      continue;
+      if (collect_for_alloc(mut, size))
+        continue;
+      struct gc_heap *heap = mutator_heap(mut);
+      return heap->allocation_failure(heap, size);
     }
     space->hp = new_hp;
     return (void *)addr;
@@ -626,6 +630,12 @@ static void ignore_async_heap_size_adjustment(struct gc_heap *heap,
                                               size_t size) {
 }

+static void* allocation_failure(struct gc_heap *heap, size_t size) {
+  fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
+  GC_CRASH();
+  return NULL;
+}
+
 static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
   heap->extern_space = NULL;
   heap->pending_ephemerons_size_factor = 0.01;
@@ -642,6 +652,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
                          get_allocation_counter,
                          ignore_async_heap_size_adjustment,
                          NULL);
+  heap->allocation_failure = allocation_failure;

   return heap_prepare_pending_ephemerons(heap);
 }
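
With the default wired up in heap_init, the hook preserves the old report-and-crash behaviour. An embedder that wants allocation to fail softly could supply a handler of the same shape; the sketch below is hypothetical, since this diff shows no public API for registering a handler, and whether the surrounding runtime tolerates a NULL result depends on the embedder's own allocation sites:

#include <stdio.h>

struct gc_heap;   /* opaque here; defined in semi.c */

// Hypothetical replacement handler matching the new field's signature:
//   void* (*allocation_failure)(struct gc_heap *, size_t);
// Returning NULL would make gc_allocate_slow / allocate_large return NULL,
// letting the caller raise an out-of-memory condition instead of crashing.
void* report_oom_and_return_null(struct gc_heap *heap, size_t size) {
  (void) heap;
  fprintf(stderr, "allocation of %zu bytes failed\n", size);
  return NULL;
}

// Assumed installation point (not part of this diff): inside the collector,
// something would have to do the equivalent of
//   heap->allocation_failure = report_oom_and_return_null;
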