
Merge remote-tracking branch 'whippet/main' into wip-whippet

Andy Wingo 2025-04-23 13:48:18 +02:00
commit 25db208603
8 changed files with 204 additions and 99 deletions

View file

@@ -42,6 +42,10 @@ struct gc_heap_roots;
GC_API_ void gc_heap_set_roots(struct gc_heap *heap,
struct gc_heap_roots *roots);
GC_API_ void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*)(struct gc_heap*,
size_t));
struct gc_extern_space;
GC_API_ void gc_heap_set_extern_space(struct gc_heap *heap,
struct gc_extern_space *space);
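
Taken together with the collector changes below, the new hook lets an embedder decide what happens when the heap cannot satisfy an allocation: whatever the handler returns is handed back to the failed allocation call, and the default handler installed by each collector keeps the old print-and-crash behavior. A minimal client-side sketch follows; the header name and the install helper are assumptions for illustration, not part of this hunk.

#include <stdio.h>
#include <stdlib.h>
#include "gc-api.h"   /* header name assumed */

/* Hypothetical embedder handler: report the failed request and bail out
   instead of relying on the collector's built-in default.  Whatever this
   returns is what the failed allocation call itself returns, so returning
   NULL would instead surface the failure to the allocation site. */
static void* on_allocation_failure(struct gc_heap *heap, size_t size) {
  fprintf(stderr, "heap exhausted; could not allocate %zu bytes\n", size);
  exit(1);
  return NULL;  /* not reached */
}

static void install_failure_handler(struct gc_heap *heap) {
  gc_heap_set_allocation_failure_handler(heap, on_allocation_failure);
}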

View file

@@ -60,6 +60,7 @@ struct gc_heap {
struct gc_finalizer_state *finalizer_state;
gc_finalizer_callback have_finalizers;
void *event_listener_data;
void* (*allocation_failure)(struct gc_heap *, size_t);
};
struct gc_mutator {
@@ -116,11 +117,8 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
size_t bytes = gc_inline_freelist_object_size(idx);
GC_generic_malloc_many(bytes, kind, freelist);
head = *freelist;
if (GC_UNLIKELY (!head)) {
fprintf(stderr, "ran out of space, heap size %zu\n",
GC_get_heap_size());
GC_CRASH();
}
if (GC_UNLIKELY (!head))
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, bytes);
}
*freelist = *(void **)(head);
@@ -152,13 +150,20 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
} else {
switch (kind) {
case GC_ALLOCATION_TAGGED:
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
return GC_malloc(size);
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: {
void *ret = GC_malloc(size);
if (GC_LIKELY (ret != NULL))
return ret;
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
}
case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS: {
void *ret = GC_malloc_atomic(size);
memset(ret, 0, size);
return ret;
if (GC_LIKELY (ret != NULL)) {
memset(ret, 0, size);
return ret;
}
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
}
default:
GC_CRASH();
@@ -521,6 +526,22 @@ uint64_t gc_allocation_counter(struct gc_heap *heap) {
return GC_get_total_bytes();
}
static void* allocation_failure(struct gc_heap *heap, size_t size) {
fprintf(stderr, "ran out of space, heap size %zu\n", GC_get_heap_size());
GC_CRASH();
return NULL;
}
static void* oom_fn(size_t nbytes) {
return NULL;
}
void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*handler)(struct gc_heap*,
size_t)) {
heap->allocation_failure = handler;
}
int gc_init(const struct gc_options *options, struct gc_stack_addr stack_base,
struct gc_heap **heap, struct gc_mutator **mutator,
struct gc_event_listener event_listener,
@@ -607,6 +628,8 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr stack_base,
HEAP_EVENT(init, GC_get_heap_size());
GC_set_on_collection_event(on_collection_event);
GC_set_on_heap_resize(on_heap_resize);
GC_set_oom_fn (oom_fn);
(*heap)->allocation_failure = allocation_failure;
*mutator = add_mutator(*heap);
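
For context, GC_set_oom_fn is a stock bdwgc hook: the function it installs runs when the collector cannot satisfy a request, and its return value becomes the result of the failed GC_malloc/GC_malloc_atomic call. Installing one that returns NULL is presumably what lets the wrappers above observe NULL and forward the failure to heap->allocation_failure. A standalone illustration against plain bdwgc, independent of this patch (header path may differ per system):

#include <gc.h>       /* bdwgc; sometimes installed as <gc/gc.h> */
#include <stdio.h>

static void* oom(size_t nbytes) {
  /* Returning NULL makes the failed allocation return NULL to the caller
     rather than triggering bdwgc's internal out-of-memory handling. */
  return NULL;
}

int main(void) {
  GC_INIT();
  GC_set_oom_fn(oom);
  void *p = GC_MALLOC(1024);
  if (!p)
    fprintf(stderr, "allocation failed\n");
  return 0;
}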

View file

@@ -287,7 +287,7 @@ copy_space_request_release_memory(struct copy_space *space, size_t bytes) {
return atomic_fetch_add(&space->bytes_to_page_out, bytes) + bytes;
}
static int
static ssize_t
copy_space_page_out_blocks_until_memory_released(struct copy_space *space) {
ssize_t pending = atomic_load(&space->bytes_to_page_out);
struct gc_lock lock = copy_space_lock(space);
@@ -299,7 +299,7 @@ copy_space_page_out_blocks_until_memory_released(struct copy_space *space) {
- COPY_SPACE_BLOCK_SIZE);
}
gc_lock_release(&lock);
return pending <= 0;
return pending;
}
static ssize_t
@@ -409,11 +409,23 @@ copy_space_allocator_acquire_block(struct copy_space_allocator *alloc,
return 0;
}
static struct copy_space_block*
copy_space_obtain_empty_block_during_gc(struct copy_space *space,
const struct gc_lock *lock) {
GC_ASSERT(!copy_space_pop_empty_block(space, lock));
struct copy_space_block *block = copy_space_page_in_block(space, lock);
if (block)
atomic_fetch_add(&space->bytes_to_page_out, COPY_SPACE_BLOCK_SIZE);
return block;
}
static int
copy_space_allocator_acquire_empty_block(struct copy_space_allocator *alloc,
struct copy_space *space) {
struct gc_lock lock = copy_space_lock(space);
struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
if (!block && space->in_gc)
block = copy_space_obtain_empty_block_during_gc(space, &lock);
gc_lock_release(&lock);
if (copy_space_allocator_acquire_block(alloc, block, space->active_region)) {
block->in_core = 1;
@@ -925,7 +937,10 @@ static int
copy_space_init(struct copy_space *space, size_t size, uint32_t flags,
struct gc_background_thread *thread) {
size = align_up(size, COPY_SPACE_BLOCK_SIZE);
size_t reserved = align_up(size, COPY_SPACE_SLAB_SIZE);
// Reserve a few extra blocks to handle the fragmentation problem
// (https://wingolog.org/archives/2024/07/10/copying-collectors-with-block-structured-heaps-are-unreliable).
size_t reserved = size + COPY_SPACE_BLOCK_SIZE * 16;
reserved = align_up(reserved, COPY_SPACE_SLAB_SIZE);
if (flags & COPY_SPACE_ALIGNED)
reserved = copy_space_round_up_power_of_two(reserved);
size_t nslabs = reserved / COPY_SPACE_SLAB_SIZE;
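
A quick worked example of the new sizing, using illustrative values not stated in this hunk (say a 64 kB COPY_SPACE_BLOCK_SIZE and a 4 MB COPY_SPACE_SLAB_SIZE): the reservation now carries explicit headroom of 16 blocks, i.e. 1 MB, so an 8 MB request reserves align_up(8 MB + 1 MB, 4 MB) = 12 MB of address space where it previously reserved exactly 8 MB, giving the collector spare blocks to page in when block-level fragmentation strikes.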

View file

@@ -162,12 +162,12 @@ gc_platform_acquire_memory_from_reservation(struct gc_reservation reservation,
GC_ASSERT(size <= reservation.size);
GC_ASSERT(offset <= reservation.size - size);
void *mem = mmap((void*)(reservation.base + offset), size,
PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (mem == MAP_FAILED) {
perror("mmap failed");
void *mem = (void*)(reservation.base + offset);
if (mprotect(mem, size, PROT_READ|PROT_WRITE)) {
perror("mprotect failed");
return NULL;
}
// FIXME: Should we gc_platform_populate_memory() here?
return mem;
}
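
The hunk above switches the platform memory layer from re-mmapping over a reservation to committing pages with mprotect. A minimal, self-contained sketch of that reserve-then-commit pattern, assuming the reservation itself was created as an inaccessible PROT_NONE mapping (the flags here are illustrative, not copied from this file):

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

/* Reserve address space without committing it. */
void* reserve_region(size_t size) {
  void *base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return base == MAP_FAILED ? NULL : base;
}

/* Commit a sub-range by flipping its protection, as the new code does,
   instead of calling mmap again over the reserved range. */
void* commit_range(void *base, size_t offset, size_t size) {
  void *mem = (char*)base + offset;
  if (mprotect(mem, size, PROT_READ | PROT_WRITE)) {
    perror("mprotect failed");
    return NULL;
  }
  return mem;
}

One plausible motivation for the change: without MAP_FIXED, the removed mmap call treated the target address only as a hint, whereas mprotect adjusts the existing reservation exactly in place.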

View file

@@ -66,6 +66,7 @@ struct gc_heap {
struct gc_heap_sizer sizer;
struct gc_event_listener event_listener;
void *event_listener_data;
void* (*allocation_failure)(struct gc_heap*, size_t);
};
#define HEAP_EVENT(heap, event, ...) do { \
@@ -511,20 +512,18 @@ heap_estimate_live_data_after_gc(struct gc_heap *heap,
return bytes;
}
static void
detect_out_of_memory(struct gc_heap *heap, uintptr_t allocation_since_last_gc) {
if (heap->sizer.policy != GC_HEAP_SIZE_FIXED)
return;
static int
compute_progress(struct gc_heap *heap, uintptr_t allocation_since_last_gc) {
struct nofl_space *nofl = heap_nofl_space(heap);
return allocation_since_last_gc > nofl_space_fragmentation(nofl);
}
if (allocation_since_last_gc > nofl_space_fragmentation(heap_nofl_space(heap)))
return;
if (heap->gc_kind == GC_COLLECTION_MINOR)
return;
// No allocation since last gc: out of memory.
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
static int
compute_success(struct gc_heap *heap, enum gc_collection_kind gc_kind,
int progress) {
return progress
|| gc_kind == GC_COLLECTION_MINOR
|| heap->sizer.policy != GC_HEAP_SIZE_FIXED;
}
static double
@@ -750,12 +749,10 @@ sweep_ephemerons(struct gc_heap *heap) {
return gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
}
static void collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind,
int requested_by_user) GC_NEVER_INLINE;
static void
collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind,
int requested_by_user) {
static int collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
static int
collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
struct nofl_space *nofl_space = heap_nofl_space(heap);
struct large_object_space *lospace = heap_large_object_space(heap);
@@ -773,8 +770,7 @@ collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind,
nofl_space_add_to_allocation_counter(nofl_space, &allocation_counter);
large_object_space_add_to_allocation_counter(lospace, &allocation_counter);
heap->total_allocated_bytes_at_last_gc += allocation_counter;
if (!requested_by_user)
detect_out_of_memory(heap, allocation_counter);
int progress = compute_progress(heap, allocation_counter);
enum gc_collection_kind gc_kind =
determine_collection_kind(heap, requested_kind);
int is_minor = gc_kind == GC_COLLECTION_MINOR;
@@ -821,12 +817,12 @@ collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind,
heap->size_at_last_gc = heap->size;
HEAP_EVENT(heap, restarting_mutators);
allow_mutators_to_continue(heap);
return compute_success(heap, gc_kind, progress);
}
static void
static int
trigger_collection(struct gc_mutator *mut,
enum gc_collection_kind requested_kind,
int requested_by_user) {
enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
int prev_kind = -1;
gc_stack_capture_hot(&mut->stack);
@@ -836,14 +832,16 @@ trigger_collection(struct gc_mutator *mut,
heap_lock(heap);
while (mutators_are_stopping(heap))
prev_kind = pause_mutator_for_collection(heap, mut);
int success = 1;
if (prev_kind < (int)requested_kind)
collect(mut, requested_kind, requested_by_user);
success = collect(mut, requested_kind);
heap_unlock(heap);
return success;
}
void
gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
trigger_collection(mut, kind, 1);
trigger_collection(mut, kind);
}
int*
@@ -903,8 +901,10 @@ allocate_large(struct gc_mutator *mut, size_t size,
nofl_space_request_release_memory(nofl_space,
npages << lospace->page_size_log2);
while (!nofl_space_shrink(nofl_space, 0))
trigger_collection(mut, GC_COLLECTION_COMPACTING, 0);
while (!nofl_space_shrink(nofl_space, 0)) {
if (!trigger_collection(mut, GC_COLLECTION_COMPACTING))
return heap->allocation_failure(heap, size);
}
atomic_fetch_add(&heap->large_object_pages, npages);
void *ret = large_object_space_alloc(lospace, npages, kind);
@@ -919,7 +919,7 @@ allocate_large(struct gc_mutator *mut, size_t size,
static void
collect_for_small_allocation(void *mut) {
trigger_collection(mut, GC_COLLECTION_ANY, 0);
trigger_collection(mut, GC_COLLECTION_ANY);
}
void*
@@ -930,10 +930,16 @@ gc_allocate_slow(struct gc_mutator *mut, size_t size,
if (size > gc_allocator_large_threshold())
return allocate_large(mut, size, compute_trace_kind(kind));
return gc_ref_heap_object(nofl_allocate(&mut->allocator,
heap_nofl_space(mutator_heap(mut)),
size, collect_for_small_allocation,
mut, kind));
struct gc_heap *heap = mutator_heap(mut);
while (1) {
struct gc_ref ret = nofl_allocate(&mut->allocator, heap_nofl_space(heap),
size, kind);
if (!gc_ref_is_null(ret))
return gc_ref_heap_object(ret);
if (trigger_collection(mut, GC_COLLECTION_ANY))
continue;
return heap->allocation_failure(heap, size);
}
}
void
@@ -1109,6 +1115,18 @@ static void set_heap_size_from_thread(struct gc_heap *heap, size_t size) {
pthread_mutex_unlock(&heap->lock);
}
static void* allocation_failure(struct gc_heap *heap, size_t size) {
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
return NULL;
}
void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*handler)(struct gc_heap*,
size_t)) {
heap->allocation_failure = handler;
}
static int
heap_init(struct gc_heap *heap, const struct gc_options *options) {
// *heap is already initialized to 0.
@@ -1143,6 +1161,7 @@ heap_init(struct gc_heap *heap, const struct gc_options *options) {
allocation_counter_from_thread,
set_heap_size_from_thread,
heap->background_thread);
heap->allocation_failure = allocation_failure;
return 1;
}

View file

@@ -880,8 +880,7 @@ nofl_allocator_next_hole(struct nofl_allocator *alloc,
static struct gc_ref
nofl_allocate(struct nofl_allocator *alloc, struct nofl_space *space,
size_t size, void (*gc)(void*), void *gc_data,
enum gc_allocation_kind kind) {
size_t size, enum gc_allocation_kind kind) {
GC_ASSERT(size > 0);
GC_ASSERT(size <= gc_allocator_large_threshold());
size = align_up(size, NOFL_GRANULE_SIZE);
@@ -894,7 +893,7 @@ nofl_allocate(struct nofl_allocator *alloc, struct nofl_space *space,
break;
}
if (!hole)
gc(gc_data);
return gc_ref_null();
}
}

View file

@@ -73,6 +73,7 @@ struct gc_heap {
struct gc_heap_sizer sizer;
struct gc_event_listener event_listener;
void *event_listener_data;
void* (*allocation_failure)(struct gc_heap *, size_t);
};
#define HEAP_EVENT(heap, event, ...) do { \
@@ -862,10 +863,26 @@ copy_spaces_allocated_bytes(struct gc_heap *heap)
: heap_mono_space(heap)->allocated_bytes_at_last_gc;
}
static enum gc_collection_kind
static int
resolve_pending_large_allocation_and_compute_success(struct gc_heap *heap,
int is_minor_gc) {
struct copy_space *space = heap_resizable_space(heap);
ssize_t deficit = copy_space_page_out_blocks_until_memory_released(space);
if (is_minor_gc)
return 1;
if (deficit <= 0)
return copy_space_can_allocate(space, gc_allocator_large_threshold());
deficit = align_up(deficit, COPY_SPACE_BLOCK_SIZE);
if (heap->sizer.policy == GC_HEAP_SIZE_FIXED)
return 0;
resize_heap(heap, heap->size + deficit);
return 1;
}
static int
collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
static enum gc_collection_kind
static int
collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
struct large_object_space *lospace = heap_large_object_space(heap);
@@ -920,32 +937,28 @@ collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind) {
HEAP_EVENT(heap, live_data_size, live_size);
gc_heap_sizer_on_gc(heap->sizer, heap->size, live_size, pause_ns,
resize_heap);
{
struct copy_space *space = heap_resizable_space(heap);
if (!copy_space_page_out_blocks_until_memory_released(space)
&& heap->sizer.policy == GC_HEAP_SIZE_FIXED) {
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
}
}
int success =
resolve_pending_large_allocation_and_compute_success(heap, is_minor_gc);
HEAP_EVENT(heap, restarting_mutators);
allow_mutators_to_continue(heap);
return gc_kind;
return success;
}
static void trigger_collection(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) {
static int trigger_collection(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) {
struct gc_heap *heap = mutator_heap(mut);
copy_space_allocator_finish(&mut->allocator, heap_allocation_space(heap));
if (GC_GENERATIONAL)
gc_field_set_writer_release_buffer(mutator_field_logger(mut));
heap_lock(heap);
int prev_kind = -1;
int success = 1;
while (mutators_are_stopping(heap))
prev_kind = pause_mutator_for_collection(heap, mut);
if (prev_kind < (int)requested_kind)
collect(mut, requested_kind);
success = collect(mut, requested_kind);
heap_unlock(heap);
return success;
}
void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
@@ -955,13 +968,18 @@ void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
static void* allocate_large(struct gc_mutator *mut, size_t size) {
struct gc_heap *heap = mutator_heap(mut);
struct large_object_space *space = heap_large_object_space(heap);
struct copy_space *copy_space = heap_resizable_space(heap);
size_t npages = large_object_space_npages(space, size);
size_t page_bytes = npages << space->page_size_log2;
copy_space_request_release_memory(copy_space, page_bytes);
if (copy_space_page_out_blocks_until_memory_released(copy_space) > 0
&& !trigger_collection(mut, GC_COLLECTION_COMPACTING)) {
copy_space_maybe_reacquire_memory(copy_space, page_bytes);
return heap->allocation_failure(heap, size);
}
copy_space_request_release_memory(heap_resizable_space(heap),
npages << space->page_size_log2);
while (!copy_space_page_out_blocks_until_memory_released(heap_resizable_space(heap)))
trigger_collection(mut, GC_COLLECTION_COMPACTING);
atomic_fetch_add(&heap->large_object_pages, npages);
void *ret = large_object_space_alloc(space, npages, GC_TRACE_PRECISELY);
@@ -974,10 +992,6 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
return ret;
}
static void get_more_empty_blocks_for_mutator(void *mut) {
trigger_collection(mut, GC_COLLECTION_MINOR);
}
void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
enum gc_allocation_kind kind) {
if (GC_UNLIKELY(kind != GC_ALLOCATION_TAGGED
@@ -996,10 +1010,11 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
ret = copy_space_allocate(&mut->allocator,
heap_allocation_space(mutator_heap(mut)),
size);
if (gc_ref_is_null(ret))
trigger_collection(mut, GC_COLLECTION_MINOR);
else
if (!gc_ref_is_null(ret))
break;
if (trigger_collection(mut, GC_COLLECTION_MINOR))
continue;
return mutator_heap(mut)->allocation_failure(mutator_heap(mut), size);
}
return gc_ref_heap_object(ret);
@@ -1178,6 +1193,18 @@ static void set_heap_size_from_thread(struct gc_heap *heap, size_t size) {
pthread_mutex_unlock(&heap->lock);
}
static void* allocation_failure(struct gc_heap *heap, size_t size) {
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
return NULL;
}
void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*handler)(struct gc_heap*,
size_t)) {
heap->allocation_failure = handler;
}
static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
// *heap is already initialized to 0.
@@ -1214,6 +1241,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
allocation_counter_from_thread,
set_heap_size_from_thread,
heap->background_thread);
heap->allocation_failure = allocation_failure;
return 1;
}

View file

@@ -52,6 +52,7 @@ struct gc_heap {
struct gc_heap_sizer sizer;
struct gc_event_listener event_listener;
void *event_listener_data;
void* (*allocation_failure)(struct gc_heap *, size_t);
};
// One mutator per space, can just store the heap in the mutator.
struct gc_mutator {
@@ -103,9 +104,9 @@ static uintptr_t align_up(uintptr_t addr, size_t align) {
static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }
static size_t max_size(size_t a, size_t b) { return a < b ? b : a; }
static void collect(struct gc_mutator *mut, size_t for_alloc) GC_NEVER_INLINE;
static void collect_for_alloc(struct gc_mutator *mut,
size_t bytes) GC_NEVER_INLINE;
static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
static int collect_for_alloc(struct gc_mutator *mut,
size_t bytes) GC_NEVER_INLINE;
static void trace(struct gc_edge edge, struct gc_heap *heap, void *visit_data);
@@ -385,7 +386,7 @@ static uintptr_t resolve_finalizers(struct gc_heap *heap, uintptr_t grey) {
return grey;
}
static void collect(struct gc_mutator *mut, size_t for_alloc) {
static void collect(struct gc_mutator *mut) {
struct gc_heap *heap = mutator_heap(mut);
int is_minor = 0;
int is_compacting = 1;
@@ -429,7 +430,6 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
size_t live_size = semi->live_bytes_at_last_gc;
live_size += large_object_space_size_at_last_collection(large);
live_size += for_alloc;
uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
HEAP_EVENT(heap, live_data_size, live_size);
DEBUG("gc %zu: live size %zu, heap size %zu\n", heap->count, live_size,
@@ -443,30 +443,29 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
// fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
}
static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
collect(mut, bytes);
static int collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
collect(mut);
struct semi_space *space = mutator_semi_space(mut);
if (bytes < space->limit - space->hp)
return;
if (bytes <= space->limit - space->hp)
return 1;
struct gc_heap *heap = mutator_heap(mut);
if (heap->options->common.heap_size_policy != GC_HEAP_SIZE_FIXED) {
// Each collection can potentially resize only the inactive
// fromspace, so if we really run out of space we will need to
// collect again in order to resize the other half.
collect(mut, bytes);
if (bytes < space->limit - space->hp)
return;
collect(mut);
if (bytes <= space->limit - space->hp)
return 1;
}
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
return 0;
}
void gc_collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind) {
// Ignore requested kind, because we always compact.
collect(mut, 0);
collect(mut);
}
int gc_object_is_old_generation_slow(struct gc_mutator *mut,
@@ -482,8 +481,11 @@ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
static int collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
size_t bytes = npages * mutator_semi_space(mut)->page_size;
// Large object pages don't need a copy reserve.
bytes /= 2;
return collect_for_alloc(mut, bytes);
}
static void* allocate_large(struct gc_mutator *mut, size_t size) {
@@ -492,8 +494,9 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
struct semi_space *semi_space = heap_semi_space(heap);
size_t npages = large_object_space_npages(space, size);
while (!semi_space_steal_pages(semi_space, npages))
collect_for_large_alloc(mut, npages);
if (!semi_space_steal_pages(semi_space, npages)
&& !collect_for_large_alloc(mut, npages))
return heap->allocation_failure(heap, size);
void *ret = large_object_space_alloc(space, npages, GC_TRACE_PRECISELY);
@@ -522,9 +525,10 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
uintptr_t addr = space->hp;
uintptr_t new_hp = align_up (addr + size, GC_ALIGNMENT);
if (space->limit < new_hp) {
// The factor of 2 is for both regions.
collect_for_alloc(mut, size * 2);
continue;
if (collect_for_alloc(mut, size))
continue;
struct gc_heap *heap = mutator_heap(mut);
return heap->allocation_failure(heap, size);
}
space->hp = new_hp;
return (void *)addr;
@@ -626,6 +630,18 @@ static void ignore_async_heap_size_adjustment(struct gc_heap *heap,
size_t size) {
}
static void* allocation_failure(struct gc_heap *heap, size_t size) {
fprintf(stderr, "ran out of space, heap size %zu\n", heap->size);
GC_CRASH();
return NULL;
}
void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*handler)(struct gc_heap*,
size_t)) {
heap->allocation_failure = handler;
}
static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
heap->extern_space = NULL;
heap->pending_ephemerons_size_factor = 0.01;
@@ -642,6 +658,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
get_allocation_counter,
ignore_async_heap_size_adjustment,
NULL);
heap->allocation_failure = allocation_failure;
return heap_prepare_pending_ephemerons(heap);
}