mirror of https://git.savannah.gnu.org/git/guile.git
Allow embedder to request a major GC
This commit is contained in: parent 8aa2036331 · commit 514dcc702e
9 changed files with 104 additions and 70 deletions
@@ -4,6 +4,7 @@
 #include "gc-config.h"
 #include "gc-assert.h"
 #include "gc-attrs.h"
+#include "gc-collection-kind.h"
 #include "gc-edge.h"
 #include "gc-event-listener.h"
 #include "gc-inline.h"
@@ -47,7 +48,8 @@ GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
 GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
 
-GC_API_ void gc_collect(struct gc_mutator *mut);
+GC_API_ void gc_collect(struct gc_mutator *mut,
+                        enum gc_collection_kind requested_kind);
 
 static inline void gc_clear_fresh_allocation(struct gc_ref obj,
                                              size_t size) GC_ALWAYS_INLINE;
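
For orientation, a minimal sketch (not part of this commit) of how an embedder might drive the new two-argument entry point. The helper names are hypothetical; the `gc_collect` declaration and `enum gc_collection_kind` above are assumed to be in scope:

    // Hypothetical embedder hooks built on the new API.
    static void before_heap_snapshot(struct gc_mutator *mut) {
      // Force a full trace so liveness is exact at snapshot time.
      gc_collect(mut, GC_COLLECTION_MAJOR);
    }

    static void routine_gc(struct gc_mutator *mut) {
      // Let the collector's own heuristics pick the kind.
      gc_collect(mut, GC_COLLECTION_ANY);
    }
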
@@ -37,10 +37,9 @@ static inline void gc_basic_stats_init(void *data, size_t heap_size) {
 }
 
 static inline void gc_basic_stats_prepare_gc(void *data,
-                                             int is_minor,
-                                             int is_compacting) {
+                                             enum gc_collection_kind kind) {
   struct gc_basic_stats *stats = data;
-  if (is_minor)
+  if (kind == GC_COLLECTION_MINOR)
     stats->minor_collection_count++;
   else
     stats->major_collection_count++;
11  api/gc-collection-kind.h  (new file)
@@ -0,0 +1,11 @@
+#ifndef GC_COLLECTION_KIND_H
+#define GC_COLLECTION_KIND_H
+
+enum gc_collection_kind {
+  GC_COLLECTION_ANY,
+  GC_COLLECTION_MINOR,
+  GC_COLLECTION_MAJOR,
+  GC_COLLECTION_COMPACTING,
+};
+
+#endif // GC_COLLECTION_KIND_H
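
The declaration order is load-bearing: whippet.c below compares kinds with `<`, so the enum doubles as a strength ordering, ANY < MINOR < MAJOR < COMPACTING. A one-line reading of that convention (illustration only, not code from this commit):

    // A collection of kind `done` satisfies any request `requested`
    // that is no stronger than it.
    static int collection_satisfies(enum gc_collection_kind done,
                                    enum gc_collection_kind requested) {
      return done >= requested;
    }
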
@@ -1,9 +1,11 @@
 #ifndef GC_EVENT_LISTENER_H
 #define GC_EVENT_LISTENER_H
 
+#include "gc-collection-kind.h"
+
 struct gc_event_listener {
   void (*init)(void *data, size_t heap_size);
-  void (*prepare_gc)(void *data, int is_minor, int is_compacting);
+  void (*prepare_gc)(void *data, enum gc_collection_kind kind);
   void (*requesting_stop)(void *data);
   void (*waiting_for_stop)(void *data);
   void (*mutators_stopped)(void *data);
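
An embedder-side listener callback would now look something like the sketch below; the function and counter names are hypothetical, and the rest of the listener struct (which has more members than this hunk shows) is omitted:

    #include <stdio.h>

    // Hypothetical prepare_gc callback matching the new signature.
    static void my_prepare_gc(void *data, enum gc_collection_kind kind) {
      unsigned *counts_by_kind = data;   // one counter per collection kind
      counts_by_kind[kind]++;
      fprintf(stderr, "GC starting, kind=%d\n", (int)kind);
    }
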
@@ -5,8 +5,7 @@
 
 static inline void gc_null_event_listener_init(void *data, size_t size) {}
 static inline void gc_null_event_listener_prepare_gc(void *data,
-                                                     int is_minor,
-                                                     int is_compacting) {}
+                                                     enum gc_collection_kind) {}
 static inline void gc_null_event_listener_requesting_stop(void *data) {}
 static inline void gc_null_event_listener_waiting_for_stop(void *data) {}
 static inline void gc_null_event_listener_mutators_stopped(void *data) {}
@@ -105,7 +105,8 @@ static double heap_multiplier;
 static size_t nthreads;
 
 static void cause_gc(struct gc_mutator *mut) {
-  gc_collect(mut);
+  // Doing a full collection lets us reason precisely about liveness.
+  gc_collect(mut, GC_COLLECTION_MAJOR);
 }
 
 static void make_ephemeron_chain(struct thread *t, EphemeronHandle *head,
22  src/bdw.c
@@ -125,8 +125,22 @@ void* gc_allocate_pointerless(struct gc_mutator *mut,
   return GC_malloc_atomic(size);
 }
 
-void gc_collect(struct gc_mutator *mut) {
-  GC_gcollect();
+void gc_collect(struct gc_mutator *mut,
+                enum gc_collection_kind requested_kind) {
+  switch (requested_kind) {
+  case GC_COLLECTION_MINOR:
+    GC_collect_a_little();
+    break;
+  case GC_COLLECTION_ANY:
+  case GC_COLLECTION_MAJOR:
+    GC_gcollect();
+    break;
+  case GC_COLLECTION_COMPACTING:
+    GC_gcollect_and_unmap();
+    break;
+  default:
+    GC_CRASH();
+  }
 }
 
 void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
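
A note on this mapping: BDW-GC is non-moving, so none of these calls literally compact. GC_collect_a_little() performs a bounded increment of collection work rather than a true nursery collection, which makes it only an approximation of GC_COLLECTION_MINOR, and GC_gcollect_and_unmap() runs a full collection and additionally returns unused pages to the OS, the closest available analogue of a compacting request.
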
@@ -330,9 +344,7 @@ gc_heap_pending_ephemerons(struct gc_heap *heap) {
 static void on_collection_event(GC_EventType event) {
   switch (event) {
   case GC_EVENT_START: {
-    int is_minor = 0;
-    int is_compacting = 0;
-    HEAP_EVENT(prepare_gc, is_minor, is_compacting);
+    HEAP_EVENT(prepare_gc, GC_COLLECTION_MAJOR);
     HEAP_EVENT(requesting_stop);
     HEAP_EVENT(waiting_for_stop);
     break;
@@ -354,7 +354,7 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct gc_heap *heap = mutator_heap(mut);
-  int is_minor = 0;
-  int is_compacting = 1;
-  HEAP_EVENT(heap, prepare_gc, is_minor, is_compacting);
+  HEAP_EVENT(heap, prepare_gc, GC_COLLECTION_COMPACTING);
 
   HEAP_EVENT(heap, requesting_stop);
   HEAP_EVENT(heap, waiting_for_stop);
@@ -414,7 +414,9 @@ static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
   GC_CRASH();
 }
 
-void gc_collect(struct gc_mutator *mut) {
+void gc_collect(struct gc_mutator *mut,
+                enum gc_collection_kind requested_kind) {
+  // Ignore requested kind, because we always compact.
   collect(mut, 0);
 }
 
116  src/whippet.c
@@ -288,15 +288,6 @@ struct mark_space {
   uintptr_t fragmentation_granules_since_last_collection; // atomically
 };
 
-enum gc_kind {
-  GC_KIND_FLAG_MINOR = GC_GENERATIONAL, // 0 or 1
-  GC_KIND_FLAG_EVACUATING = 0x2,
-  GC_KIND_MINOR_IN_PLACE = GC_KIND_FLAG_MINOR,
-  GC_KIND_MINOR_EVACUATING = GC_KIND_FLAG_MINOR | GC_KIND_FLAG_EVACUATING,
-  GC_KIND_MAJOR_IN_PLACE = 0,
-  GC_KIND_MAJOR_EVACUATING = GC_KIND_FLAG_EVACUATING,
-};
-
 struct gc_heap {
   struct mark_space mark_space;
   struct large_object_space large_object_space;
@@ -310,7 +301,7 @@ struct gc_heap {
   int mark_while_stopping;
   int check_pending_ephemerons;
   struct gc_pending_ephemerons *pending_ephemerons;
-  enum gc_kind gc_kind;
+  enum gc_collection_kind gc_kind;
   int multithreaded;
   size_t active_mutator_count;
   size_t mutator_count;
@@ -318,7 +309,7 @@ struct gc_heap {
   struct gc_mutator *mutator_trace_list;
   long count;
   long minor_count;
-  uint8_t last_collection_was_minor;
+  enum gc_collection_kind last_collection_kind;
   struct gc_mutator *deactivated_mutators;
   struct tracer tracer;
   double fragmentation_low_threshold;
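
As a reading aid for the hunks that follow (not part of the patch itself), the removed private bit-flag enum corresponds to the public kinds roughly as

    GC_KIND_MINOR_IN_PLACE    ->  GC_COLLECTION_MINOR
    GC_KIND_MAJOR_IN_PLACE    ->  GC_COLLECTION_MAJOR
    GC_KIND_MAJOR_EVACUATING  ->  GC_COLLECTION_COMPACTING

with no separate counterpart for the minor-evacuating combination, and bit tests like `gc_kind & GC_KIND_FLAG_MINOR` becoming equality tests against GC_COLLECTION_MINOR.
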
@@ -380,7 +371,8 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
 
-static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut,
+                    enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
 
 static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
   GC_ASSERT(((uintptr_t)mark & 7) == 0);
@@ -1416,7 +1408,7 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
 
 static void trace_generational_roots(struct gc_heap *heap) {
   // TODO: Add lospace nursery.
-  if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
+  if (atomic_load(&heap->gc_kind) == GC_COLLECTION_MINOR) {
     mark_space_trace_remembered_set(heap_mark_space(heap), heap);
     large_object_space_trace_remembered_set(heap_large_object_space(heap),
                                             enqueue_generational_root,
@@ -1580,10 +1572,12 @@ static double clamp_major_gc_yield_threshold(struct gc_heap *heap,
   return threshold;
 }
 
-static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
+static enum gc_collection_kind
+determine_collection_kind(struct gc_heap *heap,
+                          enum gc_collection_kind requested) {
   struct mark_space *mark_space = heap_mark_space(heap);
-  enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
-  enum gc_kind gc_kind;
+  enum gc_collection_kind previous_gc_kind = atomic_load(&heap->gc_kind);
+  enum gc_collection_kind gc_kind;
   int mark_while_stopping = 1;
   double yield = heap_last_gc_yield(heap);
   double fragmentation = heap_fragmentation(heap);
@@ -1592,13 +1586,16 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
 
   if (heap->count == 0) {
     DEBUG("first collection is always major\n");
-    gc_kind = GC_KIND_MAJOR_IN_PLACE;
+    gc_kind = GC_COLLECTION_MAJOR;
+  } else if (requested != GC_COLLECTION_ANY) {
+    DEBUG("user specifically requested collection kind %d\n", (int)requested);
+    gc_kind = requested;
   } else if (pending > 0) {
     DEBUG("evacuating due to need to reclaim %zd bytes\n", pending);
     // During the last cycle, a large allocation could not find enough
     // free blocks, and we decided not to expand the heap.  Let's do an
     // evacuating major collection to maximize the free block yield.
-    gc_kind = GC_KIND_MAJOR_EVACUATING;
+    gc_kind = GC_COLLECTION_COMPACTING;
 
     // Generally speaking, we allow mutators to mark their own stacks
     // before pausing.  This is a limited form of concurrent marking, as
@@ -1610,7 +1607,7 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
     // marking.  Of course if the mutator has conservative roots we will
     // have pinning anyway and might as well allow ragged stops.
     mark_while_stopping = gc_has_conservative_roots();
-  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING
+  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING
              && fragmentation >= heap->fragmentation_low_threshold) {
     DEBUG("continuing evacuation due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
@@ -1618,46 +1615,50 @@ static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
     // For some reason, we already decided to compact in the past,
     // and fragmentation hasn't yet fallen below a low-water-mark.
     // Keep going.
-    gc_kind = GC_KIND_MAJOR_EVACUATING;
+    gc_kind = GC_COLLECTION_COMPACTING;
   } else if (fragmentation > heap->fragmentation_high_threshold) {
     // Switch to evacuation mode if the heap is too fragmented.
    DEBUG("triggering compaction due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
           heap->fragmentation_high_threshold * 100.);
-    gc_kind = GC_KIND_MAJOR_EVACUATING;
-  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING) {
+    gc_kind = GC_COLLECTION_COMPACTING;
+  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING) {
     // We were evacuating, but we're good now.  Go back to minor
     // collections.
     DEBUG("returning to in-place collection, fragmentation %.2f%% < %.2f%%\n",
           fragmentation * 100.,
           heap->fragmentation_low_threshold * 100.);
-    gc_kind = GC_KIND_MINOR_IN_PLACE;
-  } else if (previous_gc_kind != GC_KIND_MINOR_IN_PLACE) {
+    gc_kind = GC_GENERATIONAL ? GC_COLLECTION_MINOR : GC_COLLECTION_MAJOR;
+  } else if (!GC_GENERATIONAL) {
+    DEBUG("keeping on with major in-place GC\n");
+    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MAJOR);
+    gc_kind = GC_COLLECTION_MAJOR;
+  } else if (previous_gc_kind != GC_COLLECTION_MINOR) {
     DEBUG("returning to minor collection after major collection\n");
     // Go back to minor collections.
-    gc_kind = GC_KIND_MINOR_IN_PLACE;
+    gc_kind = GC_COLLECTION_MINOR;
   } else if (yield < heap->major_gc_yield_threshold) {
     DEBUG("collection yield too low, triggering major collection\n");
     // Nursery is getting tight; trigger a major GC.
-    gc_kind = GC_KIND_MAJOR_IN_PLACE;
+    gc_kind = GC_COLLECTION_MAJOR;
   } else {
     DEBUG("keeping on with minor GC\n");
     // Nursery has adequate space; keep trucking with minor GCs.
-    GC_ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE);
-    gc_kind = GC_KIND_MINOR_IN_PLACE;
+    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MINOR);
+    gc_kind = GC_COLLECTION_MINOR;
   }
 
   if (gc_has_conservative_intraheap_edges() &&
-      (gc_kind & GC_KIND_FLAG_EVACUATING)) {
+      gc_kind == GC_COLLECTION_COMPACTING) {
     DEBUG("welp. conservative heap scanning, no evacuation for you\n");
-    gc_kind = GC_KIND_MAJOR_IN_PLACE;
+    gc_kind = GC_COLLECTION_MAJOR;
     mark_while_stopping = 1;
   }
 
   // If this is the first in a series of minor collections, reset the
   // threshold at which we should do a major GC.
-  if ((gc_kind & GC_KIND_FLAG_MINOR) &&
-      (previous_gc_kind & GC_KIND_FLAG_MINOR) != GC_KIND_FLAG_MINOR) {
+  if (gc_kind == GC_COLLECTION_MINOR &&
+      previous_gc_kind != GC_COLLECTION_MINOR) {
     double yield = heap_last_gc_yield(heap);
     double threshold = yield * heap->minor_gc_yield_threshold;
     double clamped = clamp_major_gc_yield_threshold(heap, threshold);
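
Net effect of the hunks above: an explicit request short-circuits the heuristics (any kind other than GC_COLLECTION_ANY is taken as-is, except on the very first collection, which is always major), but the conservative-edge check still applies afterwards. A sketch of the resulting behavior, for illustration only:

    // With an explicit compaction request:
    enum gc_collection_kind kind =
        determine_collection_kind(heap, GC_COLLECTION_COMPACTING);
    // Normally kind == GC_COLLECTION_COMPACTING; but if
    // gc_has_conservative_intraheap_edges(), it is downgraded to
    // GC_COLLECTION_MAJOR and mark_while_stopping is forced to 1.
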
@@ -1687,7 +1688,7 @@ static void release_evacuation_target_blocks(struct mark_space *space) {
 static void prepare_for_evacuation(struct gc_heap *heap) {
   struct mark_space *space = heap_mark_space(heap);
 
-  if ((heap->gc_kind & GC_KIND_FLAG_EVACUATING) == 0) {
+  if (heap->gc_kind != GC_COLLECTION_COMPACTING) {
     space->evacuating = 0;
     space->evacuation_reserve = space->evacuation_minimum_reserve;
     return;
@@ -1796,7 +1797,7 @@ static void trace_roots_after_stop(struct gc_heap *heap) {
 }
 
 static void mark_space_finish_gc(struct mark_space *space,
-                                 enum gc_kind gc_kind) {
+                                 enum gc_collection_kind gc_kind) {
   space->evacuating = 0;
   reset_sweeper(space);
   update_mark_patterns(space, 0);
@@ -1824,7 +1825,8 @@ static void sweep_ephemerons(struct gc_heap *heap) {
   return gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
 }
 
-static void collect(struct gc_mutator *mut) {
+static void collect(struct gc_mutator *mut,
+                    enum gc_collection_kind requested_kind) {
   struct gc_heap *heap = mutator_heap(mut);
   struct mark_space *space = heap_mark_space(heap);
   struct large_object_space *lospace = heap_large_object_space(heap);
@@ -1835,12 +1837,12 @@ static void collect(struct gc_mutator *mut) {
   }
   MUTATOR_EVENT(mut, mutator_cause_gc);
   DEBUG("start collect #%ld:\n", heap->count);
-  enum gc_kind gc_kind = determine_collection_kind(heap);
-  HEAP_EVENT(heap, prepare_gc, gc_kind & GC_KIND_FLAG_MINOR,
-             gc_kind & GC_KIND_FLAG_EVACUATING);
-  update_mark_patterns(space, !(gc_kind & GC_KIND_FLAG_MINOR));
-  large_object_space_start_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
-  gc_extern_space_start_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
+  enum gc_collection_kind gc_kind =
+    determine_collection_kind(heap, requested_kind);
+  HEAP_EVENT(heap, prepare_gc, gc_kind);
+  update_mark_patterns(space, gc_kind != GC_COLLECTION_MINOR);
+  large_object_space_start_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
+  gc_extern_space_start_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
   resolve_ephemerons_lazily(heap);
   tracer_prepare(heap);
   HEAP_EVENT(heap, requesting_stop);
@@ -1868,11 +1870,11 @@ static void collect(struct gc_mutator *mut) {
   sweep_ephemerons(heap);
   tracer_release(heap);
   mark_space_finish_gc(space, gc_kind);
-  large_object_space_finish_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
-  gc_extern_space_finish_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
+  large_object_space_finish_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
+  gc_extern_space_finish_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
   heap->count++;
-  heap->last_collection_was_minor = gc_kind & GC_KIND_FLAG_MINOR;
-  if (heap->last_collection_was_minor)
+  heap->last_collection_kind = gc_kind;
+  if (gc_kind == GC_COLLECTION_MINOR)
     heap->minor_count++;
   heap_reset_large_object_pages(heap, lospace->live_pages_at_last_collection);
   HEAP_EVENT(heap, restarting_mutators);
@@ -2079,7 +2081,7 @@ static size_t next_hole(struct gc_mutator *mut) {
         // mark bytes didn't rotate, so we have no cleanup to do; and
         // we shouldn't try to allocate into them as it's not worth
         // it.  Any wasted space is measured as fragmentation.
-        if (mutator_heap(mut)->last_collection_was_minor)
+        if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR)
           continue;
         else
           block_summary_clear_flag(summary, BLOCK_VENERABLE);
@@ -2092,7 +2094,7 @@ static size_t next_hole(struct gc_mutator *mut) {
         // mostly old data.  Sweep any garbage, commit the mark as
         // venerable, and avoid allocating into it.
         block_summary_clear_flag(summary, BLOCK_VENERABLE_AFTER_SWEEP);
-        if (mutator_heap(mut)->last_collection_was_minor) {
+        if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR) {
          finish_sweeping_in_block(mut);
          block_summary_set_flag(summary, BLOCK_VENERABLE);
          continue;
@@ -2153,18 +2155,22 @@ static void finish_sweeping(struct gc_mutator *mut) {
   finish_hole(mut);
 }
 
-static void trigger_collection(struct gc_mutator *mut) {
+static void trigger_collection(struct gc_mutator *mut,
+                               enum gc_collection_kind requested_kind) {
   struct gc_heap *heap = mutator_heap(mut);
+  enum gc_collection_kind last_collection_kind = GC_COLLECTION_ANY;
   heap_lock(heap);
-  if (mutators_are_stopping(heap))
+  while (mutators_are_stopping(heap)) {
     pause_mutator_for_collection_with_lock(mut);
-  else
-    collect(mut);
+    last_collection_kind = heap->last_collection_kind;
+  }
+  if (last_collection_kind < requested_kind)
+    collect(mut, requested_kind);
   heap_unlock(heap);
 }
 
-void gc_collect(struct gc_mutator *mut) {
-  trigger_collection(mut);
+void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
+  trigger_collection(mut, kind);
 }
 
 static void* allocate_large(struct gc_mutator *mut, size_t size) {
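
The `<` comparison above is where the ordering of enum gc_collection_kind earns its keep: a mutator that blocked while another thread was already collecting only triggers its own collection if the collection that just finished was weaker than what it asked for. An illustration, not code from the patch:

    // Thread A calls gc_collect(mut, GC_COLLECTION_COMPACTING) while
    // thread B's major collection is in progress:
    //   - A waits in the while loop; afterwards last_collection_kind
    //     is GC_COLLECTION_MAJOR.
    //   - MAJOR < COMPACTING, so A still runs collect(mut, COMPACTING).
    // Had A asked for GC_COLLECTION_ANY or GC_COLLECTION_MINOR, B's
    // major collection would already satisfy the request and A would
    // return without collecting again.
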
@@ -2177,7 +2183,7 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
                                npages << space->page_size_log2);
 
   while (!sweep_until_memory_released(mut))
-    trigger_collection(mut);
+    trigger_collection(mut, GC_COLLECTION_COMPACTING);
   atomic_fetch_add(&heap->large_object_pages, npages);
 
   void *ret = large_object_space_alloc(space, npages);
@@ -2215,7 +2221,7 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
       break;
     }
     if (!hole)
-      trigger_collection(mut);
+      trigger_collection(mut, GC_COLLECTION_ANY);
   }
   ret = gc_ref(mut->alloc);
   mut->alloc += size;