
Revert "Allow embedder to request a major GC"

This reverts commit f39e6ee69f0aa2e14227a019a5332ba129418977.
Andy Wingo 2023-10-18 10:28:46 +02:00
parent 514dcc702e
commit 0c7bdacc51
9 changed files with 70 additions and 104 deletions
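At the API level, the revert removes the enum gc_collection_kind parameter from gc_collect, so an embedder can no longer ask for a particular kind of collection. A minimal sketch of how a call site changes under this revert; the header name, the wrapper function request_full_collection, and the BEFORE_REVERT macro are illustrative, not part of the tree:

#include "gc-api.h"  /* assumed public header; adjust to the embedder's include */

#ifdef BEFORE_REVERT
/* Pre-revert (commit f39e6ee): the embedder picks the collection kind. */
static void request_full_collection(struct gc_mutator *mut) {
  gc_collect(mut, GC_COLLECTION_MAJOR);
}
#else
/* Post-revert: the collector alone decides between minor and major GC. */
static void request_full_collection(struct gc_mutator *mut) {
  gc_collect(mut);
}
#endif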

View file

@@ -4,7 +4,6 @@
 #include "gc-config.h"
 #include "gc-assert.h"
 #include "gc-attrs.h"
-#include "gc-collection-kind.h"
 #include "gc-edge.h"
 #include "gc-event-listener.h"
 #include "gc-inline.h"
@@ -48,8 +47,7 @@ GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
 GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
 
-GC_API_ void gc_collect(struct gc_mutator *mut,
-                        enum gc_collection_kind requested_kind);
+GC_API_ void gc_collect(struct gc_mutator *mut);
 
 static inline void gc_clear_fresh_allocation(struct gc_ref obj,
                                              size_t size) GC_ALWAYS_INLINE;

View file

@@ -37,9 +37,10 @@ static inline void gc_basic_stats_init(void *data, size_t heap_size) {
 }
 
 static inline void gc_basic_stats_prepare_gc(void *data,
-                                             enum gc_collection_kind kind) {
+                                             int is_minor,
+                                             int is_compacting) {
   struct gc_basic_stats *stats = data;
-  if (kind == GC_COLLECTION_MINOR)
+  if (is_minor)
     stats->minor_collection_count++;
   else
     stats->major_collection_count++;

View file

@@ -1,11 +0,0 @@
-#ifndef GC_COLLECTION_KIND_H
-#define GC_COLLECTION_KIND_H
-
-enum gc_collection_kind {
-  GC_COLLECTION_ANY,
-  GC_COLLECTION_MINOR,
-  GC_COLLECTION_MAJOR,
-  GC_COLLECTION_COMPACTING,
-};
-
-#endif // GC_COLLECTION_KIND_H

View file

@@ -1,11 +1,9 @@
 #ifndef GC_EVENT_LISTENER_H
 #define GC_EVENT_LISTENER_H
 
-#include "gc-collection-kind.h"
-
 struct gc_event_listener {
   void (*init)(void *data, size_t heap_size);
-  void (*prepare_gc)(void *data, enum gc_collection_kind kind);
+  void (*prepare_gc)(void *data, int is_minor, int is_compacting);
   void (*requesting_stop)(void *data);
   void (*waiting_for_stop)(void *data);
   void (*mutators_stopped)(void *data);
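For reference, a user-defined listener against this restored callback shape fills in prepare_gc with the two flags. The following is a sketch only, mirroring what gc_basic_stats does in the hunk further up; the names my_stats and my_prepare_gc are invented here:

#include <stddef.h>

struct my_stats {
  size_t minor_collection_count;
  size_t major_collection_count;
};

static void my_prepare_gc(void *data, int is_minor, int is_compacting) {
  struct my_stats *stats = data;
  if (is_minor)
    stats->minor_collection_count++;
  else
    stats->major_collection_count++;
  (void)is_compacting;  /* compaction does not change these counters */
}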

View file

@@ -5,7 +5,8 @@
 
 static inline void gc_null_event_listener_init(void *data, size_t size) {}
 static inline void gc_null_event_listener_prepare_gc(void *data,
-                                                      enum gc_collection_kind) {}
+                                                      int is_minor,
+                                                      int is_compacting) {}
 static inline void gc_null_event_listener_requesting_stop(void *data) {}
 static inline void gc_null_event_listener_waiting_for_stop(void *data) {}
 static inline void gc_null_event_listener_mutators_stopped(void *data) {}

View file

@@ -105,8 +105,7 @@ static double heap_multiplier;
 static size_t nthreads;
 
 static void cause_gc(struct gc_mutator *mut) {
-  // Doing a full collection lets us reason precisely about liveness.
-  gc_collect(mut, GC_COLLECTION_MAJOR);
+  gc_collect(mut);
 }
 
 static void make_ephemeron_chain(struct thread *t, EphemeronHandle *head,

View file

@@ -125,22 +125,8 @@ void* gc_allocate_pointerless(struct gc_mutator *mut,
   return GC_malloc_atomic(size);
 }
 
-void gc_collect(struct gc_mutator *mut,
-                enum gc_collection_kind requested_kind) {
-  switch (requested_kind) {
-  case GC_COLLECTION_MINOR:
-    GC_collect_a_little();
-    break;
-  case GC_COLLECTION_ANY:
-  case GC_COLLECTION_MAJOR:
-    GC_gcollect();
-    break;
-  case GC_COLLECTION_COMPACTING:
-    GC_gcollect_and_unmap();
-    break;
-  default:
-    GC_CRASH();
-  }
+void gc_collect(struct gc_mutator *mut) {
+  GC_gcollect();
 }
 
 void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
@@ -344,7 +330,9 @@ gc_heap_pending_ephemerons(struct gc_heap *heap) {
 static void on_collection_event(GC_EventType event) {
   switch (event) {
   case GC_EVENT_START: {
-    HEAP_EVENT(prepare_gc, GC_COLLECTION_MAJOR);
+    int is_minor = 0;
+    int is_compacting = 0;
+    HEAP_EVENT(prepare_gc, is_minor, is_compacting);
     HEAP_EVENT(requesting_stop);
     HEAP_EVENT(waiting_for_stop);
     break;

View file

@@ -354,7 +354,7 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct gc_heap *heap = mutator_heap(mut);
   int is_minor = 0;
   int is_compacting = 1;
-  HEAP_EVENT(heap, prepare_gc, GC_COLLECTION_COMPACTING);
+  HEAP_EVENT(heap, prepare_gc, is_minor, is_compacting);
   HEAP_EVENT(heap, requesting_stop);
   HEAP_EVENT(heap, waiting_for_stop);
@@ -414,9 +414,7 @@ static void collect_for_alloc(struct gc_mutator *mut, size_t bytes) {
   GC_CRASH();
 }
 
-void gc_collect(struct gc_mutator *mut,
-                enum gc_collection_kind requested_kind) {
-  // Ignore requested kind, because we always compact.
+void gc_collect(struct gc_mutator *mut) {
   collect(mut, 0);
 }

View file

@@ -288,6 +288,15 @@ struct mark_space {
   uintptr_t fragmentation_granules_since_last_collection; // atomically
 };
 
+enum gc_kind {
+  GC_KIND_FLAG_MINOR = GC_GENERATIONAL, // 0 or 1
+  GC_KIND_FLAG_EVACUATING = 0x2,
+  GC_KIND_MINOR_IN_PLACE = GC_KIND_FLAG_MINOR,
+  GC_KIND_MINOR_EVACUATING = GC_KIND_FLAG_MINOR | GC_KIND_FLAG_EVACUATING,
+  GC_KIND_MAJOR_IN_PLACE = 0,
+  GC_KIND_MAJOR_EVACUATING = GC_KIND_FLAG_EVACUATING,
+};
+
 struct gc_heap {
   struct mark_space mark_space;
   struct large_object_space large_object_space;
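The restored enum gc_kind packs two orthogonal bits: GC_KIND_FLAG_MINOR equals GC_GENERATIONAL (a build-time 0 or 1, so in non-generational builds the minor kinds collapse onto their major counterparts) and GC_KIND_FLAG_EVACUATING is 0x2. The helpers below are purely illustrative, not part of the patch; they just spell out how the rest of the file tests these bits:

static inline int gc_kind_is_minor(enum gc_kind kind) {
  return (kind & GC_KIND_FLAG_MINOR) != 0;
}
static inline int gc_kind_is_evacuating(enum gc_kind kind) {
  return (kind & GC_KIND_FLAG_EVACUATING) != 0;
}
/* E.g. GC_KIND_MINOR_EVACUATING is GC_KIND_FLAG_MINOR | GC_KIND_FLAG_EVACUATING,
   so both helpers return 1 for it, while GC_KIND_MAJOR_IN_PLACE (0) sets neither bit. */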
@@ -301,7 +310,7 @@ struct gc_heap {
   int mark_while_stopping;
   int check_pending_ephemerons;
   struct gc_pending_ephemerons *pending_ephemerons;
-  enum gc_collection_kind gc_kind;
+  enum gc_kind gc_kind;
   int multithreaded;
   size_t active_mutator_count;
   size_t mutator_count;
@@ -309,7 +318,7 @@ struct gc_heap {
   struct gc_mutator *mutator_trace_list;
   long count;
   long minor_count;
-  enum gc_collection_kind last_collection_kind;
+  uint8_t last_collection_was_minor;
   struct gc_mutator *deactivated_mutators;
   struct tracer tracer;
   double fragmentation_low_threshold;
@@ -371,8 +380,7 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
 
-static void collect(struct gc_mutator *mut,
-                    enum gc_collection_kind requested_kind) GC_NEVER_INLINE;
+static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
 static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
   GC_ASSERT(((uintptr_t)mark & 7) == 0);
@@ -1408,7 +1416,7 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
 static void trace_generational_roots(struct gc_heap *heap) {
   // TODO: Add lospace nursery.
-  if (atomic_load(&heap->gc_kind) == GC_COLLECTION_MINOR) {
+  if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
     mark_space_trace_remembered_set(heap_mark_space(heap), heap);
     large_object_space_trace_remembered_set(heap_large_object_space(heap),
                                             enqueue_generational_root,
@@ -1572,12 +1580,10 @@ static double clamp_major_gc_yield_threshold(struct gc_heap *heap,
   return threshold;
 }
 
-static enum gc_collection_kind
-determine_collection_kind(struct gc_heap *heap,
-                          enum gc_collection_kind requested) {
+static enum gc_kind determine_collection_kind(struct gc_heap *heap) {
   struct mark_space *mark_space = heap_mark_space(heap);
-  enum gc_collection_kind previous_gc_kind = atomic_load(&heap->gc_kind);
-  enum gc_collection_kind gc_kind;
+  enum gc_kind previous_gc_kind = atomic_load(&heap->gc_kind);
+  enum gc_kind gc_kind;
   int mark_while_stopping = 1;
   double yield = heap_last_gc_yield(heap);
   double fragmentation = heap_fragmentation(heap);
@@ -1586,16 +1592,13 @@ determine_collection_kind(struct gc_heap *heap,
 
   if (heap->count == 0) {
     DEBUG("first collection is always major\n");
-    gc_kind = GC_COLLECTION_MAJOR;
-  } else if (requested != GC_COLLECTION_ANY) {
-    DEBUG("user specifically requested collection kind %d\n", (int)requested);
-    gc_kind = requested;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
   } else if (pending > 0) {
     DEBUG("evacuating due to need to reclaim %zd bytes\n", pending);
     // During the last cycle, a large allocation could not find enough
     // free blocks, and we decided not to expand the heap.  Let's do an
     // evacuating major collection to maximize the free block yield.
-    gc_kind = GC_COLLECTION_COMPACTING;
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
 
     // Generally speaking, we allow mutators to mark their own stacks
     // before pausing.  This is a limited form of concurrent marking, as
@@ -1607,7 +1610,7 @@ determine_collection_kind(struct gc_heap *heap,
     // marking.  Of course if the mutator has conservative roots we will
     // have pinning anyway and might as well allow ragged stops.
     mark_while_stopping = gc_has_conservative_roots();
-  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING
+  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING
              && fragmentation >= heap->fragmentation_low_threshold) {
     DEBUG("continuing evacuation due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
@@ -1615,50 +1618,46 @@ determine_collection_kind(struct gc_heap *heap,
     // For some reason, we already decided to compact in the past,
     // and fragmentation hasn't yet fallen below a low-water-mark.
     // Keep going.
-    gc_kind = GC_COLLECTION_COMPACTING;
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
   } else if (fragmentation > heap->fragmentation_high_threshold) {
     // Switch to evacuation mode if the heap is too fragmented.
     DEBUG("triggering compaction due to fragmentation %.2f%% > %.2f%%\n",
           fragmentation * 100.,
           heap->fragmentation_high_threshold * 100.);
-    gc_kind = GC_COLLECTION_COMPACTING;
-  } else if (previous_gc_kind == GC_COLLECTION_COMPACTING) {
+    gc_kind = GC_KIND_MAJOR_EVACUATING;
+  } else if (previous_gc_kind == GC_KIND_MAJOR_EVACUATING) {
     // We were evacuating, but we're good now.  Go back to minor
     // collections.
     DEBUG("returning to in-place collection, fragmentation %.2f%% < %.2f%%\n",
          fragmentation * 100.,
          heap->fragmentation_low_threshold * 100.);
-    gc_kind = GC_GENERATIONAL ? GC_COLLECTION_MINOR : GC_COLLECTION_MAJOR;
-  } else if (!GC_GENERATIONAL) {
-    DEBUG("keeping on with major in-place GC\n");
-    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MAJOR);
-    gc_kind = GC_COLLECTION_MAJOR;
-  } else if (previous_gc_kind != GC_COLLECTION_MINOR) {
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
+  } else if (previous_gc_kind != GC_KIND_MINOR_IN_PLACE) {
     DEBUG("returning to minor collection after major collection\n");
     // Go back to minor collections.
-    gc_kind = GC_COLLECTION_MINOR;
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
   } else if (yield < heap->major_gc_yield_threshold) {
     DEBUG("collection yield too low, triggering major collection\n");
     // Nursery is getting tight; trigger a major GC.
-    gc_kind = GC_COLLECTION_MAJOR;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
   } else {
     DEBUG("keeping on with minor GC\n");
     // Nursery has adequate space; keep trucking with minor GCs.
-    GC_ASSERT(previous_gc_kind == GC_COLLECTION_MINOR);
-    gc_kind = GC_COLLECTION_MINOR;
+    GC_ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE);
+    gc_kind = GC_KIND_MINOR_IN_PLACE;
   }
 
   if (gc_has_conservative_intraheap_edges() &&
-      gc_kind == GC_COLLECTION_COMPACTING) {
+      (gc_kind & GC_KIND_FLAG_EVACUATING)) {
     DEBUG("welp. conservative heap scanning, no evacuation for you\n");
-    gc_kind = GC_COLLECTION_MAJOR;
+    gc_kind = GC_KIND_MAJOR_IN_PLACE;
     mark_while_stopping = 1;
   }
 
   // If this is the first in a series of minor collections, reset the
   // threshold at which we should do a major GC.
-  if (gc_kind == GC_COLLECTION_MINOR &&
-      previous_gc_kind != GC_COLLECTION_MINOR) {
+  if ((gc_kind & GC_KIND_FLAG_MINOR) &&
+      (previous_gc_kind & GC_KIND_FLAG_MINOR) != GC_KIND_FLAG_MINOR) {
     double yield = heap_last_gc_yield(heap);
     double threshold = yield * heap->minor_gc_yield_threshold;
     double clamped = clamp_major_gc_yield_threshold(heap, threshold);
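As a worked example of the threshold reset in the hunk above (illustrative numbers, not taken from the patch): if the last collection yielded 40% of the heap and minor_gc_yield_threshold is 0.3, the new major-GC trigger becomes 0.4 * 0.3 = 0.12 before clamping, so a subsequent minor collection that reclaims less than 12% of the heap will escalate to a major one via the "collection yield too low" branch shown earlier.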
@@ -1688,7 +1687,7 @@ static void release_evacuation_target_blocks(struct mark_space *space) {
 static void prepare_for_evacuation(struct gc_heap *heap) {
   struct mark_space *space = heap_mark_space(heap);
 
-  if (heap->gc_kind != GC_COLLECTION_COMPACTING) {
+  if ((heap->gc_kind & GC_KIND_FLAG_EVACUATING) == 0) {
     space->evacuating = 0;
     space->evacuation_reserve = space->evacuation_minimum_reserve;
     return;
@@ -1797,7 +1796,7 @@ static void trace_roots_after_stop(struct gc_heap *heap) {
 }
 
 static void mark_space_finish_gc(struct mark_space *space,
-                                 enum gc_collection_kind gc_kind) {
+                                 enum gc_kind gc_kind) {
   space->evacuating = 0;
   reset_sweeper(space);
   update_mark_patterns(space, 0);
@@ -1825,8 +1824,7 @@ static void sweep_ephemerons(struct gc_heap *heap) {
   return gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
 }
 
-static void collect(struct gc_mutator *mut,
-                    enum gc_collection_kind requested_kind) {
+static void collect(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   struct mark_space *space = heap_mark_space(heap);
   struct large_object_space *lospace = heap_large_object_space(heap);
@@ -1837,12 +1835,12 @@ static void collect(struct gc_mutator *mut,
   }
   MUTATOR_EVENT(mut, mutator_cause_gc);
   DEBUG("start collect #%ld:\n", heap->count);
-  enum gc_collection_kind gc_kind =
-    determine_collection_kind(heap, requested_kind);
-  HEAP_EVENT(heap, prepare_gc, gc_kind);
-  update_mark_patterns(space, gc_kind != GC_COLLECTION_MINOR);
-  large_object_space_start_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
-  gc_extern_space_start_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
+  enum gc_kind gc_kind = determine_collection_kind(heap);
+  HEAP_EVENT(heap, prepare_gc, gc_kind & GC_KIND_FLAG_MINOR,
+             gc_kind & GC_KIND_FLAG_EVACUATING);
+  update_mark_patterns(space, !(gc_kind & GC_KIND_FLAG_MINOR));
+  large_object_space_start_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
+  gc_extern_space_start_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
   resolve_ephemerons_lazily(heap);
   tracer_prepare(heap);
   HEAP_EVENT(heap, requesting_stop);
@@ -1870,11 +1868,11 @@ static void collect(struct gc_mutator *mut,
   sweep_ephemerons(heap);
   tracer_release(heap);
   mark_space_finish_gc(space, gc_kind);
-  large_object_space_finish_gc(lospace, gc_kind == GC_COLLECTION_MINOR);
-  gc_extern_space_finish_gc(exspace, gc_kind == GC_COLLECTION_MINOR);
+  large_object_space_finish_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
+  gc_extern_space_finish_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
   heap->count++;
-  heap->last_collection_kind = gc_kind;
-  if (gc_kind == GC_COLLECTION_MINOR)
+  heap->last_collection_was_minor = gc_kind & GC_KIND_FLAG_MINOR;
+  if (heap->last_collection_was_minor)
     heap->minor_count++;
   heap_reset_large_object_pages(heap, lospace->live_pages_at_last_collection);
   HEAP_EVENT(heap, restarting_mutators);
@@ -2081,7 +2079,7 @@ static size_t next_hole(struct gc_mutator *mut) {
         // mark bytes didn't rotate, so we have no cleanup to do; and
         // we shouldn't try to allocate into them as it's not worth
         // it.  Any wasted space is measured as fragmentation.
-        if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR)
+        if (mutator_heap(mut)->last_collection_was_minor)
          continue;
        else
          block_summary_clear_flag(summary, BLOCK_VENERABLE);
@@ -2094,7 +2092,7 @@ static size_t next_hole(struct gc_mutator *mut) {
        // mostly old data.  Sweep any garbage, commit the mark as
        // venerable, and avoid allocating into it.
        block_summary_clear_flag(summary, BLOCK_VENERABLE_AFTER_SWEEP);
-       if (mutator_heap(mut)->last_collection_kind == GC_COLLECTION_MINOR) {
+       if (mutator_heap(mut)->last_collection_was_minor) {
          finish_sweeping_in_block(mut);
          block_summary_set_flag(summary, BLOCK_VENERABLE);
          continue;
@@ -2155,22 +2153,18 @@ static void finish_sweeping(struct gc_mutator *mut) {
   finish_hole(mut);
 }
 
-static void trigger_collection(struct gc_mutator *mut,
-                               enum gc_collection_kind requested_kind) {
+static void trigger_collection(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
-  enum gc_collection_kind last_collection_kind = GC_COLLECTION_ANY;
   heap_lock(heap);
-  while (mutators_are_stopping(heap)) {
+  if (mutators_are_stopping(heap))
     pause_mutator_for_collection_with_lock(mut);
-    last_collection_kind = heap->last_collection_kind;
-  }
-  if (last_collection_kind < requested_kind)
-    collect(mut, requested_kind);
+  else
+    collect(mut);
   heap_unlock(heap);
 }
 
-void gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
-  trigger_collection(mut, kind);
+void gc_collect(struct gc_mutator *mut) {
+  trigger_collection(mut);
 }
 
 static void* allocate_large(struct gc_mutator *mut, size_t size) {
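The last_collection_kind < requested_kind test removed above relied on the ordering of the deleted enum gc_collection_kind: if this mutator had paused for another thread's collection and that collection was at least as strong as the request, the redundant GC was skipped. A small sketch of that comparison; the enum is repeated from the deleted header and the helper name collection_satisfies_request is hypothetical:

enum gc_collection_kind {
  GC_COLLECTION_ANY,         /* 0: let the collector choose */
  GC_COLLECTION_MINOR,       /* 1 */
  GC_COLLECTION_MAJOR,       /* 2 */
  GC_COLLECTION_COMPACTING,  /* 3 */
};

/* Did the collection that just ran satisfy the embedder's request? */
static int collection_satisfies_request(enum gc_collection_kind last,
                                        enum gc_collection_kind requested) {
  return last >= requested;
}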
@@ -2183,7 +2177,7 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
                                      npages << space->page_size_log2);
 
   while (!sweep_until_memory_released(mut))
-    trigger_collection(mut, GC_COLLECTION_COMPACTING);
+    trigger_collection(mut);
   atomic_fetch_add(&heap->large_object_pages, npages);
 
   void *ret = large_object_space_alloc(space, npages);
@@ -2221,7 +2215,7 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
       break;
     }
     if (!hole)
-      trigger_collection(mut, GC_COLLECTION_ANY);
+      trigger_collection(mut);
   }
   ret = gc_ref(mut->alloc);
   mut->alloc += size;