mirror of https://git.savannah.gnu.org/git/guile.git
Implement cooperative safepoint API
Fixes https://github.com/wingo/whippet/issues/9.
parent 9f26dbb1fc
commit 8fba0e5322
11 changed files with 95 additions and 63 deletions
api/bdw-attrs.h

@@ -54,6 +54,10 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_SIGNAL;
 }
 
+static inline enum gc_cooperative_safepoint_kind gc_cooperative_safepoint_kind(void) {
+  return GC_COOPERATIVE_SAFEPOINT_NONE;
+}
+
 static inline int gc_can_pin_objects(void) {
   return 1;
 }
api/gc-api.h (20 changed lines)

@@ -208,4 +208,24 @@ static inline void gc_write_barrier(struct gc_ref obj, size_t obj_size,
 
 GC_API_ void gc_pin_object(struct gc_mutator *mut, struct gc_ref obj);
 
+GC_API_ void gc_safepoint_slow(struct gc_mutator *mut) GC_NEVER_INLINE;
+GC_API_ int* gc_safepoint_flag_loc(struct gc_mutator *mut);
+static inline int gc_should_stop_for_safepoint(struct gc_mutator *mut) {
+  switch (gc_cooperative_safepoint_kind()) {
+  case GC_COOPERATIVE_SAFEPOINT_NONE:
+    return 0;
+  case GC_COOPERATIVE_SAFEPOINT_MUTATOR_FLAG:
+  case GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG: {
+    return atomic_load_explicit(gc_safepoint_flag_loc(mut),
+                                memory_order_relaxed);
+  }
+  default:
+    GC_CRASH();
+  }
+}
+static inline void gc_safepoint(struct gc_mutator *mut) {
+  if (GC_UNLIKELY(gc_should_stop_for_safepoint(mut)))
+    gc_safepoint_slow(mut);
+}
+
 #endif // GC_API_H_
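Taken together, gc_safepoint() costs one relaxed flag load and a well-predicted branch on the fast path, and it should constant-fold away entirely when gc_cooperative_safepoint_kind() returns GC_COOPERATIVE_SAFEPOINT_NONE, since the attrs functions are always-inline constants. A minimal mutator-side sketch of the intended use; process_item and the surrounding loop are illustrative, not part of this API:

#include <stddef.h>
#include "gc-api.h"

extern void process_item(void *item);  /* hypothetical non-allocating workload */

static void process_all(struct gc_mutator *mut, void **items, size_t n) {
  for (size_t i = 0; i < n; i++) {
    process_item(items[i]);
    /* Cooperative safepoint: a relaxed load of the stop flag; the
       out-of-line gc_safepoint_slow is called only when a collection
       has actually been requested. */
    gc_safepoint(mut);
  }
}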
api/gc-attrs.h

@@ -43,6 +43,13 @@ enum gc_safepoint_mechanism {
 };
 static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) GC_ALWAYS_INLINE;
 
+enum gc_cooperative_safepoint_kind {
+  GC_COOPERATIVE_SAFEPOINT_NONE,
+  GC_COOPERATIVE_SAFEPOINT_MUTATOR_FLAG,
+  GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG,
+};
+static inline enum gc_cooperative_safepoint_kind gc_cooperative_safepoint_kind(void) GC_ALWAYS_INLINE;
+
 static inline int gc_can_pin_objects(void) GC_ALWAYS_INLINE;
 
 #endif // GC_ATTRS_H
api/mmc-attrs.h

@@ -61,6 +61,10 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline enum gc_cooperative_safepoint_kind gc_cooperative_safepoint_kind(void) {
+  return GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG;
+}
+
 static inline int gc_can_pin_objects(void) {
   return 1;
 }
api/pcc-attrs.h

@@ -57,6 +57,10 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline enum gc_cooperative_safepoint_kind gc_cooperative_safepoint_kind(void) {
+  return GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG;
+}
+
 static inline int gc_can_pin_objects(void) {
   return 0;
 }
api/semi-attrs.h

@@ -56,6 +56,10 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline enum gc_cooperative_safepoint_kind gc_cooperative_safepoint_kind(void) {
+  return GC_COOPERATIVE_SAFEPOINT_NONE;
+}
+
 static inline int gc_can_pin_objects(void) {
   return 0;
 }
doc/manual.md

@@ -531,12 +531,19 @@ temporarily mark itself as inactive by trampolining through
 for, for example, system calls that might block. Periodic safepoints is
 better for code that is active but not allocating.
 
-Thing is, though, `gc_safepoint` is not yet implemented :) It will be,
-though!
-
 Also, the BDW collector actually uses pre-emptive safepoints: it stops
-threads via POSIX signals. `gc_safepoint` is (or will be) a no-op with
-BDW.
+threads via POSIX signals. `gc_safepoint` is a no-op with BDW.
+
+Embedders can inline safepoint checks. If
+`gc_cooperative_safepoint_kind()` is `GC_COOPERATIVE_SAFEPOINT_NONE`,
+then the collector doesn't need safepoints, as is the case for `bdw`
+which uses signals and `semi` which is single-threaded. If it is
+`GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG`, then calling
+`gc_safepoint_flag_loc` on a mutator will return the address of an `int`
+in memory, which if nonzero when loaded using relaxed atomics indicates
+that the mutator should call `gc_safepoint_slow`. Similarly for
+`GC_COOPERATIVE_SAFEPOINT_MUTATOR_FLAG`, except that the address is
+per-mutator rather than global.
 
 ### Pinning
 
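For embedders that emit their own poll sequences (a JIT, say) rather than calling the gc_safepoint() helper from gc-api.h, the manual paragraph above corresponds to an open-coded check like the following sketch; the dispatch is over compile-time constants, and with the heap-flag kind the returned address is the same for every mutator, so generated code could cache it once. Illustrative only:

#include <stdatomic.h>
#include "gc-api.h"

static void poll_safepoint(struct gc_mutator *mut) {
  switch (gc_cooperative_safepoint_kind()) {
  case GC_COOPERATIVE_SAFEPOINT_NONE:
    return;  /* bdw (signals) and semi (single-threaded): no check needed */
  case GC_COOPERATIVE_SAFEPOINT_HEAP_FLAG:     /* one flag shared by the heap */
  case GC_COOPERATIVE_SAFEPOINT_MUTATOR_FLAG:  /* one flag per mutator */
    if (atomic_load_explicit(gc_safepoint_flag_loc(mut),
                             memory_order_relaxed))
      gc_safepoint_slow(mut);
    return;
  }
}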
src/bdw.c

@@ -153,6 +153,9 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
                             struct gc_edge edge, struct gc_ref new_val) {
 }
 
+int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
+void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
+
 struct bdw_mark_state {
   struct GC_ms_entry *mark_stack_ptr;
   struct GC_ms_entry *mark_stack_limit;
src/mmc.c (50 changed lines)

@@ -417,6 +417,7 @@ static enum gc_collection_kind
 pause_mutator_for_collection(struct gc_heap *heap, struct gc_mutator *mut) {
   GC_ASSERT(mutators_are_stopping(heap));
   GC_ASSERT(!all_mutators_stopped(heap));
+  MUTATOR_EVENT(mut, mutator_stopping);
   MUTATOR_EVENT(mut, mutator_stopped);
   heap->paused_mutator_count++;
   enum gc_collection_kind collection_kind = heap->gc_kind;
@@ -432,35 +433,6 @@ pause_mutator_for_collection(struct gc_heap *heap, struct gc_mutator *mut) {
   return collection_kind;
 }
 
-static enum gc_collection_kind
-pause_mutator_for_collection_with_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
-static enum gc_collection_kind
-pause_mutator_for_collection_with_lock(struct gc_mutator *mut) {
-  struct gc_heap *heap = mutator_heap(mut);
-  GC_ASSERT(mutators_are_stopping(heap));
-  MUTATOR_EVENT(mut, mutator_stopping);
-  return pause_mutator_for_collection(heap, mut);
-}
-
-static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
-static void
-pause_mutator_for_collection_without_lock(struct gc_mutator *mut) {
-  struct gc_heap *heap = mutator_heap(mut);
-  GC_ASSERT(mutators_are_stopping(heap));
-  MUTATOR_EVENT(mut, mutator_stopping);
-  nofl_finish_sweeping(&mut->allocator, heap_nofl_space(heap));
-  gc_stack_capture_hot(&mut->stack);
-  heap_lock(heap);
-  pause_mutator_for_collection(heap, mut);
-  heap_unlock(heap);
-}
-
-static inline void
-maybe_pause_mutator_for_collection(struct gc_mutator *mut) {
-  while (mutators_are_stopping(mutator_heap(mut)))
-    pause_mutator_for_collection_without_lock(mut);
-}
-
 static void
 resize_heap(struct gc_heap *heap, size_t new_size) {
   if (new_size == heap->size)
@@ -818,7 +790,7 @@ trigger_collection(struct gc_mutator *mut,
   nofl_allocator_finish(&mut->allocator, heap_nofl_space(heap));
   heap_lock(heap);
   while (mutators_are_stopping(heap))
-    prev_kind = pause_mutator_for_collection_with_lock(mut);
+    prev_kind = pause_mutator_for_collection(heap, mut);
   if (prev_kind < (int)requested_kind)
     collect(mut, requested_kind);
   heap_unlock(heap);
@@ -829,6 +801,22 @@ gc_collect(struct gc_mutator *mut, enum gc_collection_kind kind) {
   trigger_collection(mut, kind);
 }
 
+int*
+gc_safepoint_flag_loc(struct gc_mutator *mut) {
+  return &mutator_heap(mut)->collecting;
+}
+
+void
+gc_safepoint_slow(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
+  gc_stack_capture_hot(&mut->stack);
+  nofl_allocator_finish(&mut->allocator, heap_nofl_space(heap));
+  heap_lock(heap);
+  while (mutators_are_stopping(mutator_heap(mut)))
+    pause_mutator_for_collection(heap, mut);
+  heap_unlock(heap);
+}
+
 static void*
 allocate_large(struct gc_mutator *mut, size_t size) {
   struct gc_heap *heap = mutator_heap(mut);
@@ -894,7 +882,7 @@ gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
   GC_ASSERT(obj_size > gc_allocator_large_threshold());
   gc_object_set_remembered(obj);
 }
-
+
 struct gc_ephemeron*
 gc_allocate_ephemeron(struct gc_mutator *mut) {
   struct gc_ref ret =
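In mmc (and pcc below) the safepoint flag is simply &heap->collecting, so the stop-the-world handshake reads: the collector raises the flag, each mutator's next gc_safepoint() trips, and gc_safepoint_slow() flushes the allocator, captures the hot stack, and parks the mutator until collection finishes. A toy model of that handshake, with the rendezvous reduced to a mutex and condition variable; the toy_* names are invented, and the real pause_mutator_for_collection additionally counts paused mutators and coordinates with the collector:

#include <pthread.h>
#include <stdatomic.h>

/* Toy model of the heap-flag handshake; not Whippet source. */
struct toy_heap {
  _Atomic int collecting;        /* plays the role of heap->collecting */
  pthread_mutex_t lock;
  pthread_cond_t done;
};

static void toy_safepoint_slow(struct toy_heap *heap) {
  /* The real gc_safepoint_slow also captures the stack and flushes
     the allocator before parking. */
  pthread_mutex_lock(&heap->lock);
  while (atomic_load(&heap->collecting))
    pthread_cond_wait(&heap->done, &heap->lock);
  pthread_mutex_unlock(&heap->lock);
}

static void toy_safepoint(struct toy_heap *heap) {
  /* Fast path: one relaxed load, as in gc_safepoint. */
  if (atomic_load_explicit(&heap->collecting, memory_order_relaxed))
    toy_safepoint_slow(heap);
}

static void toy_collect(struct toy_heap *heap) {
  pthread_mutex_lock(&heap->lock);
  atomic_store(&heap->collecting, 1);
  /* ... wait for all mutators to park, then trace and sweep ... */
  atomic_store(&heap->collecting, 0);
  pthread_cond_broadcast(&heap->done);
  pthread_mutex_unlock(&heap->lock);
}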
src/pcc.c (42 changed lines)

@@ -283,6 +283,7 @@ static void
 pause_mutator_for_collection(struct gc_heap *heap, struct gc_mutator *mut) {
   GC_ASSERT(mutators_are_stopping(heap));
   GC_ASSERT(!all_mutators_stopped(heap));
+  MUTATOR_EVENT(mut, mutator_stopping);
   MUTATOR_EVENT(mut, mutator_stopped);
   heap->paused_mutator_count++;
   if (all_mutators_stopped(heap))
@@ -296,32 +297,6 @@ pause_mutator_for_collection(struct gc_heap *heap, struct gc_mutator *mut) {
   MUTATOR_EVENT(mut, mutator_restarted);
 }
 
-static void
-pause_mutator_for_collection_with_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
-static void
-pause_mutator_for_collection_with_lock(struct gc_mutator *mut) {
-  struct gc_heap *heap = mutator_heap(mut);
-  GC_ASSERT(mutators_are_stopping(heap));
-  MUTATOR_EVENT(mut, mutator_stopping);
-  pause_mutator_for_collection(heap, mut);
-}
-
-static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
-static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) {
-  struct gc_heap *heap = mutator_heap(mut);
-  GC_ASSERT(mutators_are_stopping(heap));
-  MUTATOR_EVENT(mut, mutator_stopping);
-  copy_space_allocator_finish(&mut->allocator, heap_copy_space(heap));
-  heap_lock(heap);
-  pause_mutator_for_collection(heap, mut);
-  heap_unlock(heap);
-}
-
-static inline void maybe_pause_mutator_for_collection(struct gc_mutator *mut) {
-  while (mutators_are_stopping(mutator_heap(mut)))
-    pause_mutator_for_collection_without_lock(mut);
-}
-
 static void resize_heap(struct gc_heap *heap, size_t new_size) {
   if (new_size == heap->size)
     return;
@@ -447,7 +422,7 @@ static void trigger_collection(struct gc_mutator *mut) {
   heap_lock(heap);
   long epoch = heap->count;
   while (mutators_are_stopping(heap))
-    pause_mutator_for_collection_with_lock(mut);
+    pause_mutator_for_collection(heap, mut);
   if (epoch == heap->count)
     collect(mut);
   heap_unlock(heap);
@@ -512,6 +487,19 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
                             struct gc_edge edge, struct gc_ref new_val) {
 }
 
+int* gc_safepoint_flag_loc(struct gc_mutator *mut) {
+  return &mutator_heap(mut)->collecting;
+}
+
+void gc_safepoint_slow(struct gc_mutator *mut) {
+  struct gc_heap *heap = mutator_heap(mut);
+  copy_space_allocator_finish(&mut->allocator, heap_copy_space(heap));
+  heap_lock(heap);
+  while (mutators_are_stopping(mutator_heap(mut)))
+    pause_mutator_for_collection(heap, mut);
+  heap_unlock(heap);
+}
+
 struct gc_ephemeron* gc_allocate_ephemeron(struct gc_mutator *mut) {
   return gc_allocate(mut, gc_ephemeron_size());
 }
src/semi.c

@@ -457,6 +457,9 @@ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
                             struct gc_edge edge, struct gc_ref new_val) {
 }
 
+int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
+void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
+
 static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
   collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
 }