Implement per-object pinning API
Fixes https://github.com/wingo/whippet/issues/6.
parent a722b9c13f
commit 9f26dbb1fc

13 changed files with 81 additions and 5 deletions
```diff
@@ -45,8 +45,8 @@ See the [documentation](./doc/README.md).
 ## Status and roadmap
 
 As of September 2024, Whippet is almost feature-complete. We need to
-land a per-object pinning API, and an API for cooperative safepoints for
-use by threads that are looping without allocating.
+land an API for cooperative safepoints for use by threads that are
+looping without allocating.
 
 After that, the next phase on the roadmap is support for tracing, and
 some performance noodling.
```
```diff
@@ -54,4 +54,8 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_SIGNAL;
 }
 
+static inline int gc_can_pin_objects(void) {
+  return 1;
+}
+
 #endif // BDW_ATTRS_H
```
```diff
@@ -206,4 +206,6 @@ static inline void gc_write_barrier(struct gc_ref obj, size_t obj_size,
   }
 }
 
+GC_API_ void gc_pin_object(struct gc_mutator *mut, struct gc_ref obj);
+
 #endif // GC_API_H_
```
```diff
@@ -43,4 +43,6 @@ enum gc_safepoint_mechanism {
 };
 static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) GC_ALWAYS_INLINE;
 
+static inline int gc_can_pin_objects(void) GC_ALWAYS_INLINE;
+
 #endif // GC_ATTRS_H
```
```diff
@@ -61,4 +61,8 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline int gc_can_pin_objects(void) {
+  return 1;
+}
+
 #endif // MMC_ATTRS_H
```
```diff
@@ -57,4 +57,8 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline int gc_can_pin_objects(void) {
+  return 0;
+}
+
 #endif // PCC_ATTRS_H
```
```diff
@@ -56,4 +56,8 @@ static inline enum gc_safepoint_mechanism gc_safepoint_mechanism(void) {
   return GC_SAFEPOINT_MECHANISM_COOPERATIVE;
 }
 
+static inline int gc_can_pin_objects(void) {
+  return 0;
+}
+
 #endif // SEMI_ATTRS_H
```
```diff
@@ -538,6 +538,25 @@ Also, the BDW collector actually uses pre-emptive safepoints: it stops
 threads via POSIX signals.  `gc_safepoint` is (or will be) a no-op with
 BDW.
 
+### Pinning
+
+Sometimes a mutator or embedder would like to tell the collector to not
+move a particular object.  This can happen for example during a foreign
+function call, or if the embedder allows programs to access the address
+of an object, for example to compute an identity hash code.  To support
+this use case, some Whippet collectors allow the embedder to *pin*
+objects.  Call `gc_pin_object` to prevent the collector from relocating
+an object.
+
+Pinning is currently supported by the `bdw` collector, which never moves
+objects, and also by the various `mmc` collectors, which can move
+objects that have no inbound conservative references.
+
+Pinning is not supported on `semi` or `pcc`.
+
+Call `gc_can_pin_objects` to determine whether the current collector can
+pin objects.
+
 ### Statistics
 
 Sometimes a program would like some information from the GC: how many
```
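To make the documented flow concrete, here is a usage sketch; the helper and its calling context are invented for illustration, and only `gc_pin_object`, `gc_can_pin_objects`, `struct gc_mutator`, and `struct gc_ref` come from this commit's API:

```c
#include "gc-api.h"  // gc_pin_object; assumes the collector's attrs
                     // header (declaring gc_can_pin_objects) is in scope

// Hypothetical embedder helper: pin an object before exposing its
// address to foreign code.  Returns 1 if the object will not move,
// 0 if this collector (e.g. `semi` or `pcc`) cannot pin, in which
// case calling gc_pin_object would GC_CRASH().
static int maybe_pin_for_ffi(struct gc_mutator *mut, struct gc_ref obj) {
  if (!gc_can_pin_objects())
    return 0;
  gc_pin_object(mut, obj);
  return 1;
}
```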
```diff
@@ -127,6 +127,10 @@ void* gc_allocate_pointerless(struct gc_mutator *mut,
   return GC_malloc_atomic(size);
 }
 
+void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
+  // Nothing to do.
+}
+
 void gc_collect(struct gc_mutator *mut,
                 enum gc_collection_kind requested_kind) {
   switch (requested_kind) {
```
```diff
@@ -880,6 +880,14 @@ gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
+void
+gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
+  struct nofl_space *nofl = heap_nofl_space(mutator_heap(mut));
+  if (nofl_space_contains(nofl, ref))
+    nofl_space_pin_object(nofl, ref);
+  // Otherwise if it's a large or external object, it won't move.
+}
+
 void
 gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
                         struct gc_edge edge, struct gc_ref new_val) {
```
```diff
@@ -1328,9 +1328,12 @@ nofl_space_sweep_until_memory_released(struct nofl_space *space,
 }
 
 static inline int
-nofl_space_should_evacuate(struct nofl_space *space, struct gc_ref obj) {
+nofl_space_should_evacuate(struct nofl_space *space, uint8_t metadata_byte,
+                           struct gc_ref obj) {
   if (!space->evacuating)
     return 0;
+  if (metadata_byte & NOFL_METADATA_BYTE_PINNED)
+    return 0;
   return nofl_block_has_flag(nofl_block_for_addr(gc_ref_value(obj)),
                              NOFL_BLOCK_EVACUATE);
 }
@@ -1353,6 +1356,20 @@ nofl_space_set_nonempty_mark(struct nofl_space *space, uint8_t *metadata,
   return 1;
 }
 
+static inline void
+nofl_space_pin_object(struct nofl_space *space, struct gc_ref ref) {
+  uint8_t *metadata = nofl_metadata_byte_for_object(ref);
+  uint8_t byte = atomic_load_explicit(metadata, memory_order_relaxed);
+  if (byte & NOFL_METADATA_BYTE_PINNED)
+    return;
+  uint8_t new_byte;
+  do {
+    new_byte = byte | NOFL_METADATA_BYTE_PINNED;
+  } while (!atomic_compare_exchange_weak_explicit(metadata, &byte, new_byte,
+                                                  memory_order_acq_rel,
+                                                  memory_order_acquire));
+}
+
 static inline int
 nofl_space_evacuate(struct nofl_space *space, uint8_t *metadata, uint8_t byte,
                     struct gc_edge edge,
@@ -1429,7 +1446,7 @@ nofl_space_evacuate_or_mark_object(struct nofl_space *space,
   if (byte & space->marked_mask)
     return 0;
 
-  if (nofl_space_should_evacuate(space, old_ref))
+  if (nofl_space_should_evacuate(space, byte, old_ref))
     return nofl_space_evacuate(space, metadata, byte, edge, old_ref,
                                evacuate);
 
@@ -1490,7 +1507,7 @@ nofl_space_forward_or_mark_if_traced(struct nofl_space *space,
   if (byte & space->marked_mask)
     return 1;
 
-  if (!nofl_space_should_evacuate(space, ref))
+  if (!nofl_space_should_evacuate(space, byte, ref))
     return 0;
 
   return nofl_space_forward_if_evacuated(space, edge, ref);
```
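The `nofl_space_pin_object` addition above sets `NOFL_METADATA_BYTE_PINNED` in the object's metadata byte with a compare-exchange loop, so concurrent updates to the byte's other bits are never lost; `nofl_space_should_evacuate` then refuses to relocate any object whose byte has that bit set. Below is a minimal self-contained sketch of the same bit-setting pattern in strict C11, with an invented flag value; the real metadata accessors are internal to nofl-space.h:

```c
#include <stdatomic.h>
#include <stdint.h>

#define FLAG_PINNED 0x40u  // invented stand-in for NOFL_METADATA_BYTE_PINNED

// Set FLAG_PINNED without clobbering the byte's other bits: on CAS
// failure, `old` is refreshed with the current value and we retry.
static void set_pinned_flag(_Atomic uint8_t *byte) {
  uint8_t old = atomic_load_explicit(byte, memory_order_relaxed);
  while (!(old & FLAG_PINNED)) {
    if (atomic_compare_exchange_weak_explicit(byte, &old,
                                              old | FLAG_PINNED,
                                              memory_order_acq_rel,
                                              memory_order_acquire))
      break;
  }
}
```

A single `atomic_fetch_or_explicit` would set the bit just as atomically; the compare-exchange form used in the commit additionally skips the read-modify-write entirely when the object is already pinned.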
```diff
@@ -504,6 +504,10 @@ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
+void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
+  GC_CRASH();
+}
+
 void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
                              struct gc_edge edge, struct gc_ref new_val) {
 }
```
```diff
@@ -505,6 +505,10 @@ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
+void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
+  GC_CRASH();
+}
+
 struct gc_ephemeron* gc_allocate_ephemeron(struct gc_mutator *mut) {
   return gc_allocate(mut, gc_ephemeron_size());
 }
```