mirror of https://git.savannah.gnu.org/git/guile.git

Allow large object space to be part of remembered set

commit e270290242
parent d0b8f6838d
Author: Andy Wingo
Date:   2023-03-15 09:34:12 +01:00
13 changed files with 135 additions and 25 deletions


@@ -40,13 +40,13 @@ static inline int gc_allocator_needs_clear(void) {
   return 0;
 }
-static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
+static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
   return GC_WRITE_BARRIER_NONE;
 }
-static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+static inline size_t gc_write_barrier_card_table_alignment(void) {
   GC_CRASH();
 }
-static inline size_t gc_small_write_barrier_card_size(void) {
+static inline size_t gc_write_barrier_card_size(void) {
   GC_CRASH();
 }

bdw.c

@@ -115,6 +115,10 @@ void gc_collect(struct gc_mutator *mut) {
   GC_gcollect();
 }
 
+void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
+                             struct gc_edge edge, struct gc_ref new_val) {
+}
+
 // In BDW-GC, we can't hook into the mark phase to call
 // gc_trace_ephemerons_for_object, so the advertised ephemeron strategy
 // doesn't really work.  The primitives that we have are mark functions,


@@ -177,18 +177,30 @@
 }
-static inline void gc_small_write_barrier(struct gc_ref obj, struct gc_edge edge,
-                                          struct gc_ref new_val) GC_ALWAYS_INLINE;
-static inline void gc_small_write_barrier(struct gc_ref obj, struct gc_edge edge,
-                                          struct gc_ref new_val) {
-  switch (gc_small_write_barrier_kind()) {
+GC_API_ void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
+                                     struct gc_edge edge, struct gc_ref new_val) GC_NEVER_INLINE;
+
+static inline void gc_write_barrier(struct gc_ref obj, size_t obj_size,
+                                    struct gc_edge edge, struct gc_ref new_val) GC_ALWAYS_INLINE;
+static inline void gc_write_barrier(struct gc_ref obj, size_t obj_size,
+                                    struct gc_edge edge, struct gc_ref new_val) {
+  switch (gc_write_barrier_kind(obj_size)) {
   case GC_WRITE_BARRIER_NONE:
     return;
   case GC_WRITE_BARRIER_CARD: {
-    size_t card_table_alignment = gc_small_write_barrier_card_table_alignment();
-    size_t card_size = gc_small_write_barrier_card_size();
+    size_t card_table_alignment = gc_write_barrier_card_table_alignment();
+    size_t card_size = gc_write_barrier_card_size();
     uintptr_t addr = gc_ref_value(obj);
     uintptr_t base = addr & ~(card_table_alignment - 1);
     uintptr_t card = (addr & (card_table_alignment - 1)) / card_size;
     atomic_store_explicit((uint8_t*)(base + card), 1, memory_order_relaxed);
     return;
   }
+  case GC_WRITE_BARRIER_EXTERN:
+    gc_write_barrier_extern(obj, obj_size, edge, new_val);
+    return;
   default:
     GC_CRASH();
   }
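The card computation above is easier to follow with concrete numbers. A minimal standalone sketch, assuming the 4 MB card table alignment and 256-byte card size that the whippet attrs below advertise; the object address is made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t card_table_alignment = 4 * 1024 * 1024;  // one card table per aligned 4 MB slab
  uintptr_t card_size = 256;                         // one card byte covers 256 heap bytes
  uintptr_t addr = (uintptr_t)0x7f0000412345;        // hypothetical object address
  uintptr_t base = addr & ~(card_table_alignment - 1);               // 0x7f0000400000
  uintptr_t card = (addr & (card_table_alignment - 1)) / card_size;  // 0x12345 / 256 = 0x123
  // The barrier dirties a single byte in the table at the slab base.
  printf("card byte at %#" PRIxPTR "\n", base + card);               // 0x7f0000400123
  return 0;
}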


@@ -29,11 +29,12 @@ static inline int gc_allocator_needs_clear(void) GC_ALWAYS_INLINE;
 
 enum gc_write_barrier_kind {
   GC_WRITE_BARRIER_NONE,
-  GC_WRITE_BARRIER_CARD
+  GC_WRITE_BARRIER_CARD,
+  GC_WRITE_BARRIER_EXTERN
 };
 
-static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) GC_ALWAYS_INLINE;
-static inline size_t gc_small_write_barrier_card_table_alignment(void) GC_ALWAYS_INLINE;
-static inline size_t gc_small_write_barrier_card_size(void) GC_ALWAYS_INLINE;
+static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) GC_ALWAYS_INLINE;
+static inline size_t gc_write_barrier_card_table_alignment(void) GC_ALWAYS_INLINE;
+static inline size_t gc_write_barrier_card_size(void) GC_ALWAYS_INLINE;
 
 #endif // GC_ATTRS_H


@@ -41,6 +41,14 @@ GC_EMBEDDER_API inline void gc_trace_heap_roots(struct gc_heap_roots *roots,
                                                 struct gc_heap *heap,
                                                 void *trace_data);
 
+// Some heap objects have space for a "remembered" bit, indicating they
+// are in the remembered set.  Large or potentially large objects
+// (e.g. a vector whose size is a run-time property) must have a
+// remembered set bit.  Small objects may or may not have such a bit.
+GC_EMBEDDER_API inline void gc_object_set_remembered(struct gc_ref ref);
+GC_EMBEDDER_API inline int gc_object_is_remembered_nonatomic(struct gc_ref ref);
+GC_EMBEDDER_API inline void gc_object_clear_remembered_nonatomic(struct gc_ref ref);
+
 GC_EMBEDDER_API inline uintptr_t gc_object_forwarded_nonatomic(struct gc_ref ref);
 GC_EMBEDDER_API inline void gc_object_forward_nonatomic(struct gc_ref ref,
                                                         struct gc_ref new_ref);
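To make the "potentially large" case concrete, a hedged sketch (not part of this commit) of an embedder whose vector type is sized at run time; Vector, Node and vector_size_bytes are hypothetical, while gc_write_barrier, gc_ref_from_heap_object and gc_edge come from the API changed above:

struct Node;

typedef struct Vector {
  size_t len;
  struct Node *vals[];  // run-time-sized payload
} Vector;

static size_t vector_size_bytes(Vector *v) {
  return sizeof(Vector) + v->len * sizeof(struct Node*);
}

static void vector_set(Vector *v, size_t i, struct Node *val) {
  // Passing the actual byte size lets gc_write_barrier_kind() choose card
  // marking for small vectors and GC_WRITE_BARRIER_EXTERN (the per-object
  // remembered bit) for vectors over the large-object threshold.
  gc_write_barrier(gc_ref_from_heap_object(v), vector_size_bytes(v),
                   gc_edge(&v->vals[i]),
                   gc_ref_from_heap_object(val));
  v->vals[i] = val;
}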


@@ -58,6 +58,49 @@ static size_t large_object_space_npages(struct large_object_space *space,
   return (bytes + space->page_size - 1) >> space->page_size_log2;
 }
 
+static void large_object_space_clear_one_remembered(uintptr_t addr,
+                                                    void *unused) {
+  struct gc_ref ref = gc_ref(addr);
+  if (gc_object_is_remembered_nonatomic(ref))
+    gc_object_clear_remembered_nonatomic(ref);
+}
+
+static void
+large_object_space_clear_remembered_set(struct large_object_space *space) {
+  if (!GC_GENERATIONAL)
+    return;
+  address_set_for_each(&space->to_space,
+                       large_object_space_clear_one_remembered, NULL);
+}
+
+struct large_object_space_trace_remembered_data {
+  void (*trace)(struct gc_ref, struct gc_heap*);
+  struct gc_heap *heap;
+};
+
+static void large_object_space_trace_one_remembered(uintptr_t addr,
+                                                    void *data) {
+  struct gc_ref ref = gc_ref(addr);
+  if (gc_object_is_remembered_nonatomic(ref)) {
+    gc_object_clear_remembered_nonatomic(ref);
+    struct large_object_space_trace_remembered_data *vdata = data;
+    vdata->trace(ref, vdata->heap);
+  }
+}
+
+static void
+large_object_space_trace_remembered_set(struct large_object_space *space,
+                                        void (*trace)(struct gc_ref,
+                                                      struct gc_heap*),
+                                        struct gc_heap *heap) {
+  struct large_object_space_trace_remembered_data vdata = { trace, heap };
+  if (!GC_GENERATIONAL)
+    return;
+  address_set_for_each(&space->to_space,
+                       large_object_space_trace_one_remembered, &vdata);
+}
+
 static void large_object_space_start_gc(struct large_object_space *space,
                                         int is_minor_gc) {
   if (is_minor_gc)


@@ -144,9 +144,9 @@ static void allocate_garbage(struct thread *t) {
 }
 
 static void set_field(Node *obj, Node **field, Node *val) {
-  gc_small_write_barrier(gc_ref_from_heap_object(obj),
-                         gc_edge(field),
-                         gc_ref_from_heap_object(val));
+  gc_write_barrier(gc_ref_from_heap_object(obj), sizeof(Node),
+                   gc_edge(field),
+                   gc_ref_from_heap_object(val));
   *field = val;
 }


@@ -42,13 +42,13 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
   GC_CRASH();
 }
-static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
+static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
   return GC_WRITE_BARRIER_NONE;
 }
-static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+static inline size_t gc_write_barrier_card_table_alignment(void) {
   GC_CRASH();
 }
-static inline size_t gc_small_write_barrier_card_size(void) {
+static inline size_t gc_write_barrier_card_size(void) {
   GC_CRASH();
 }

semi.c

@@ -375,6 +375,10 @@ void gc_collect(struct gc_mutator *mut) {
   collect(mut, 0);
 }
 
+void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
+                             struct gc_edge edge, struct gc_ref new_val) {
+}
+
 static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
   collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
 }


@@ -86,6 +86,25 @@ static inline void gc_object_forward_nonatomic(struct gc_ref ref,
   *tag_word(ref) = gc_ref_value(new_ref);
 }
 
+static inline void gc_object_set_remembered(struct gc_ref ref) {
+  uintptr_t *loc = tag_word(ref);
+  uintptr_t tag = *loc;
+  while (!(tag & gcobj_remembered_bit))
+    atomic_compare_exchange_weak(loc, &tag, tag | gcobj_remembered_bit);
+}
+
+static inline int gc_object_is_remembered_nonatomic(struct gc_ref ref) {
+  uintptr_t *loc = tag_word(ref);
+  uintptr_t tag = *loc;
+  return tag & gcobj_remembered_bit;
+}
+
+static inline void gc_object_clear_remembered_nonatomic(struct gc_ref ref) {
+  uintptr_t *loc = tag_word(ref);
+  uintptr_t tag = *loc;
+  *loc = tag & ~(uintptr_t)gcobj_remembered_bit;
+}
+
 static inline struct gc_atomic_forward
 gc_atomic_forward_begin(struct gc_ref ref) {
   uintptr_t tag = atomic_load_explicit(tag_word(ref), memory_order_acquire);

@@ -7,9 +7,11 @@ struct gc_header {
   uintptr_t tag;
 };
 
-// Alloc kind is in bits 1-7, for live objects.
-static const uintptr_t gcobj_alloc_kind_mask = 0x7f;
-static const uintptr_t gcobj_alloc_kind_shift = 1;
+// Alloc kind is in bits 2-7, for live objects.
+static const uintptr_t gcobj_alloc_kind_mask = 0x3f;
+static const uintptr_t gcobj_alloc_kind_shift = 2;
+static const uintptr_t gcobj_remembered_mask = 0x2;
+static const uintptr_t gcobj_remembered_bit = 0x2;
 static const uintptr_t gcobj_forwarded_mask = 0x1;
 static const uintptr_t gcobj_not_forwarded_bit = 0x1;
 static const uintptr_t gcobj_busy = 0;
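Stealing bit 1 for the remembered flag narrows the alloc kind from 7 bits to 6. A worked example using only the constants above (alloc kind 5 is arbitrary; GC_ASSERT is the library's assertion macro):

static void tag_layout_example(void) {
  // A live object of alloc kind 5 that is in the remembered set:
  uintptr_t tag = (5 << gcobj_alloc_kind_shift)   // bits 2-7: kind -> 0b10100
                  | gcobj_remembered_bit          // bit 1          -> 0b00010
                  | gcobj_not_forwarded_bit;      // bit 0: live    -> 0b00001
  GC_ASSERT(tag == 0x17);
  GC_ASSERT(((tag >> gcobj_alloc_kind_shift) & gcobj_alloc_kind_mask) == 5);
  GC_ASSERT(tag & gcobj_remembered_mask);     // in the remembered set
  GC_ASSERT(tag & gcobj_not_forwarded_bit);   // live, not forwarded
}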


@@ -40,16 +40,19 @@ static inline int gc_allocator_needs_clear(void) {
   return 0;
 }
 
-static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
-  if (GC_GENERATIONAL)
-    return GC_WRITE_BARRIER_CARD;
+static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
+  if (GC_GENERATIONAL) {
+    if (obj_size <= gc_allocator_large_threshold())
+      return GC_WRITE_BARRIER_CARD;
+    return GC_WRITE_BARRIER_EXTERN;
+  }
   return GC_WRITE_BARRIER_NONE;
 }
 
-static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+static inline size_t gc_write_barrier_card_table_alignment(void) {
   GC_ASSERT(GC_GENERATIONAL);
   return 4 * 1024 * 1024;
 }
-static inline size_t gc_small_write_barrier_card_size(void) {
+static inline size_t gc_write_barrier_card_size(void) {
   GC_ASSERT(GC_GENERATIONAL);
   return 256;
 }


@@ -1333,6 +1333,10 @@ static void trace_global_conservative_roots(struct gc_heap *heap) {
     (mark_and_globally_enqueue_heap_conservative_roots, heap, NULL);
 }
 
+static void enqueue_generational_root(struct gc_ref ref, struct gc_heap *heap) {
+  tracer_enqueue_root(&heap->tracer, ref);
+}
+
 // Note that it's quite possible (and even likely) that any given remset
 // byte doesn't hold any roots, if all stores were to nursery objects.
 STATIC_ASSERT_EQ(GRANULES_PER_REMSET_BYTE % 8, 0);
 
@@ -1352,7 +1356,7 @@ static void mark_space_trace_card(struct mark_space *space,
       size_t granule = granule_base + granule_offset;
       uintptr_t addr = first_addr_in_slab + granule * GRANULE_SIZE;
       GC_ASSERT(metadata_byte_for_addr(addr) == &slab->metadata[granule]);
-      tracer_enqueue_root(&heap->tracer, gc_ref(addr));
+      enqueue_generational_root(gc_ref(addr), heap);
     }
   }
 }
 
@@ -1385,12 +1389,22 @@ static void mark_space_clear_remembered_set(struct mark_space *space) {
   }
 }
 
+void gc_write_barrier_extern(struct gc_ref obj, size_t obj_size,
+                             struct gc_edge edge, struct gc_ref new_val) {
+  GC_ASSERT(obj_size > gc_allocator_large_threshold());
+  gc_object_set_remembered(obj);
+}
+
 static void trace_generational_roots(struct gc_heap *heap) {
   // TODO: Add lospace nursery.
   if (atomic_load(&heap->gc_kind) & GC_KIND_FLAG_MINOR) {
     mark_space_trace_remembered_set(heap_mark_space(heap), heap);
+    large_object_space_trace_remembered_set(heap_large_object_space(heap),
+                                            enqueue_generational_root,
+                                            heap);
   } else {
     mark_space_clear_remembered_set(heap_mark_space(heap));
+    large_object_space_clear_remembered_set(heap_large_object_space(heap));
   }
 }