
More typesafety, more gc_ref

Andy Wingo 2022-08-16 22:48:46 +02:00
parent 92b8f1e917
commit 6ecf226570
10 changed files with 90 additions and 114 deletions
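
The theme of the diff below: raw `void *` and `uintptr_t` object pointers give way to `struct gc_ref`, a one-word wrapper that makes the object/address distinction explicit in the embedder API. As a rough sketch, inferred from the call sites in this commit rather than quoted from gc-ref.h, the type and the accessors used throughout look something like:

    #include <stdint.h>

    struct gc_ref { uintptr_t value; };

    // Struct tags and function names live in separate namespaces in C,
    // so the constructor can share the name of the type.
    static inline struct gc_ref gc_ref(uintptr_t value) {
      return (struct gc_ref){ value };
    }
    // A null ref stands in for "no object", e.g. a failed evacuation
    // allocation.
    static inline struct gc_ref gc_ref_null(void) { return gc_ref(0); }
    static inline int gc_ref_is_heap_object(struct gc_ref ref) {
      return ref.value != 0;
    }
    static inline uintptr_t gc_ref_value(struct gc_ref ref) {
      return ref.value;
    }
    static inline struct gc_ref gc_ref_from_heap_object(void *obj) {
      return gc_ref((uintptr_t) obj);
    }
    static inline void* gc_ref_heap_object(struct gc_ref ref) {
      return (void*) gc_ref_value(ref);
    }

Passing the one-word struct by value costs nothing at the ABI level, but the compiler now rejects accidental mixing of object references with arbitrary integers and pointers.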

gc-embedder-api.h

@@ -12,7 +12,7 @@ struct gc_mutator_roots;
 struct gc_heap_roots;
 struct gc_atomic_forward;
 
-GC_EMBEDDER_API inline void gc_trace_object(void *object,
+GC_EMBEDDER_API inline void gc_trace_object(struct gc_ref ref,
                                             void (*trace_edge)(struct gc_edge edge,
                                                                void *trace_data),
                                             void *trace_data,
@@ -26,15 +26,16 @@ GC_EMBEDDER_API inline void gc_trace_heap_roots(struct gc_heap_roots *roots,
                                                                void *trace_data),
                                                 void *trace_data);
 
-GC_EMBEDDER_API inline uintptr_t gc_object_forwarded_nonatomic(void *object);
-GC_EMBEDDER_API inline void gc_object_forward_nonatomic(void *object, uintptr_t new_addr);
+GC_EMBEDDER_API inline uintptr_t gc_object_forwarded_nonatomic(struct gc_ref ref);
+GC_EMBEDDER_API inline void gc_object_forward_nonatomic(struct gc_ref ref,
+                                                        struct gc_ref new_ref);
 
-GC_EMBEDDER_API inline struct gc_atomic_forward gc_atomic_forward_begin(void *obj);
+GC_EMBEDDER_API inline struct gc_atomic_forward gc_atomic_forward_begin(struct gc_ref ref);
 GC_EMBEDDER_API inline void gc_atomic_forward_acquire(struct gc_atomic_forward *);
 GC_EMBEDDER_API inline int gc_atomic_forward_retry_busy(struct gc_atomic_forward *);
 GC_EMBEDDER_API inline void gc_atomic_forward_abort(struct gc_atomic_forward *);
 GC_EMBEDDER_API inline void gc_atomic_forward_commit(struct gc_atomic_forward *,
-                                                     uintptr_t new_addr);
+                                                     struct gc_ref new_ref);
 GC_EMBEDDER_API inline uintptr_t gc_atomic_forward_address(struct gc_atomic_forward *);

gc-forwarding.h

@@ -2,6 +2,7 @@
 #define GC_FORWARDING_H
 
 #include <stdint.h>
+#include "gc-ref.h"
 
 enum gc_forwarding_state {
   GC_FORWARDING_STATE_FORWARDED,
@@ -12,7 +13,7 @@ enum gc_forwarding_state {
 };
 
 struct gc_atomic_forward {
-  void *object;
+  struct gc_ref ref;
   uintptr_t data;
   enum gc_forwarding_state state;
 };
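
Read together with the embedder implementation further down, the forwarding states form a small protocol (my summary, inferred from this commit's code): gc_atomic_forward_begin() yields NOT_FORWARDED, FORWARDED, or BUSY; gc_atomic_forward_acquire() on a NOT_FORWARDED object CASes the tag word and yields ACQUIRED on success, or BUSY/FORWARDED if another thread won the race; an ACQUIRED holder must finish with either gc_atomic_forward_commit(), after which the state is FORWARDED, or gc_atomic_forward_abort(); and gc_atomic_forward_retry_busy() polls a BUSY object until its owner commits or aborts.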

large-object-space.h

@@ -9,6 +9,7 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#include "gc-ref.h"
 #include "address-map.h"
 #include "address-set.h"
@@ -19,7 +20,6 @@
 // copying collector while not actually copying data.
 
 struct gc_heap;
-struct gcobj;
 
 struct large_object_space {
   pthread_mutex_t lock;
@@ -71,8 +71,9 @@ static void large_object_space_start_gc(struct large_object_space *space,
 }
 
 static int large_object_space_copy(struct large_object_space *space,
-                                   uintptr_t addr) {
+                                   struct gc_ref ref) {
   int copied = 0;
+  uintptr_t addr = gc_ref_value(ref);
   pthread_mutex_lock(&space->lock);
   if (!address_set_contains(&space->from_space, addr))
     // Already copied; object is grey or white.
@@ -145,12 +146,11 @@ static void large_object_space_finish_gc(struct large_object_space *space,
 }
 
 static inline int large_object_space_contains(struct large_object_space *space,
-                                              struct gcobj *ptr) {
-  int ret;
+                                              struct gc_ref ref) {
   pthread_mutex_lock(&space->lock);
   // ptr might be in fromspace or tospace. Just check the object_pages table, which
   // contains both, as well as object_pages for free blocks.
-  ret = address_map_contains(&space->object_pages, (uintptr_t)ptr);
+  int ret = address_map_contains(&space->object_pages, gc_ref_value(ref));
   pthread_mutex_unlock(&space->lock);
   return ret;
 }

parallel-tracer.h

@@ -449,7 +449,7 @@ static void tracer_release(struct gc_heap *heap) {
 struct gcobj;
 
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
-static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
+static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
@@ -571,7 +571,7 @@ trace_worker_trace(struct trace_worker *worker) {
   size_t n = 0;
   DEBUG("tracer #%zu: running trace loop\n", worker->id);
   while (1) {
-    struct gcobj * obj;
+    void *obj;
     if (!local_trace_queue_empty(&trace.local)) {
       obj = local_trace_queue_pop(&trace.local);
     } else {
@@ -579,7 +579,7 @@ trace_worker_trace(struct trace_worker *worker) {
       if (!obj)
         break;
     }
-    trace_one(obj, &trace);
+    trace_one(gc_ref_from_heap_object(obj), &trace);
     n++;
   }
   DEBUG("tracer #%zu: done tracing, %zu objects traced\n", worker->id, n);

semi.c

@@ -101,41 +101,42 @@ static void flip(struct semi_space *space) {
   space->count++;
 }
 
-static void* copy(struct semi_space *space, void *obj) {
+static struct gc_ref copy(struct semi_space *space, struct gc_ref ref) {
   size_t size;
-  gc_trace_object(obj, NULL, NULL, &size);
-  void *new_obj = (void*)space->hp;
-  memcpy(new_obj, obj, size);
-  *(uintptr_t*) obj = space->hp;
-  space->hp += align_up (size, GC_ALIGNMENT);
-  return new_obj;
+  gc_trace_object(ref, NULL, NULL, &size);
+  struct gc_ref new_ref = gc_ref(space->hp);
+  memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(ref), size);
+  gc_object_forward_nonatomic(ref, new_ref);
+  space->hp += align_up(size, GC_ALIGNMENT);
+  return new_ref;
 }
 
-static uintptr_t scan(struct gc_heap *heap, uintptr_t grey) {
+static uintptr_t scan(struct gc_heap *heap, struct gc_ref grey) {
   size_t size;
-  gc_trace_object((void*)grey, visit, heap, &size);
-  return grey + align_up(size, GC_ALIGNMENT);
+  gc_trace_object(grey, visit, heap, &size);
+  return gc_ref_value(grey) + align_up(size, GC_ALIGNMENT);
 }
 
-static void* forward(struct semi_space *space, void *obj) {
+static struct gc_ref forward(struct semi_space *space, struct gc_ref obj) {
   uintptr_t forwarded = gc_object_forwarded_nonatomic(obj);
-  return forwarded ? (void*)forwarded : copy(space, obj);
+  return forwarded ? gc_ref(forwarded) : copy(space, obj);
 }
 
 static void visit_semi_space(struct gc_heap *heap, struct semi_space *space,
-                             struct gc_edge edge, void *obj) {
-  gc_edge_update(edge, gc_ref_from_heap_object(forward(space, obj)));
+                             struct gc_edge edge, struct gc_ref ref) {
+  gc_edge_update(edge, forward(space, ref));
 }
 
 static void visit_large_object_space(struct gc_heap *heap,
                                      struct large_object_space *space,
-                                     void *obj) {
-  if (large_object_space_copy(space, (uintptr_t)obj))
-    scan(heap, (uintptr_t)obj);
+                                     struct gc_ref ref) {
+  if (large_object_space_copy(space, ref))
+    gc_trace_object(ref, visit, heap, NULL);
 }
 
-static int semi_space_contains(struct semi_space *space, void *obj) {
-  return (((uintptr_t)obj) - space->base) < space->size;
+static int semi_space_contains(struct semi_space *space, struct gc_ref ref) {
+  uintptr_t addr = gc_ref_value(ref);
+  return addr - space->base < space->size;
 }
 
 static void visit(struct gc_edge edge, void *visit_data) {
@@ -143,11 +144,10 @@ static void visit(struct gc_edge edge, void *visit_data) {
   struct gc_ref ref = gc_edge_ref(edge);
   if (!gc_ref_is_heap_object(ref))
     return;
-  void *obj = gc_ref_heap_object(ref);
-  if (semi_space_contains(heap_semi_space(heap), obj))
-    visit_semi_space(heap, heap_semi_space(heap), edge, obj);
-  else if (large_object_space_contains(heap_large_object_space(heap), obj))
-    visit_large_object_space(heap, heap_large_object_space(heap), obj);
+  if (semi_space_contains(heap_semi_space(heap), ref))
+    visit_semi_space(heap, heap_semi_space(heap), edge, ref);
+  else if (large_object_space_contains(heap_large_object_space(heap), ref))
+    visit_large_object_space(heap, heap_large_object_space(heap), ref);
   else
     GC_CRASH();
 }
@@ -164,7 +164,7 @@ static void collect(struct gc_mutator *mut) {
   gc_trace_mutator_roots(mut->roots, visit, heap);
   // fprintf(stderr, "pushed %zd bytes in roots\n", space->hp - grey);
   while(grey < semi->hp)
-    grey = scan(heap, grey);
+    grey = scan(heap, gc_ref(grey));
   large_object_space_finish_gc(large, 0);
   semi_space_set_stolen_pages(semi, large->live_pages_at_last_collection);
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
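
Taken together, copy(), scan(), and forward() are Cheney's algorithm: the region between the scan pointer and the to-space allocation pointer is the grey set. A condensed paraphrase of the loop in collect(), using the names in the diff above (the initialization of grey is not shown in the hunk and is assumed here):

    uintptr_t grey = semi->hp;            // scan finger starts at the to-space frontier
    gc_trace_mutator_roots(mut->roots, visit, heap);  // roots copied; hp advances
    while (grey < semi->hp)               // [grey, hp) holds unscanned copies
      grey = scan(heap, gc_ref(grey));    // trace one object, advance the finger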

serial-tracer.h

@@ -139,7 +139,7 @@ static void tracer_release(struct gc_heap *heap) {
 struct gcobj;
 
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
-static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
+static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
@@ -163,7 +163,7 @@ static inline void
 tracer_trace(struct gc_heap *heap) {
   struct gcobj *obj;
   while ((obj = trace_queue_pop(&heap_tracer(heap)->queue)))
-    trace_one(obj, heap);
+    trace_one(gc_ref_from_heap_object(obj), heap);
 }
 
 #endif // SERIAL_TRACER_H

simple-allocator.h

@@ -7,14 +7,14 @@
 static inline void*
 gc_allocate_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
   void *obj = gc_allocate(mut, bytes);
-  *tag_word(obj) = tag_live(kind);
+  *tag_word(gc_ref_from_heap_object(obj)) = tag_live(kind);
   return obj;
 }
 
 static inline void*
 gc_allocate_pointerless_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
   void *obj = gc_allocate_pointerless(mut, bytes);
-  *tag_word(obj) = tag_live(kind);
+  *tag_word(gc_ref_from_heap_object(obj)) = tag_live(kind);
   return obj;
 }

simple-gc-embedder.h

@@ -3,18 +3,19 @@
 #include "simple-tagging-scheme.h"
 #include "gc-embedder-api.h"
 
-static inline void gc_trace_object(void *object,
+static inline void gc_trace_object(struct gc_ref ref,
                                    void (*trace_edge)(struct gc_edge edge,
                                                       void *trace_data),
                                    void *trace_data,
                                    size_t *size) {
-  switch (tag_live_alloc_kind(*tag_word(object))) {
+  switch (tag_live_alloc_kind(*tag_word(ref))) {
 #define SCAN_OBJECT(name, Name, NAME) \
   case ALLOC_KIND_##NAME: \
     if (trace_edge) \
-      visit_##name##_fields((Name*)object, trace_edge, trace_data); \
+      visit_##name##_fields(gc_ref_heap_object(ref), trace_edge, \
+                            trace_data); \
     if (size) \
-      *size = name##_size(object); \
+      *size = name##_size(gc_ref_heap_object(ref)); \
     break;
   FOR_EACH_HEAP_OBJECT_KIND(SCAN_OBJECT)
 #undef SCAN_OBJECT
@@ -29,19 +30,19 @@ static inline void gc_trace_object(void *object,
 #include "conservative-roots-embedder.h"
 #endif
 
-static inline uintptr_t gc_object_forwarded_nonatomic(void *object) {
-  uintptr_t tag = *tag_word(object);
+static inline uintptr_t gc_object_forwarded_nonatomic(struct gc_ref ref) {
+  uintptr_t tag = *tag_word(ref);
   return (tag & gcobj_not_forwarded_bit) ? 0 : tag;
 }
 
-static inline void gc_object_forward_nonatomic(void *object,
-                                               uintptr_t new_addr) {
-  *tag_word(object) = new_addr;
+static inline void gc_object_forward_nonatomic(struct gc_ref ref,
+                                               struct gc_ref new_ref) {
+  *tag_word(ref) = gc_ref_value(new_ref);
 }
 
 static inline struct gc_atomic_forward
-gc_atomic_forward_begin(void *object) {
-  uintptr_t tag = atomic_load_explicit(tag_word(object), memory_order_acquire);
+gc_atomic_forward_begin(struct gc_ref ref) {
+  uintptr_t tag = atomic_load_explicit(tag_word(ref), memory_order_acquire);
   enum gc_forwarding_state state;
   if (tag == gcobj_busy)
     state = GC_FORWARDING_STATE_BUSY;
@@ -49,13 +50,13 @@ gc_atomic_forward_begin(void *object) {
     state = GC_FORWARDING_STATE_NOT_FORWARDED;
   else
     state = GC_FORWARDING_STATE_FORWARDED;
-  return (struct gc_atomic_forward){ object, tag, state };
+  return (struct gc_atomic_forward){ ref, tag, state };
 }
 
 static inline int
 gc_atomic_forward_retry_busy(struct gc_atomic_forward *fwd) {
   GC_ASSERT(fwd->state == GC_FORWARDING_STATE_BUSY);
-  uintptr_t tag = atomic_load_explicit(tag_word(fwd->object),
+  uintptr_t tag = atomic_load_explicit(tag_word(fwd->ref),
                                        memory_order_acquire);
   if (tag == gcobj_busy)
     return 0;
@@ -71,7 +72,7 @@ gc_atomic_forward_retry_busy(struct gc_atomic_forward *fwd) {
 static inline void
 gc_atomic_forward_acquire(struct gc_atomic_forward *fwd) {
   GC_ASSERT(fwd->state == GC_FORWARDING_STATE_NOT_FORWARDED);
-  if (atomic_compare_exchange_strong(tag_word(fwd->object), &fwd->data,
+  if (atomic_compare_exchange_strong(tag_word(fwd->ref), &fwd->data,
                                      gcobj_busy))
     fwd->state = GC_FORWARDING_STATE_ACQUIRED;
   else if (fwd->data == gcobj_busy)
@@ -85,15 +86,16 @@ gc_atomic_forward_acquire(struct gc_atomic_forward *fwd) {
 static inline void
 gc_atomic_forward_abort(struct gc_atomic_forward *fwd) {
   GC_ASSERT(fwd->state == GC_FORWARDING_STATE_ACQUIRED);
-  atomic_store_explicit(tag_word(fwd->object), fwd->data, memory_order_release);
+  atomic_store_explicit(tag_word(fwd->ref), fwd->data, memory_order_release);
   fwd->state = GC_FORWARDING_STATE_ABORTED;
 }
 
 static inline void
-gc_atomic_forward_commit(struct gc_atomic_forward *fwd, uintptr_t new_addr) {
+gc_atomic_forward_commit(struct gc_atomic_forward *fwd, struct gc_ref new_ref) {
   GC_ASSERT(fwd->state == GC_FORWARDING_STATE_ACQUIRED);
-  *tag_word((void*)new_addr) = fwd->data;
-  atomic_store_explicit(tag_word(fwd->object), new_addr, memory_order_release);
+  *tag_word(new_ref) = fwd->data;
+  atomic_store_explicit(tag_word(fwd->ref), gc_ref_value(new_ref),
+                        memory_order_release);
   fwd->state = GC_FORWARDING_STATE_FORWARDED;
 }
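
For orientation, here is how a collector thread might drive the protocol above when racing to evacuate an object. This is a condensed sketch in the spirit of mark_space_evacuate_or_mark_object in whippet.c further below, not code from this commit: copy_object is a hypothetical helper, and the exact states reachable after a BUSY spin are my assumption.

    static inline void
    visit_field_during_evacuation(struct gc_edge edge, struct gc_ref old_ref) {
      struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
      if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
        gc_atomic_forward_acquire(&fwd);         // CAS the tag word to "busy"
      switch (fwd.state) {
      case GC_FORWARDING_STATE_ACQUIRED: {       // we won; copying is up to us
        struct gc_ref new_ref = copy_object(old_ref);  // hypothetical helper
        gc_atomic_forward_commit(&fwd, new_ref); // publish forwarding address
        gc_edge_update(edge, new_ref);
        break;
      }
      case GC_FORWARDING_STATE_BUSY:             // another thread is copying
        while (!gc_atomic_forward_retry_busy(&fwd))
          ;                                      // spin until commit or abort
        // fall through: by assumption the state is now resolved
      case GC_FORWARDING_STATE_FORWARDED:
        if (fwd.state == GC_FORWARDING_STATE_FORWARDED)
          gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
        break;
      default:
        break;
      }
    }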

simple-tagging-scheme.h

@@ -21,8 +21,8 @@ static inline uintptr_t tag_live(uint8_t alloc_kind) {
     | gcobj_not_forwarded_bit;
 }
 
-static inline uintptr_t* tag_word(void *object) {
-  struct gc_header *header = object;
+static inline uintptr_t* tag_word(struct gc_ref ref) {
+  struct gc_header *header = gc_ref_heap_object(ref);
   return &header->tag;
 }

whippet.c

@@ -182,8 +182,6 @@ static struct slab *object_slab(void *obj) {
   return (struct slab*) base;
 }
 
-static int heap_object_is_large(struct gcobj *obj);
 
 static uint8_t *object_metadata_byte(void *obj) {
   uintptr_t addr = (uintptr_t) obj;
   uintptr_t base = addr & ~(SLAB_SIZE - 1);
@@ -193,14 +191,6 @@ static uint8_t *object_metadata_byte(void *obj) {
 #define GRANULES_PER_BLOCK (BLOCK_SIZE / GRANULE_SIZE)
 #define GRANULES_PER_REMSET_BYTE (GRANULES_PER_BLOCK / REMSET_BYTES_PER_BLOCK)
 
-static uint8_t *object_remset_byte(void *obj) {
-  GC_ASSERT(!heap_object_is_large(obj));
-  uintptr_t addr = (uintptr_t) obj;
-  uintptr_t base = addr & ~(SLAB_SIZE - 1);
-  uintptr_t granule = (addr & (SLAB_SIZE - 1)) >> GRANULE_SIZE_LOG_2;
-  uintptr_t remset_byte = granule / GRANULES_PER_REMSET_BYTE;
-  return (uint8_t*) (base + remset_byte);
-}
 
 static struct block_summary* block_summary_for_addr(uintptr_t addr) {
   uintptr_t base = addr & ~(SLAB_SIZE - 1);
@@ -376,12 +366,6 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
 static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
-static int heap_object_is_large(struct gcobj *obj) {
-  size_t size;
-  gc_trace_object(obj, NULL, NULL, &size);
-  return size > LARGE_OBJECT_THRESHOLD;
-}
 
 static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) {
   return object_metadata_byte(obj);
 }
@@ -475,7 +459,7 @@ static void finish_evacuation_allocator(struct evacuation_allocator *alloc,
     push_block(empties, pop_block(targets));
 }
 
-static struct gcobj *evacuation_allocate(struct mark_space *space,
+static struct gc_ref evacuation_allocate(struct mark_space *space,
                                          size_t granules) {
   // All collector threads compete to allocate from what is logically a
   // single bump-pointer arena, which is actually composed of a linked
@@ -490,7 +474,7 @@ static struct gcobj *evacuation_allocate(struct mark_space *space,
   do {
     if (prev >= alloc->limit)
       // No more space.
-      return NULL;
+      return gc_ref_null();
    next = prev + bytes;
    if ((prev ^ next) & ~block_mask)
      // Allocation straddles a block boundary; advance so it starts a
@@ -522,7 +506,7 @@ static struct gcobj *evacuation_allocate(struct mark_space *space,
     if (base >= alloc->limit) {
       // Ran out of blocks!
       GC_ASSERT(!block);
-      return NULL;
+      return gc_ref_null();
     }
     GC_ASSERT(block);
     // This store can race with other allocators, but that's OK as long
@@ -534,24 +518,23 @@ static struct gcobj *evacuation_allocate(struct mark_space *space,
   }
   uintptr_t addr = block + (next & block_mask) - bytes;
-  return (struct gcobj*) addr;
+  return gc_ref(addr);
 }
 
 static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
                                                      struct gc_edge edge,
                                                      struct gc_ref old_ref) {
-  struct gcobj *obj = gc_ref_heap_object(old_ref);
-  uint8_t *metadata = object_metadata_byte(obj);
+  uint8_t *metadata = object_metadata_byte(gc_ref_heap_object(old_ref));
   uint8_t byte = *metadata;
   if (byte & space->marked_mask)
     return 0;
   if (space->evacuating &&
-      block_summary_has_flag(block_summary_for_addr((uintptr_t)obj),
+      block_summary_has_flag(block_summary_for_addr(gc_ref_value(old_ref)),
                              BLOCK_EVACUATE) &&
       ((byte & METADATA_BYTE_PINNED) == 0)) {
     // This is an evacuating collection, and we are attempting to
     // evacuate this block, and this particular object isn't pinned.
-    struct gc_atomic_forward fwd = gc_atomic_forward_begin(obj);
+    struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
     if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
       gc_atomic_forward_acquire(&fwd);
@@ -564,19 +547,19 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
     case GC_FORWARDING_STATE_ACQUIRED: {
       // We claimed the object successfully; evacuating is up to us.
       size_t object_granules = mark_space_live_object_granules(metadata);
-      struct gcobj *new_obj = evacuation_allocate(space, object_granules);
-      if (new_obj) {
+      struct gc_ref new_ref = evacuation_allocate(space, object_granules);
+      if (gc_ref_is_heap_object(new_ref)) {
        // Copy object contents before committing, as we don't know what
        // part of the object (if any) will be overwritten by the
        // commit.
-        memcpy(new_obj, obj, object_granules * GRANULE_SIZE);
-        gc_atomic_forward_commit(&fwd, (uintptr_t)new_obj);
+        memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref),
+               object_granules * GRANULE_SIZE);
+        gc_atomic_forward_commit(&fwd, new_ref);
        // Now update extent metadata, and indicate to the caller that
        // the object's fields need to be traced.
-        uint8_t *new_metadata = object_metadata_byte(new_obj);
+        uint8_t *new_metadata = object_metadata_byte(gc_ref_heap_object(new_ref));
         memcpy(new_metadata + 1, metadata + 1, object_granules - 1);
-        gc_edge_update(edge, gc_ref_from_heap_object(new_obj));
-        obj = new_obj;
+        gc_edge_update(edge, new_ref);
         metadata = new_metadata;
         // Fall through to set mark bits.
       } else {
@@ -616,36 +599,35 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
 }
 
 static inline int mark_space_contains(struct mark_space *space,
-                                      struct gcobj *obj) {
-  uintptr_t addr = (uintptr_t)obj;
+                                      struct gc_ref ref) {
+  uintptr_t addr = gc_ref_value(ref);
   return addr - space->low_addr < space->extent;
 }
 
 static inline int large_object_space_mark_object(struct large_object_space *space,
-                                                 struct gcobj *obj) {
-  return large_object_space_copy(space, (uintptr_t)obj);
+                                                 struct gc_ref ref) {
+  return large_object_space_copy(space, ref);
 }
 
 static inline int trace_edge(struct gc_heap *heap, struct gc_edge edge) {
   struct gc_ref ref = gc_edge_ref(edge);
   if (!gc_ref_is_heap_object(ref))
     return 0;
-  struct gcobj *obj = gc_ref_heap_object(ref);
-  if (GC_LIKELY(mark_space_contains(heap_mark_space(heap), obj))) {
+  if (GC_LIKELY(mark_space_contains(heap_mark_space(heap), ref))) {
     if (heap_mark_space(heap)->evacuating)
       return mark_space_evacuate_or_mark_object(heap_mark_space(heap), edge,
                                                 ref);
     return mark_space_mark_object(heap_mark_space(heap), ref);
   }
-  else if (large_object_space_contains(heap_large_object_space(heap), obj))
+  else if (large_object_space_contains(heap_large_object_space(heap), ref))
     return large_object_space_mark_object(heap_large_object_space(heap),
-                                          obj);
+                                          ref);
   else
     GC_CRASH();
 }
 
-static inline void trace_one(struct gcobj *obj, void *mark_data) {
-  gc_trace_object(obj, tracer_visit, mark_data, NULL);
+static inline void trace_one(struct gc_ref ref, void *mark_data) {
+  gc_trace_object(ref, tracer_visit, mark_data, NULL);
 }
 
 static int heap_has_multiple_mutators(struct gc_heap *heap) {
@@ -989,16 +971,6 @@ static void trace_global_roots(struct gc_heap *heap) {
   gc_trace_heap_roots(heap->roots, trace_and_enqueue_globally, heap);
 }
 
-static inline int
-heap_object_is_young(struct gc_heap *heap, struct gcobj *obj) {
-  if (GC_UNLIKELY(!mark_space_contains(heap_mark_space(heap), obj))) {
-    // No lospace nursery, for the moment.
-    return 0;
-  }
-  GC_ASSERT(!heap_object_is_large(obj));
-  return (*object_metadata_byte(obj)) & METADATA_BYTE_YOUNG;
-}
 
 static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
   GC_ASSERT(((uintptr_t)mark & 7) == 0);
   uint8_t * __attribute__((aligned(8))) aligned_mark = mark;