1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-05-20 11:40:18 +02:00

Add gc_object_is_old_generation

Will be useful for write barriers.
This commit is contained in:
Andy Wingo 2024-10-01 15:44:55 +02:00
parent 42bf36d7cc
commit 1493bf6398
12 changed files with 119 additions and 4 deletions

View file

@@ -40,6 +40,13 @@ static inline int gc_allocator_needs_clear(void) {
return 0;
}
// This collector has no generational mode, so callers never need an
// inline old-generation check.  The parameter is named (though unused)
// for consistency with the other collectors' definitions and because
// unnamed parameters in function definitions are only valid from C23.
static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) {
  return GC_OLD_GENERATION_CHECK_NONE;
}
// Bit pattern for the alloc-table old-generation fast path.  This
// collector reports GC_OLD_GENERATION_CHECK_NONE, so that fast path is
// never taken and reaching this accessor is a programming error.
static inline uint8_t gc_old_generation_check_alloc_table_bit_pattern(void) {
  GC_CRASH();
}
// No generational mode means no store barrier is ever required.  The
// parameter is named (though unused) for consistency with the other
// collectors' definitions; unnamed definition parameters are C23-only.
static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
  return GC_WRITE_BARRIER_NONE;
}

View file

@@ -179,6 +179,33 @@ static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
// FIXME: remove :P
GC_API_ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t bytes);
GC_API_ int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                             struct gc_ref obj) GC_NEVER_INLINE;
// Fast inline test of whether OBJ is in the old generation.  The strategy
// is chosen per-collector (and per object size) by
// gc_old_generation_check_kind.
static inline int gc_object_is_old_generation(struct gc_mutator *mut,
                                              struct gc_ref obj,
                                              size_t obj_size) GC_ALWAYS_INLINE;
static inline int gc_object_is_old_generation(struct gc_mutator *mut,
                                              struct gc_ref obj,
                                              size_t obj_size) {
  switch (gc_old_generation_check_kind(obj_size)) {
  case GC_OLD_GENERATION_CHECK_NONE:
    // Non-generational configuration: no object is ever "old".  Matches
    // the 0-returning slow paths of the non-generational collectors.
    return 0;
  case GC_OLD_GENERATION_CHECK_ALLOC_TABLE: {
    // One table byte per granule, located at a fixed offset within the
    // aligned block; any bit from the collector-provided pattern being
    // set in that byte marks the object as old.
    size_t alignment = gc_allocator_alloc_table_alignment();
    GC_ASSERT(alignment);
    uintptr_t addr = gc_ref_value(obj);
    uintptr_t base = addr & ~(alignment - 1);
    size_t granule_size = gc_allocator_small_granule_size();
    uintptr_t granule = (addr & (alignment - 1)) / granule_size;
    uint8_t *byte = (uint8_t*)(base + granule);
    return (*byte) & gc_old_generation_check_alloc_table_bit_pattern();
  }
  case GC_OLD_GENERATION_CHECK_SLOW:
    // Out-of-line check, e.g. for large objects kept in a separate space.
    return gc_object_is_old_generation_slow(mut, obj);
  default:
    GC_CRASH();
  }
}
GC_API_ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
size_t obj_size, struct gc_edge edge,
struct gc_ref new_val) GC_NEVER_INLINE;
@@ -202,11 +229,14 @@ static inline void gc_write_barrier(struct gc_mutator *mut, struct gc_ref obj,
return;
}
case GC_WRITE_BARRIER_FIELD: {
if (!gc_object_is_old_generation(mut, obj, obj_size))
return;
size_t field_table_alignment = gc_write_barrier_field_table_alignment();
size_t fields_per_byte = gc_write_barrier_field_fields_per_byte();
uint8_t first_bit_pattern = gc_write_barrier_field_first_bit_pattern();
uintptr_t addr = (uintptr_t) gc_edge_loc(edge);
uintptr_t addr = gc_ref_value(obj);
uintptr_t base = addr & ~(field_table_alignment - 1);
uintptr_t field = (addr & (field_table_alignment - 1)) / sizeof(uintptr_t);
uintptr_t log_byte = field / fields_per_byte;

View file

@@ -27,6 +27,16 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) GC_ALWAYS_INLIN
static inline int gc_allocator_needs_clear(void) GC_ALWAYS_INLINE;
// Strategy a collector advertises for implementing the inline
// gc_object_is_old_generation fast path.
enum gc_old_generation_check_kind {
  // No check available (e.g. the collector is not generational).
  GC_OLD_GENERATION_CHECK_NONE,
  // Test the per-granule allocation-table byte inline.
  GC_OLD_GENERATION_CHECK_ALLOC_TABLE,
  // Defer to the out-of-line gc_object_is_old_generation_slow.
  GC_OLD_GENERATION_CHECK_SLOW
};
static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) GC_ALWAYS_INLINE;
// Declared `static inline` to match its always-inline definitions and the
// sibling declaration above (it was previously declared plain `static`).
static inline uint8_t gc_old_generation_check_alloc_table_bit_pattern(void) GC_ALWAYS_INLINE;
enum gc_write_barrier_kind {
GC_WRITE_BARRIER_NONE,
GC_WRITE_BARRIER_CARD,

View file

@@ -40,6 +40,19 @@ static inline int gc_allocator_needs_clear(void) {
return 0;
}
// Choose how callers should test whether an object of OBJ_SIZE bytes is in
// the old generation: small objects can be checked inline against the alloc
// table, large ones take the slow path, and non-generational builds never
// need a check.
static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) {
  if (!GC_GENERATIONAL)
    return GC_OLD_GENERATION_CHECK_NONE;
  return obj_size <= gc_allocator_large_threshold()
    ? GC_OLD_GENERATION_CHECK_ALLOC_TABLE
    : GC_OLD_GENERATION_CHECK_SLOW;
}
// Alloc-table byte mask for the inline old-generation check: any of the
// three mark bits being set identifies an old (marked) object.
static inline uint8_t gc_old_generation_check_alloc_table_bit_pattern(void) {
  return 0x0e;  /* bits 1, 2 and 3: the mark bits */
}
static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
if (GC_GENERATIONAL) {
if (obj_size <= gc_allocator_large_threshold())

View file

@@ -43,6 +43,13 @@ static inline int gc_allocator_needs_clear(void) {
return 0;
}
// This collector is not generational, so no inline old-generation check
// exists.  The parameter is named (though unused) for consistency with the
// named variants in sibling files; unnamed definition parameters are
// C23-only.
static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) {
  return GC_OLD_GENERATION_CHECK_NONE;
}
// Never reached: with GC_OLD_GENERATION_CHECK_NONE reported above, the
// alloc-table fast path that would consult this pattern is never taken.
static inline uint8_t gc_old_generation_check_alloc_table_bit_pattern(void) {
  GC_CRASH();
}
// This collector never requires a store barrier; obj_size is unused.
static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
  return GC_WRITE_BARRIER_NONE;
}

View file

@@ -42,6 +42,13 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
GC_CRASH();
}
// Non-generational collector: no inline old-generation check.  The
// parameter is named (though unused) for consistency with the named
// variants elsewhere; unnamed definition parameters are C23-only.
static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) {
  return GC_OLD_GENERATION_CHECK_NONE;
}
// Never reached: this collector reports GC_OLD_GENERATION_CHECK_NONE, so
// the alloc-table fast path that would consult this pattern never runs.
static inline uint8_t gc_old_generation_check_alloc_table_bit_pattern(void) {
  GC_CRASH();
}
// No store barrier is ever needed by this collector.  The parameter is
// named (though unused) for consistency with the named variants elsewhere;
// unnamed definition parameters are C23-only.
static inline enum gc_write_barrier_kind gc_write_barrier_kind(size_t obj_size) {
  return GC_WRITE_BARRIER_NONE;
}

View file

@@ -149,6 +149,11 @@ void gc_collect(struct gc_mutator *mut,
}
}
// Slow-path old-generation test.  This collector keeps no separate old
// generation, so no object is ever reported as old.
int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                     struct gc_ref obj) {
  return 0;
}
void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
size_t obj_size, struct gc_edge edge,
struct gc_ref new_val) {

View file

@@ -176,8 +176,8 @@ static int large_object_space_is_copied(struct large_object_space *space,
return copied;
}
static int large_object_space_is_old(struct large_object_space *space,
struct gc_ref ref) {
static int large_object_space_is_survivor(struct large_object_space *space,
struct gc_ref ref) {
GC_ASSERT(large_object_space_contains(space, ref));
int old = 0;
uintptr_t addr = gc_ref_value(ref);

View file

@@ -879,6 +879,23 @@ gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
// Otherwise if it's a large or external object, it won't move.
}
// Out-of-line old-generation test: ask whichever space holds OBJ whether
// it survived a previous collection.
int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                     struct gc_ref obj) {
  if (!GC_GENERATIONAL)
    return 0;

  struct gc_heap *heap = mutator_heap(mut);

  struct nofl_space *small = heap_nofl_space(heap);
  if (nofl_space_contains(small, obj))
    return nofl_space_is_survivor(small, obj);

  struct large_object_space *large = heap_large_object_space(heap);
  if (large_object_space_contains(large, obj))
    return large_object_space_is_survivor(large, obj);

  // Neither managed space claims the object: treat it as not old.
  return 0;
}
void
gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
size_t obj_size, struct gc_edge edge,
@@ -887,7 +904,7 @@ gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
GC_ASSERT(obj_size > gc_allocator_large_threshold());
struct gc_heap *heap = mutator_heap(mut);
struct large_object_space *space = heap_large_object_space(heap);
if (!large_object_space_is_old(space, obj))
if (!large_object_space_is_survivor(space, obj))
return;
if (gc_object_set_remembered(obj))
large_object_space_remember_object(space, obj);

View file

@@ -1431,6 +1431,15 @@ nofl_space_pin_object(struct nofl_space *space, struct gc_ref ref) {
memory_order_acquire));
}
static inline int
nofl_space_is_survivor(struct nofl_space *space, struct gc_ref ref) {
  // An object is a survivor if any mark bit is set in its metadata byte.
  uint8_t mark_bits = NOFL_METADATA_BYTE_MARK_0 | NOFL_METADATA_BYTE_MARK_1
    | NOFL_METADATA_BYTE_MARK_2;
  uint8_t *loc = nofl_metadata_byte_for_object(ref);
  return atomic_load_explicit(loc, memory_order_relaxed) & mark_bits;
}
static inline int
nofl_space_evacuate(struct nofl_space *space, uint8_t *metadata, uint8_t byte,
struct gc_edge edge,

View file

@@ -483,6 +483,11 @@ void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
GC_CRASH();
}
// Slow-path old-generation test.  This collector keeps no separate old
// generation, so no object is ever reported as old.
int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                     struct gc_ref obj) {
  return 0;
}
void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
size_t obj_size, struct gc_edge edge,
struct gc_ref new_val) {

View file

@@ -453,6 +453,11 @@ void gc_collect(struct gc_mutator *mut,
collect(mut, 0);
}
// Slow-path old-generation test.  This collector keeps no separate old
// generation, so no object is ever reported as old.
int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                     struct gc_ref obj) {
  return 0;
}
void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
size_t obj_size, struct gc_edge edge,
struct gc_ref new_val) {