1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-06-11 14:21:10 +02:00

nofl: Block marks are bytes

There was no need to use a bitvector, and the marks were only being
partially cleared.  It is more straightforward (and still low-overhead)
to store the marks as bytes.
This commit is contained in:
Andy Wingo 2024-08-30 21:19:51 +02:00
parent 8d6db735fd
commit cf129f10de

View file

@ -236,30 +236,24 @@ nofl_metadata_byte_for_object(struct gc_ref ref) {
return nofl_metadata_byte_for_addr(gc_ref_value(ref)); return nofl_metadata_byte_for_addr(gc_ref_value(ref));
} }
static int static uint8_t*
nofl_block_is_marked(uintptr_t addr) { nofl_block_mark_loc(uintptr_t addr) {
uintptr_t base = align_down(addr, NOFL_SLAB_SIZE); uintptr_t base = align_down(addr, NOFL_SLAB_SIZE);
struct nofl_slab *slab = (struct nofl_slab *) base; struct nofl_slab *slab = (struct nofl_slab *) base;
unsigned block_idx = (addr / NOFL_BLOCK_SIZE) % NOFL_BLOCKS_PER_SLAB; unsigned block_idx = (addr / NOFL_BLOCK_SIZE) % NOFL_BLOCKS_PER_SLAB;
uint8_t mark_byte = block_idx / 8; return &slab->header.block_marks[block_idx];
GC_ASSERT(mark_byte < NOFL_HEADER_BYTES_PER_SLAB); }
uint8_t mark_mask = 1U << (block_idx % 8);
uint8_t byte = atomic_load_explicit(&slab->header.block_marks[mark_byte], static int
memory_order_relaxed); nofl_block_is_marked(uintptr_t addr) {
return byte & mark_mask; return atomic_load_explicit(nofl_block_mark_loc(addr), memory_order_relaxed);
} }
/* Set the mark byte for the block containing ADDR.  The load-before-
   store avoids a redundant write (and cache-line dirtying) when the
   byte is already set; both accesses are relaxed, as the mark is a
   flag with no ordering requirements on surrounding operations. */
static void
nofl_block_set_mark(uintptr_t addr) {
  uint8_t *loc = nofl_block_mark_loc(addr);
  if (!atomic_load_explicit(loc, memory_order_relaxed))
    atomic_store_explicit(loc, 1, memory_order_relaxed);
}
#define NOFL_GRANULES_PER_BLOCK (NOFL_BLOCK_SIZE / NOFL_GRANULE_SIZE)
@ -982,7 +976,7 @@ static void
nofl_space_clear_block_marks(struct nofl_space *space) { nofl_space_clear_block_marks(struct nofl_space *space) {
for (size_t s = 0; s < space->nslabs; s++) { for (size_t s = 0; s < space->nslabs; s++) {
struct nofl_slab *slab = space->slabs[s]; struct nofl_slab *slab = space->slabs[s];
memset(slab->header.block_marks, 0, NOFL_BLOCKS_PER_SLAB / 8); memset(slab->header.block_marks, 0, sizeof(slab->header.block_marks));
} }
} }
@ -1282,8 +1276,7 @@ static inline int
nofl_space_set_nonempty_mark(struct nofl_space *space, uint8_t *metadata, nofl_space_set_nonempty_mark(struct nofl_space *space, uint8_t *metadata,
uint8_t byte, struct gc_ref ref) { uint8_t byte, struct gc_ref ref) {
nofl_space_set_mark(space, metadata, byte); nofl_space_set_mark(space, metadata, byte);
if (!nofl_block_is_marked(gc_ref_value(ref))) nofl_block_set_mark(gc_ref_value(ref));
nofl_block_set_mark(gc_ref_value(ref));
return 1; return 1;
} }