1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-04-30 03:40:34 +02:00

bdw: Disable thread-local freelists for pointerless allocations

There was a race between GC and popping the head off freelists.  We
could have separate freelists for tagged and untagged normal allocations
though.
This commit is contained in:
Andy Wingo 2025-04-23 17:28:46 +02:00
parent 06b53470f4
commit c9063b8027
7 changed files with 52 additions and 65 deletions

View file

@@ -4,8 +4,15 @@
#include "gc-attrs.h" #include "gc-attrs.h"
#include "gc-assert.h" #include "gc-assert.h"
static inline enum gc_allocator_kind gc_allocator_kind(void) { static inline enum gc_inline_allocator_kind
return GC_ALLOCATOR_INLINE_FREELIST; gc_inline_allocator_kind(enum gc_allocation_kind kind) {
switch (kind) {
case GC_ALLOCATION_TAGGED:
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
return GC_INLINE_ALLOCATOR_FREELIST;
default:
return GC_INLINE_ALLOCATOR_NONE;
}
} }
static inline size_t gc_allocator_small_granule_size(void) { static inline size_t gc_allocator_small_granule_size(void) {
return 2 * sizeof(void *); return 2 * sizeof(void *);
@@ -30,11 +37,8 @@ static inline size_t gc_allocator_freelist_offset(size_t size,
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
base = 0; base = 0;
break; break;
case GC_ALLOCATION_UNTAGGED_POINTERLESS: default:
case GC_ALLOCATION_TAGGED_POINTERLESS: GC_CRASH();
base = (sizeof(void*) * gc_allocator_large_threshold() /
gc_allocator_small_granule_size());
break;
} }
size_t bucket = (size - 1) / gc_allocator_small_granule_size(); size_t bucket = (size - 1) / gc_allocator_small_granule_size();
return base + sizeof(void*) * bucket; return base + sizeof(void*) * bucket;

View file

@@ -154,12 +154,12 @@ static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size,
GC_ASSERT(size != 0); GC_ASSERT(size != 0);
GC_ASSERT(size <= gc_allocator_large_threshold()); GC_ASSERT(size <= gc_allocator_large_threshold());
switch (gc_allocator_kind()) { switch (gc_inline_allocator_kind(kind)) {
case GC_ALLOCATOR_INLINE_BUMP_POINTER: case GC_INLINE_ALLOCATOR_BUMP_POINTER:
return gc_allocate_small_fast_bump_pointer(mut, size, kind); return gc_allocate_small_fast_bump_pointer(mut, size, kind);
case GC_ALLOCATOR_INLINE_FREELIST: case GC_INLINE_ALLOCATOR_FREELIST:
return gc_allocate_small_fast_freelist(mut, size, kind); return gc_allocate_small_fast_freelist(mut, size, kind);
case GC_ALLOCATOR_INLINE_NONE: case GC_INLINE_ALLOCATOR_NONE:
return NULL; return NULL;
default: default:
GC_CRASH(); GC_CRASH();

View file

@@ -7,13 +7,13 @@
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
enum gc_allocator_kind { enum gc_inline_allocator_kind {
GC_ALLOCATOR_INLINE_BUMP_POINTER, GC_INLINE_ALLOCATOR_BUMP_POINTER,
GC_ALLOCATOR_INLINE_FREELIST, GC_INLINE_ALLOCATOR_FREELIST,
GC_ALLOCATOR_INLINE_NONE GC_INLINE_ALLOCATOR_NONE
}; };
static inline enum gc_allocator_kind gc_allocator_kind(void) GC_ALWAYS_INLINE; static inline enum gc_inline_allocator_kind gc_inline_allocator_kind(enum gc_allocation_kind) GC_ALWAYS_INLINE;
static inline size_t gc_allocator_large_threshold(void) GC_ALWAYS_INLINE; static inline size_t gc_allocator_large_threshold(void) GC_ALWAYS_INLINE;
static inline size_t gc_allocator_small_granule_size(void) GC_ALWAYS_INLINE; static inline size_t gc_allocator_small_granule_size(void) GC_ALWAYS_INLINE;

View file

@@ -5,8 +5,8 @@
#include "gc-assert.h" #include "gc-assert.h"
#include "gc-attrs.h" #include "gc-attrs.h"
static inline enum gc_allocator_kind gc_allocator_kind(void) { static inline enum gc_inline_allocator_kind gc_inline_allocator_kind(enum gc_allocation_kind kind) {
return GC_ALLOCATOR_INLINE_BUMP_POINTER; return GC_INLINE_ALLOCATOR_BUMP_POINTER;
} }
static inline size_t gc_allocator_small_granule_size(void) { static inline size_t gc_allocator_small_granule_size(void) {
return 16; return 16;

View file

@@ -8,8 +8,8 @@
static const uintptr_t GC_ALIGNMENT = 8; static const uintptr_t GC_ALIGNMENT = 8;
static const size_t GC_LARGE_OBJECT_THRESHOLD = 8192; static const size_t GC_LARGE_OBJECT_THRESHOLD = 8192;
static inline enum gc_allocator_kind gc_allocator_kind(void) { static inline enum gc_inline_allocator_kind gc_inline_allocator_kind(enum gc_allocation_kind kind) {
return GC_ALLOCATOR_INLINE_BUMP_POINTER; return GC_INLINE_ALLOCATOR_BUMP_POINTER;
} }
static inline size_t gc_allocator_small_granule_size(void) { static inline size_t gc_allocator_small_granule_size(void) {
return GC_ALIGNMENT; return GC_ALIGNMENT;

View file

@@ -7,8 +7,9 @@
static const uintptr_t GC_ALIGNMENT = 8; static const uintptr_t GC_ALIGNMENT = 8;
static const size_t GC_LARGE_OBJECT_THRESHOLD = 8192; static const size_t GC_LARGE_OBJECT_THRESHOLD = 8192;
static inline enum gc_allocator_kind gc_allocator_kind(void) { static inline enum gc_inline_allocator_kind
return GC_ALLOCATOR_INLINE_BUMP_POINTER; gc_inline_allocator_kind(enum gc_allocation_kind kind) {
return GC_INLINE_ALLOCATOR_BUMP_POINTER;
} }
static inline size_t gc_allocator_small_granule_size(void) { static inline size_t gc_allocator_small_granule_size(void) {
return GC_ALIGNMENT; return GC_ALIGNMENT;

View file

@@ -65,7 +65,6 @@ struct gc_heap {
struct gc_mutator { struct gc_mutator {
void *freelists[GC_INLINE_FREELIST_COUNT]; void *freelists[GC_INLINE_FREELIST_COUNT];
void *pointerless_freelists[GC_INLINE_FREELIST_COUNT];
struct gc_heap *heap; struct gc_heap *heap;
struct gc_mutator_roots *roots; struct gc_mutator_roots *roots;
struct gc_mutator *next; // with heap lock struct gc_mutator *next; // with heap lock
@@ -124,9 +123,6 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
*freelist = *(void **)(head); *freelist = *(void **)(head);
*(void**)head = NULL; *(void**)head = NULL;
if (kind == GC_INLINE_KIND_POINTERLESS)
memset(head, 0, gc_inline_freelist_object_size(idx));
return head; return head;
} }
@@ -134,42 +130,39 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
enum gc_allocation_kind kind) { enum gc_allocation_kind kind) {
GC_ASSERT(size != 0); GC_ASSERT(size != 0);
if (size <= gc_allocator_large_threshold()) { if (size <= gc_allocator_large_threshold()) {
size_t idx = gc_inline_bytes_to_freelist_index(size);
void **freelists;
enum gc_inline_kind freelist_kind;
switch (kind) {
case GC_ALLOCATION_TAGGED:
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS:
return allocate_small(&mut->pointerless_freelists[idx], idx,
GC_INLINE_KIND_POINTERLESS);
default:
GC_CRASH();
}
} else {
switch (kind) { switch (kind) {
case GC_ALLOCATION_TAGGED: case GC_ALLOCATION_TAGGED:
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: { case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: {
void *ret = GC_malloc(size); size_t idx = gc_inline_bytes_to_freelist_index(size);
if (GC_LIKELY (ret != NULL)) return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
return ret;
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
} }
case GC_ALLOCATION_TAGGED_POINTERLESS: case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS: { case GC_ALLOCATION_UNTAGGED_POINTERLESS:
void *ret = GC_malloc_atomic(size); break;
if (GC_LIKELY (ret != NULL)) {
memset(ret, 0, size);
return ret;
}
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
}
default: default:
GC_CRASH(); GC_CRASH();
} }
} }
switch (kind) {
case GC_ALLOCATION_TAGGED:
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: {
void *ret = GC_malloc(size);
if (GC_LIKELY (ret != NULL))
return ret;
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
}
case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS: {
void *ret = GC_malloc_atomic(size);
if (GC_LIKELY (ret != NULL)) {
memset(ret, 0, size);
return ret;
}
return __the_bdw_gc_heap->allocation_failure(__the_bdw_gc_heap, size);
}
default:
GC_CRASH();
}
} }
void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) { void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
@@ -400,18 +393,7 @@ mark_mutator(GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
return state.mark_stack_ptr; return state.mark_stack_ptr;
} }
for (int i = 0; i < GC_INLINE_FREELIST_COUNT; i++) memset(mut->freelists, 0, sizeof(void*) * GC_INLINE_FREELIST_COUNT);
state.mark_stack_ptr = GC_MARK_AND_PUSH (mut->freelists[i],
state.mark_stack_ptr,
state.mark_stack_limit,
NULL);
for (int i = 0; i < GC_INLINE_FREELIST_COUNT; i++)
for (void *head = mut->pointerless_freelists[i]; head; head = *(void**)head)
state.mark_stack_ptr = GC_MARK_AND_PUSH (head,
state.mark_stack_ptr,
state.mark_stack_limit,
NULL);
if (mut->roots) if (mut->roots)
gc_trace_mutator_roots(mut->roots, bdw_mark_edge, mut->heap, &state); gc_trace_mutator_roots(mut->roots, bdw_mark_edge, mut->heap, &state);