
Rework fast/slow path alloc API

This lets users do gc_allocate_fast() and know that if they got a
non-NULL result, we could allocate without GC, and so no object was
moved.
Andy Wingo · 2023-03-14 14:35:20 +01:00
parent 62f4b045f8 · commit d0b8f6838d
4 changed files with 60 additions and 47 deletions
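Below, a minimal sketch (not part of the commit) of the pattern this enables for an embedder. The struct pair type, the roots array, and make_pair are invented for illustration; the gc_allocate_fast/gc_allocate_slow split is what the commit provides.

#include <stddef.h>

struct gc_mutator;  /* opaque mutator handle from the gc API */
void* gc_allocate_fast(struct gc_mutator *mut, size_t size);
void* gc_allocate_slow(struct gc_mutator *mut, size_t size);

struct pair { void *car, *cdr; };

void* make_pair(struct gc_mutator *mut, void **roots) {
  /* Fast path: inline bump-pointer or freelist hit; never collects. */
  struct pair *p = gc_allocate_fast(mut, sizeof(struct pair));
  if (p) {
    /* Non-NULL means no GC ran, so no object moved: raw pointers
       held across the call are still valid. */
    p->car = roots[0];
    p->cdr = roots[1];
    return p;
  }
  /* Slow path: may collect and move objects, so reload anything
     movable from the roots afterwards. */
  p = gc_allocate_slow(mut, sizeof(struct pair));
  p->car = roots[0];
  p->cdr = roots[1];
  return p;
}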

bdw.c · 43 changes

@@ -74,42 +74,33 @@ enum gc_inline_kind {
   GC_INLINE_KIND_NORMAL
 };
 
-static void* allocate_small_slow(void **freelist, size_t idx,
-                                 enum gc_inline_kind kind) GC_NEVER_INLINE;
-static void* allocate_small_slow(void **freelist, size_t idx,
-                                 enum gc_inline_kind kind) {
-  size_t bytes = gc_inline_freelist_object_size(idx);
-  GC_generic_malloc_many(bytes, kind, freelist);
-  void *head = *freelist;
-  if (GC_UNLIKELY (!head)) {
-    fprintf(stderr, "ran out of space, heap size %zu\n",
-            GC_get_heap_size());
-    GC_CRASH();
-  }
-  *freelist = *(void **)(head);
-  return head;
-}
-
 static inline void *
 allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
   void *head = *freelist;
 
-  if (GC_UNLIKELY (!head))
-    return allocate_small_slow(freelist, idx, kind);
+  if (!head) {
+    size_t bytes = gc_inline_freelist_object_size(idx);
+    GC_generic_malloc_many(bytes, kind, freelist);
+    head = *freelist;
+    if (GC_UNLIKELY (!head)) {
+      fprintf(stderr, "ran out of space, heap size %zu\n",
+              GC_get_heap_size());
+      GC_CRASH();
+    }
+  }
 
   *freelist = *(void **)(head);
   return head;
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
-  return GC_malloc(size);
-}
-
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
-  GC_ASSERT(size <= gc_allocator_large_threshold());
-  size_t idx = gc_inline_bytes_to_freelist_index(size);
-  return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+  if (size <= gc_allocator_large_threshold()) {
+    size_t idx = gc_inline_bytes_to_freelist_index(size);
+    return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+  } else {
+    return GC_malloc(size);
+  }
 }
 
 void* gc_allocate_pointerless(struct gc_mutator *mut,
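The small-object path above pops from per-size free lists that GC_generic_malloc_many leaves threaded through the first word of each free object. A standalone sketch of that intrusive-list pop; the malloc-built list is purely illustrative, not bdwgc code.

#include <stdio.h>
#include <stdlib.h>

/* Pop one object from an intrusive free list: each free object holds
   a pointer to the next free object in its first word. */
static void* freelist_pop(void **freelist) {
  void *head = *freelist;
  if (!head)
    return NULL;               /* empty: the caller refills the list */
  *freelist = *(void **)head;  /* advance to the next free object */
  return head;
}

int main(void) {
  /* Thread three raw blocks together the way a refill would. */
  void *blocks[3] = { malloc(16), malloc(16), malloc(16) };
  *(void **)blocks[0] = blocks[1];
  *(void **)blocks[1] = blocks[2];
  *(void **)blocks[2] = NULL;
  void *freelist = blocks[0];
  while (freelist) {
    void *obj = freelist_pop(&freelist);
    printf("popped %p\n", obj);
    free(obj);
  }
  return 0;
}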

gc-api.h

@@ -83,12 +83,11 @@ static inline void gc_update_alloc_table(struct gc_mutator *mut,
   }
 }
 
-GC_API_ void* gc_allocate_small(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
-GC_API_ void* gc_allocate_large(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_slow(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
 
 static inline void*
-gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) {
+gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size_t granule_size = gc_allocator_small_granule_size();
 
@@ -105,7 +104,7 @@ static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size
   uintptr_t new_hp = hp + size;
   if (GC_UNLIKELY (new_hp > limit))
-    return gc_allocate_small(mut, size);
+    return NULL;
 
   *hp_loc = new_hp;
 
@@ -115,9 +114,9 @@ static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size
   return (void*)hp;
 }
 
-static inline void* gc_allocate_freelist(struct gc_mutator *mut,
-                                         size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut,
+                                                    size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size_t freelist_offset = gc_allocator_freelist_offset(size);
 
@@ -126,7 +125,7 @@ static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
   void *head = *freelist_loc;
   if (GC_UNLIKELY(!head))
-    return gc_allocate_small(mut, size);
+    return NULL;
 
   *freelist_loc = *(void**)head;
 
@@ -136,24 +135,41 @@ static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
   return head;
 }
 
-static inline void* gc_allocate(struct gc_mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
-static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
-  if (size > gc_allocator_large_threshold())
-    return gc_allocate_large(mut, size);
+  GC_ASSERT(size <= gc_allocator_large_threshold());
 
   switch (gc_allocator_kind()) {
   case GC_ALLOCATOR_INLINE_BUMP_POINTER:
-    return gc_allocate_bump_pointer(mut, size);
+    return gc_allocate_small_fast_bump_pointer(mut, size);
   case GC_ALLOCATOR_INLINE_FREELIST:
-    return gc_allocate_freelist(mut, size);
+    return gc_allocate_small_fast_freelist(mut, size);
   case GC_ALLOCATOR_INLINE_NONE:
-    return gc_allocate_small(mut, size);
+    return NULL;
   default:
     GC_CRASH();
   }
 }
 
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) {
+  GC_ASSERT(size != 0);
+  if (size > gc_allocator_large_threshold())
+    return NULL;
+
+  return gc_allocate_small_fast(mut, size);
+}
+
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
+  void *ret = gc_allocate_fast(mut, size);
+  if (GC_LIKELY(ret != NULL))
+    return ret;
+
+  return gc_allocate_slow(mut, size);
+}
+
 // FIXME: remove :P
 GC_API_ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t bytes);
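With this split, gc_allocate() becomes pure composition: try the inline fast path, else call out of line. On a bump-pointer platform the fast path reduces to an add, a compare, and a store; a sketch follows, with an invented toy_mutator layout standing in for the library's real struct gc_mutator.

#include <stddef.h>
#include <stdint.h>

struct toy_mutator {
  uintptr_t hp;     /* bump ("heap") pointer */
  uintptr_t limit;  /* end of the current allocation region */
};

/* The whole fast path: one add, one compare, one store. */
static inline void* toy_bump_alloc(struct toy_mutator *mut, size_t size) {
  uintptr_t hp = mut->hp;
  uintptr_t new_hp = hp + size;  /* size assumed already granule-aligned */
  if (new_hp > mut->limit)
    return NULL;                 /* fast path fails; caller takes slow path */
  mut->hp = new_hp;
  return (void*)hp;
}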

semi.c · 7 changes

@@ -379,7 +379,7 @@ static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
   collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
   struct semi_space *semi_space = heap_semi_space(heap);
 
@@ -400,7 +400,10 @@ void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+  if (size > gc_allocator_large_threshold())
+    return allocate_large(mut, size);
+
   struct semi_space *space = mutator_semi_space(mut);
   while (1) {
     uintptr_t addr = space->hp;
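For context, a semispace slow path is classically an allocate-or-collect retry loop. A self-contained sketch of that general shape with invented names (not a reconstruction of the code elided from this hunk):

#include <stddef.h>
#include <stdint.h>

struct toy_semi_space { uintptr_t hp, limit; };

/* Stub standing in for a real evacuating collection that resets hp. */
static void toy_collect(struct toy_semi_space *space) {
  /* ...copy live objects to the other semispace, flip, update hp... */
}

static void* toy_semi_alloc(struct toy_semi_space *space, size_t size) {
  while (1) {
    uintptr_t addr = space->hp;
    uintptr_t new_hp = addr + size;
    if (new_hp <= space->limit) {
      space->hp = new_hp;
      return (void*)addr;  /* bump succeeded */
    }
    toy_collect(space);    /* free up space, then retry the bump */
    /* A real implementation must fail or grow the heap if the
       collection reclaimed nothing, to avoid looping forever. */
  }
}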

whippet.c

@@ -2114,7 +2114,7 @@ void gc_collect(struct gc_mutator *mut) {
   trigger_collection(mut);
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
 
@@ -2139,9 +2139,12 @@ void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size > 0); // allocating 0 bytes would be silly
-  GC_ASSERT(size <= gc_allocator_large_threshold());
+  if (size > gc_allocator_large_threshold())
+    return allocate_large(mut, size);
+
   size = align_up(size, GRANULE_SIZE);
   uintptr_t alloc = mut->alloc;
   uintptr_t sweep = mut->sweep;
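The slow path first rounds requests up to whole granules. align_up is presumably the usual power-of-two rounding idiom; a sketch, with an illustrative GRANULE_SIZE (the real constant is defined elsewhere in the file):

#include <assert.h>
#include <stddef.h>

#define GRANULE_SIZE 16  /* illustrative; the real value is backend-defined */

/* Round size up to a multiple of alignment (power of two only). */
static inline size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main(void) {
  assert(align_up(1, GRANULE_SIZE) == 16);
  assert(align_up(16, GRANULE_SIZE) == 16);
  assert(align_up(17, GRANULE_SIZE) == 32);
  return 0;
}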