From d0b8f6838dddc6c7f9dec5156cb6ea985394df19 Mon Sep 17 00:00:00 2001
From: Andy Wingo
Date: Tue, 14 Mar 2023 14:35:20 +0100
Subject: [PATCH] Rework fast/slow path alloc API

This lets users call gc_allocate_fast() and know that a non-NULL
result means we could allocate without GC, and so no object was
moved.
---
 bdw.c     | 43 +++++++++++++++++--------------------------
 gc-api.h  | 48 ++++++++++++++++++++++++++++++++----------------
 semi.c    |  7 +++++--
 whippet.c |  9 ++++++---
 4 files changed, 60 insertions(+), 47 deletions(-)

diff --git a/bdw.c b/bdw.c
index d4c664eeb..29958f133 100644
--- a/bdw.c
+++ b/bdw.c
@@ -74,42 +74,33 @@ enum gc_inline_kind {
   GC_INLINE_KIND_NORMAL
 };
 
-static void* allocate_small_slow(void **freelist, size_t idx,
-                                 enum gc_inline_kind kind) GC_NEVER_INLINE;
-static void* allocate_small_slow(void **freelist, size_t idx,
-                                 enum gc_inline_kind kind) {
-  size_t bytes = gc_inline_freelist_object_size(idx);
-  GC_generic_malloc_many(bytes, kind, freelist);
-  void *head = *freelist;
-  if (GC_UNLIKELY (!head)) {
-    fprintf(stderr, "ran out of space, heap size %zu\n",
-            GC_get_heap_size());
-    GC_CRASH();
-  }
-  *freelist = *(void **)(head);
-  return head;
-}
-
 static inline void *
 allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
   void *head = *freelist;
 
-  if (GC_UNLIKELY (!head))
-    return allocate_small_slow(freelist, idx, kind);
+  if (!head) {
+    size_t bytes = gc_inline_freelist_object_size(idx);
+    GC_generic_malloc_many(bytes, kind, freelist);
+    head = *freelist;
+    if (GC_UNLIKELY (!head)) {
+      fprintf(stderr, "ran out of space, heap size %zu\n",
+              GC_get_heap_size());
+      GC_CRASH();
+    }
+  }
 
   *freelist = *(void **)(head);
   return head;
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
-  return GC_malloc(size);
-}
-
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
-  GC_ASSERT(size <= gc_allocator_large_threshold());
-  size_t idx = gc_inline_bytes_to_freelist_index(size);
-  return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+  if (size <= gc_allocator_large_threshold()) {
+    size_t idx = gc_inline_bytes_to_freelist_index(size);
+    return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+  } else {
+    return GC_malloc(size);
+  }
 }
 
 void* gc_allocate_pointerless(struct gc_mutator *mut,
diff --git a/gc-api.h b/gc-api.h
index 47222706a..5f0d3c1aa 100644
--- a/gc-api.h
+++ b/gc-api.h
@@ -83,12 +83,11 @@ static inline void gc_update_alloc_table(struct gc_mutator *mut,
   }
 }
 
-GC_API_ void* gc_allocate_small(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
-GC_API_ void* gc_allocate_large(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_slow(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
 
 static inline void*
-gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size) {
+gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   size_t granule_size = gc_allocator_small_granule_size();
@@ -105,7 +104,7 @@ static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size
   uintptr_t new_hp = hp + size;
 
   if (GC_UNLIKELY (new_hp > limit))
-    return gc_allocate_small(mut, size);
+    return NULL;
 
   *hp_loc = new_hp;
 
@@ -115,9 +114,9 @@ static inline void* gc_allocate_bump_pointer(struct gc_mutator *mut, size_t size
   return (void*)hp;
 }
 
-static inline void* gc_allocate_freelist(struct gc_mutator *mut,
-                                         size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut,
+                                                    size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   size_t freelist_offset = gc_allocator_freelist_offset(size);
@@ -126,7 +125,7 @@ static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
   void *head = *freelist_loc;
 
   if (GC_UNLIKELY(!head))
-    return gc_allocate_small(mut, size);
+    return NULL;
 
   *freelist_loc = *(void**)head;
 
@@ -136,24 +135,41 @@ static inline void* gc_allocate_freelist(struct gc_mutator *mut, size_t size) {
   return head;
 }
 
-static inline void* gc_allocate(struct gc_mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
-static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
-  if (size > gc_allocator_large_threshold())
-    return gc_allocate_large(mut, size);
+  GC_ASSERT(size <= gc_allocator_large_threshold());
 
   switch (gc_allocator_kind()) {
   case GC_ALLOCATOR_INLINE_BUMP_POINTER:
-    return gc_allocate_bump_pointer(mut, size);
+    return gc_allocate_small_fast_bump_pointer(mut, size);
   case GC_ALLOCATOR_INLINE_FREELIST:
-    return gc_allocate_freelist(mut, size);
+    return gc_allocate_small_fast_freelist(mut, size);
   case GC_ALLOCATOR_INLINE_NONE:
-    return gc_allocate_small(mut, size);
+    return NULL;
   default:
     GC_CRASH();
   }
 }
 
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) {
+  GC_ASSERT(size != 0);
+  if (size > gc_allocator_large_threshold())
+    return NULL;
+
+  return gc_allocate_small_fast(mut, size);
+}
+
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
+  void *ret = gc_allocate_fast(mut, size);
+  if (GC_LIKELY(ret != NULL))
+    return ret;
+
+  return gc_allocate_slow(mut, size);
+}
+
 // FIXME: remove :P
 GC_API_ void* gc_allocate_pointerless(struct gc_mutator *mut,
                                       size_t bytes);
diff --git a/semi.c b/semi.c
index fe4ee382b..5d85ec8c6 100644
--- a/semi.c
+++ b/semi.c
@@ -379,7 +379,7 @@ static void collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
   collect_for_alloc(mut, npages * mutator_semi_space(mut)->page_size);
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
   struct semi_space *semi_space = heap_semi_space(heap);
@@ -400,7 +400,10 @@ void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+  if (size > gc_allocator_large_threshold())
+    return allocate_large(mut, size);
+
   struct semi_space *space = mutator_semi_space(mut);
   while (1) {
     uintptr_t addr = space->hp;
diff --git a/whippet.c b/whippet.c
index 686ac5cda..5d692b728 100644
--- a/whippet.c
+++ b/whippet.c
@@ -2114,7 +2114,7 @@ void gc_collect(struct gc_mutator *mut) {
   trigger_collection(mut);
 }
 
-void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
+static void* allocate_large(struct gc_mutator *mut, size_t size) {
   struct gc_heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
 
@@ -2139,9 +2139,12 @@ void* gc_allocate_large(struct gc_mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   GC_ASSERT(size > 0); // allocating 0 bytes would be silly
-  GC_ASSERT(size <= gc_allocator_large_threshold());
+
+  if (size > gc_allocator_large_threshold())
+    return allocate_large(mut, size);
+
   size = align_up(size, GRANULE_SIZE);
   uintptr_t alloc = mut->alloc;
   uintptr_t sweep = mut->sweep;
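
A minimal sketch of how an embedder might use the reworked API.  The
struct cons type and make_cons function below are hypothetical, not part
of this patch; only gc_allocate(), gc_allocate_fast(), and
gc_allocate_slow() come from gc-api.h.

  #include "gc-api.h"

  struct cons { struct cons *car, *cdr; };  // hypothetical embedder type

  struct cons* make_cons(struct gc_mutator *mut,
                         struct cons *car, struct cons *cdr) {
    // Fast path: a non-NULL result means no collection ran, so the raw
    // `car` and `cdr` pointers held across the call are still valid.
    struct cons *ret = gc_allocate_fast(mut, sizeof(struct cons));
    if (!ret) {
      // Slow path: may trigger GC, which in a moving collector can
      // relocate objects.  A real embedder would need car and cdr to be
      // reachable from roots and would reload them after this call;
      // that caveat is the point of splitting the API.
      ret = gc_allocate_slow(mut, sizeof(struct cons));
    }
    ret->car = car;
    ret->cdr = cdr;
    return ret;
  }

Embedders that do not hold raw pointers across an allocation can simply
call gc_allocate(), which inlines the fast path and falls back to the
out-of-line gc_allocate_slow().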