
Mostly implementation-independent inline allocation

This is a step towards separate compilation of the GC without losing
performance.  The only remaining task is the write barrier.
Author: Andy Wingo
Date:   2022-08-15 11:17:15 +02:00
Parent: 4d8a7169d0
Commit: a75842be90
4 changed files with 225 additions and 62 deletions
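
The shape of the change: each collector header (bdw.h, semi.h, whippet.h) now answers a handful of always-inline constant queries — allocator kind, granule size, and the offsets of the allocation pointer, limit, or freelists within the mutator — and a single gc_allocate fast path in the API header specializes itself against those answers at compile time. A minimal sketch of the pattern, with hypothetical names standing in for the real API:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the per-collector hooks.  Because kind()
   is an inline function returning a constant, the switch in allocate()
   below constant-folds: each embedder compiles down to just its own
   fast path, with no indirect call into the GC library. */
enum alloc_kind { ALLOC_BUMP_POINTER, ALLOC_FREELIST };
static inline enum alloc_kind kind(void) { return ALLOC_BUMP_POINTER; }

struct mut { uintptr_t hp, limit; };

/* Out-of-line slow path; stubbed with malloc for this sketch. */
static void* slow_path(struct mut *m, size_t size) {
  (void)m;
  return malloc(size);
}

static inline void* allocate(struct mut *m, size_t size) {
  switch (kind()) {  /* constant: dead arms disappear at compile time */
  case ALLOC_BUMP_POINTER: {
    uintptr_t hp = m->hp, new_hp = hp + size;
    if (new_hp > m->limit)
      return slow_path(m, size);
    m->hp = new_hp;
    return (void*)hp;
  }
  default:
    abort();
  }
}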

bdw.h (40 lines changed)

@@ -45,6 +45,34 @@ static inline size_t gc_inline_freelist_object_size(size_t idx) {
   return (idx + 1U) * GC_INLINE_GRANULE_BYTES;
 }
 
+static inline enum gc_allocator_kind gc_allocator_kind(void) {
+  return GC_ALLOCATOR_INLINE_FREELIST;
+}
+static inline size_t gc_allocator_small_granule_size(void) {
+  return GC_INLINE_GRANULE_BYTES;
+}
+static inline size_t gc_allocator_large_threshold(void) {
+  return 256;
+}
+
+static inline size_t gc_allocator_allocation_pointer_offset(void) {
+  abort();
+}
+static inline size_t gc_allocator_allocation_limit_offset(void) {
+  abort();
+}
+static inline size_t gc_allocator_freelist_offset(size_t size) {
+  GC_ASSERT(size);
+  return sizeof(void*) * gc_inline_bytes_to_freelist_index(size);
+}
+
+static inline void gc_allocator_inline_success(struct mutator *mut,
+                                               struct gc_ref obj,
+                                               uintptr_t aligned_size) {}
+static inline void gc_allocator_inline_failure(struct mutator *mut,
+                                               uintptr_t aligned_size) {}
+
 // The values of these must match the internal POINTERLESS and NORMAL
 // definitions in libgc, for which unfortunately there are no external
 // definitions.  Alack.
@@ -80,12 +108,14 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
   return head;
 }
 
-static inline void* gc_allocate(struct mutator *mut, size_t size) {
+static void* gc_allocate_large(struct mutator *mut, size_t size) {
+  return GC_malloc(size);
+}
+
+static void* gc_allocate_small(struct mutator *mut, size_t size) {
+  GC_ASSERT(size != 0);
+  GC_ASSERT(size <= gc_allocator_large_threshold());
   size_t idx = gc_inline_bytes_to_freelist_index(size);
-  if (UNLIKELY(idx >= GC_INLINE_FREELIST_COUNT))
-    return GC_malloc(size);
   return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
 }
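
The offset arithmetic above assumes the freelists array sits at the very start of the bdw mutator, so slot i lives at byte offset i * sizeof(void*). A hedged sketch of the size-to-index mapping, assuming gc_inline_bytes_to_freelist_index is the inverse of the gc_inline_freelist_object_size shown in the first hunk, and assuming 16-byte granules:

#include <assert.h>
#include <stddef.h>

#define GC_INLINE_GRANULE_BYTES 16  /* assumed granule size */

/* Presumed inverse of gc_inline_freelist_object_size: round the
   request up to a whole number of granules, minus one for the index. */
static inline size_t bytes_to_freelist_index(size_t bytes) {
  return (bytes + GC_INLINE_GRANULE_BYTES - 1) / GC_INLINE_GRANULE_BYTES - 1;
}

int main(void) {
  assert(bytes_to_freelist_index(1) == 0);   /* one 16-byte granule */
  assert(bytes_to_freelist_index(16) == 0);
  assert(bytes_to_freelist_index(17) == 1);  /* two granules: slot 1,
                                                at offset sizeof(void*),
                                                matching
                                                gc_allocator_freelist_offset */
  return 0;
}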

gc-api.h

@@ -23,6 +23,10 @@ struct gc_option {
   double value;
 };
 
+struct gc_mutator {
+  void *user_data;
+};
+
 // FIXME: Conflict with bdw-gc GC_API.  Switch prefix?
 #ifndef GC_API_
 #define GC_API_ static
@@ -38,8 +42,96 @@ GC_API_ void gc_finish_for_thread(struct mutator *mut);
 GC_API_ void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
 
-GC_API_ inline void* gc_allocate(struct mutator *mut, size_t bytes);
+GC_API_ void* gc_allocate_small(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_large(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+static inline void* gc_allocate(struct mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
 // FIXME: remove :P
-GC_API_ inline void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
+static inline void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
+
+enum gc_allocator_kind {
+  GC_ALLOCATOR_INLINE_BUMP_POINTER,
+  GC_ALLOCATOR_INLINE_FREELIST,
+  GC_ALLOCATOR_INLINE_NONE
+};
+
+static inline enum gc_allocator_kind gc_allocator_kind(void) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_large_threshold(void) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_small_granule_size(void) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_allocation_pointer_offset(void) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_allocation_limit_offset(void) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_freelist_offset(size_t size) GC_ALWAYS_INLINE;
+
+static inline void gc_allocator_inline_success(struct mutator *mut,
+                                               struct gc_ref obj,
+                                               uintptr_t aligned_size);
+static inline void gc_allocator_inline_failure(struct mutator *mut,
+                                               uintptr_t aligned_size);
+
+static inline void*
+gc_allocate_bump_pointer(struct mutator *mut, size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_bump_pointer(struct mutator *mut, size_t size) {
+  GC_ASSERT(size <= gc_allocator_large_threshold());
+
+  size_t granule_size = gc_allocator_small_granule_size();
+  size_t hp_offset = gc_allocator_allocation_pointer_offset();
+  size_t limit_offset = gc_allocator_allocation_limit_offset();
+
+  uintptr_t base_addr = (uintptr_t)mut;
+  uintptr_t *hp_loc = (uintptr_t*)(base_addr + hp_offset);
+  uintptr_t *limit_loc = (uintptr_t*)(base_addr + limit_offset);
+
+  size = (size + granule_size - 1) & ~(granule_size - 1);
+  uintptr_t hp = *hp_loc;
+  uintptr_t limit = *limit_loc;
+  uintptr_t new_hp = hp + size;
+
+  if (GC_UNLIKELY (new_hp > limit)) {
+    gc_allocator_inline_failure(mut, size);
+    return gc_allocate_small(mut, size);
+  }
+
+  gc_allocator_inline_success(mut, gc_ref(hp), size);
+
+  *hp_loc = new_hp;
+  return (void*)hp;
+}
+
+static inline void* gc_allocate_freelist(struct mutator *mut,
+                                         size_t size) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_freelist(struct mutator *mut, size_t size) {
+  GC_ASSERT(size <= gc_allocator_large_threshold());
+
+  size_t freelist_offset = gc_allocator_freelist_offset(size);
+  uintptr_t base_addr = (uintptr_t)mut;
+  void **freelist_loc = (void**)(base_addr + freelist_offset);
+
+  void *head = *freelist_loc;
+  if (GC_UNLIKELY(!head))
+    return gc_allocate_small(mut, size);
+
+  *freelist_loc = *(void**)head;
+  return head;
+}
+
+static inline void* gc_allocate(struct mutator *mut, size_t size) {
+  GC_ASSERT(size != 0);
+
+  if (size > gc_allocator_large_threshold())
+    return gc_allocate_large(mut, size);
+
+  switch (gc_allocator_kind()) {
+  case GC_ALLOCATOR_INLINE_BUMP_POINTER:
+    return gc_allocate_bump_pointer(mut, size);
+  case GC_ALLOCATOR_INLINE_FREELIST:
+    return gc_allocate_freelist(mut, size);
+  case GC_ALLOCATOR_INLINE_NONE:
+    return gc_allocate_small(mut, size);
+  default:
+    abort();
+  }
+}
 
 #endif // GC_API_H_
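
One detail worth calling out in gc_allocate_bump_pointer: the size is rounded up with (size + granule_size - 1) & ~(granule_size - 1), which is only correct when the granule size is a power of two. A quick check of the identity:

#include <assert.h>
#include <stdint.h>

/* Round size up to a multiple of granule.  Adding granule-1 carries
   any partial granule into the next multiple; the mask then clears the
   low bits.  Requires granule to be a power of two. */
static uintptr_t round_up(uintptr_t size, uintptr_t granule) {
  return (size + granule - 1) & ~(granule - 1);
}

int main(void) {
  assert(round_up(1, 16) == 16);
  assert(round_up(16, 16) == 16);  /* already aligned: unchanged */
  assert(round_up(17, 16) == 32);
  return 0;
}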

semi.h (51 lines changed)

@@ -29,6 +29,43 @@ struct mutator {
   struct handle *roots;
 };
 
+static const uintptr_t ALIGNMENT = 8;
+static const size_t LARGE_OBJECT_THRESHOLD = 8192;
+
+static inline void clear_memory(uintptr_t addr, size_t size) {
+  memset((char*)addr, 0, size);
+}
+
+static inline enum gc_allocator_kind gc_allocator_kind(void) {
+  return GC_ALLOCATOR_INLINE_BUMP_POINTER;
+}
+static inline size_t gc_allocator_small_granule_size(void) {
+  return ALIGNMENT;
+}
+static inline size_t gc_allocator_large_threshold(void) {
+  return LARGE_OBJECT_THRESHOLD;
+}
+static inline size_t gc_allocator_allocation_pointer_offset(void) {
+  return offsetof(struct semi_space, hp);
+}
+static inline size_t gc_allocator_allocation_limit_offset(void) {
+  return offsetof(struct semi_space, limit);
+}
+static inline size_t gc_allocator_freelist_offset(size_t size) {
+  abort();
+}
+
+static inline void gc_allocator_inline_success(struct mutator *mut,
+                                               struct gc_ref obj,
+                                               uintptr_t aligned_size) {
+  // FIXME: Allow allocator to avoid clearing memory?
+  clear_memory(gc_ref_value(obj), aligned_size);
+}
+static inline void gc_allocator_inline_failure(struct mutator *mut,
+                                               uintptr_t aligned_size) {}
+
 static inline struct heap* mutator_heap(struct mutator *mut) {
   return &mut->heap;
 }
@@ -42,16 +79,10 @@ static inline struct semi_space* mutator_semi_space(struct mutator *mut) {
   return heap_semi_space(mutator_heap(mut));
 }
 
-static const uintptr_t ALIGNMENT = 8;
-
 static uintptr_t align_up(uintptr_t addr, size_t align) {
   return (addr + align - 1) & ~(align-1);
 }
 
-static inline void clear_memory(uintptr_t addr, size_t size) {
-  memset((char*)addr, 0, size);
-}
-
 static void collect(struct mutator *mut) GC_NEVER_INLINE;
 static void collect_for_alloc(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
@@ -171,8 +202,7 @@ static void collect_for_alloc(struct mutator *mut, size_t bytes) {
   }
 }
 
-static const size_t LARGE_OBJECT_THRESHOLD = 8192;
-static void* allocate_large(struct mutator *mut, size_t size) {
+static void* gc_allocate_large(struct mutator *mut, size_t size) {
   struct heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
   struct semi_space *semi_space = heap_semi_space(heap);
@@ -198,10 +228,7 @@ static void* allocate_large(struct mutator *mut, size_t size) {
   return ret;
 }
 
-static inline void* gc_allocate(struct mutator *mut, size_t size) {
-  if (size >= LARGE_OBJECT_THRESHOLD)
-    return allocate_large(mut, size);
+static void* gc_allocate_small(struct mutator *mut, size_t size) {
   struct semi_space *space = mutator_semi_space(mut);
   while (1) {
     uintptr_t addr = space->hp;
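
Note that gc_allocator_allocation_pointer_offset returns offsetof(struct semi_space, hp), yet the generic fast path adds that offset to the mutator pointer itself. This presumably relies on the semi-space being the first member of the mutator, as in this sketch (types abbreviated; the layout is only an inference from the offsets above):

#include <stddef.h>
#include <stdint.h>

struct semi_space { uintptr_t hp, limit; /* ... */ };
struct heap { struct semi_space space; /* ... */ };
struct handle;
struct mutator { struct heap heap; struct handle *roots; };

/* The bump-pointer offsets are applied to the mutator base address,
   so the semi-space must start at offset zero of the mutator. */
_Static_assert(offsetof(struct mutator, heap) == 0,
               "heap must be the mutator's first member");
_Static_assert(offsetof(struct heap, space) == 0,
               "semi_space must be the heap's first member");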

whippet.h (100 lines changed)

@@ -369,6 +369,44 @@ static inline struct heap* mutator_heap(struct mutator *mutator) {
   return mutator->heap;
 }
 
+static inline enum gc_allocator_kind gc_allocator_kind(void) {
+  return GC_ALLOCATOR_INLINE_BUMP_POINTER;
+}
+static inline size_t gc_allocator_small_granule_size(void) {
+  return GRANULE_SIZE;
+}
+static inline size_t gc_allocator_large_threshold(void) {
+  return LARGE_OBJECT_THRESHOLD;
+}
+static inline size_t gc_allocator_allocation_pointer_offset(void) {
+  return offsetof(struct mutator, alloc);
+}
+static inline size_t gc_allocator_allocation_limit_offset(void) {
+  return offsetof(struct mutator, sweep);
+}
+static inline size_t gc_allocator_freelist_offset(size_t size) {
+  abort();
+}
+
+static inline void gc_allocator_inline_success(struct mutator *mut,
+                                               struct gc_ref obj,
+                                               uintptr_t aligned_size) {
+  uint8_t *metadata = object_metadata_byte(gc_ref_heap_object(obj));
+  size_t granules = aligned_size >> GRANULE_SIZE_LOG_2;
+  if (granules == 1) {
+    metadata[0] = METADATA_BYTE_YOUNG | METADATA_BYTE_END;
+  } else {
+    metadata[0] = METADATA_BYTE_YOUNG;
+    if (granules > 2)
+      memset(metadata + 1, 0, granules - 2);
+    metadata[granules - 1] = METADATA_BYTE_END;
+  }
+}
+static inline void gc_allocator_inline_failure(struct mutator *mut,
+                                               uintptr_t aligned_size) {}
+
 static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
@@ -1756,11 +1794,10 @@ static void trigger_collection(struct mutator *mut) {
   heap_unlock(heap);
 }
 
-static void* allocate_large(struct mutator *mut, size_t granules) {
+static void* gc_allocate_large(struct mutator *mut, size_t size) {
   struct heap *heap = mutator_heap(mut);
   struct large_object_space *space = heap_large_object_space(heap);
-  size_t size = granules * GRANULE_SIZE;
   size_t npages = large_object_space_npages(space, size);
 
   mark_space_request_release_memory(heap_mark_space(heap),
@@ -1782,58 +1819,35 @@ static void* allocate_large(struct mutator *mut, size_t granules) {
   return ret;
 }
 
-static void* allocate_small_slow(struct mutator *mut, size_t granules) GC_NEVER_INLINE;
-static void* allocate_small_slow(struct mutator *mut, size_t granules) {
-  while (1) {
-    size_t hole = next_hole(mut);
-    if (hole >= granules) {
-      clear_memory(mut->alloc, hole * GRANULE_SIZE);
-      break;
-    }
-    if (!hole)
-      trigger_collection(mut);
-  }
-  struct gcobj* ret = (struct gcobj*)mut->alloc;
-  mut->alloc += granules * GRANULE_SIZE;
-  return ret;
-}
-
-static inline void* allocate_small(struct mutator *mut, size_t granules) {
-  GC_ASSERT(granules > 0); // allocating 0 granules would be silly
+static void* gc_allocate_small(struct mutator *mut, size_t size) {
+  GC_ASSERT(size > 0); // allocating 0 bytes would be silly
+  GC_ASSERT(size <= gc_allocator_large_threshold());
+  size = align_up(size, GRANULE_SIZE);
   uintptr_t alloc = mut->alloc;
   uintptr_t sweep = mut->sweep;
-  uintptr_t new_alloc = alloc + granules * GRANULE_SIZE;
+  uintptr_t new_alloc = alloc + size;
   struct gcobj *obj;
   if (new_alloc <= sweep) {
     mut->alloc = new_alloc;
     obj = (struct gcobj *)alloc;
   } else {
-    obj = allocate_small_slow(mut, granules);
-  }
-  uint8_t *metadata = object_metadata_byte(obj);
-  if (granules == 1) {
-    metadata[0] = METADATA_BYTE_YOUNG | METADATA_BYTE_END;
-  } else {
-    metadata[0] = METADATA_BYTE_YOUNG;
-    if (granules > 2)
-      memset(metadata + 1, 0, granules - 2);
-    metadata[granules - 1] = METADATA_BYTE_END;
+    size_t granules = size >> GRANULE_SIZE_LOG_2;
+    while (1) {
+      size_t hole = next_hole(mut);
+      if (hole >= granules) {
+        clear_memory(mut->alloc, hole * GRANULE_SIZE);
+        break;
+      }
+      if (!hole)
+        trigger_collection(mut);
+    }
+    obj = (struct gcobj*)mut->alloc;
+    mut->alloc += size;
   }
+  gc_allocator_inline_success(mut, gc_ref_from_heap_object(obj), size);
   return obj;
 }
 
-static inline void* allocate_medium(struct mutator *mut, size_t granules) {
-  return allocate_small(mut, granules);
-}
-
-static inline void* gc_allocate(struct mutator *mut, size_t size) {
-  size_t granules = size_to_granules(size);
-  if (granules <= MEDIUM_OBJECT_GRANULE_THRESHOLD)
-    return allocate_small(mut, granules);
-  if (granules <= LARGE_OBJECT_GRANULE_THRESHOLD)
-    return allocate_medium(mut, granules);
-  return allocate_large(mut, granules);
-}
-
 static inline void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
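
For reference, the mark bytes written by gc_allocator_inline_success — first granule YOUNG, last granule END, zeros between, so the sweeper can recover object extents — can be exercised in isolation. The flag values below are placeholders, not the real METADATA_BYTE_* constants:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BYTE_YOUNG 1   /* placeholder flag values */
#define BYTE_END   16

/* Same shape as the metadata loop in gc_allocator_inline_success. */
static void mark_young(uint8_t *metadata, size_t granules) {
  if (granules == 1) {
    metadata[0] = BYTE_YOUNG | BYTE_END;
  } else {
    metadata[0] = BYTE_YOUNG;
    if (granules > 2)
      memset(metadata + 1, 0, granules - 2);
    metadata[granules - 1] = BYTE_END;
  }
}

int main(void) {
  uint8_t meta[3] = {0xff, 0xff, 0xff};
  mark_young(meta, 3);          /* a 3-granule object */
  assert(meta[0] == BYTE_YOUNG);
  assert(meta[1] == 0);
  assert(meta[2] == BYTE_END);
  return 0;
}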