
Inline post-allocation actions

Andy Wingo, 2022-08-15 16:00:01 +02:00
commit a00c83878e (parent a75842be90)

6 changed files with 168 additions and 51 deletions

bdw.h (25 lines changed)

@@ -131,11 +131,28 @@ static inline void collect(struct mutator *mut) {
   GC_gcollect();
 }
-static inline void init_field(void *obj, void **addr, void *val) {
-  *addr = val;
+static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
+  return GC_WRITE_BARRIER_NONE;
 }
-static inline void set_field(void *obj, void **addr, void *val) {
-  *addr = val;
+static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+  abort();
 }
+static inline size_t gc_small_write_barrier_card_size(void) {
+  abort();
+}
+static inline size_t gc_allocator_alloc_table_alignment(void) {
+  return 0;
+}
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+  abort();
+}
+static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
+  abort();
+}
+static inline int gc_allocator_needs_clear(void) {
+  return 0;
+}
 static inline struct mutator *add_mutator(struct heap *heap) {

gc-api.h (107 lines changed)

@@ -7,7 +7,9 @@
 #include "gc-ref.h"
 #include "gc-edge.h"
+#include <stdatomic.h>
 #include <stdint.h>
+#include <string.h>
 // FIXME: prefix with gc_
 struct heap;
@@ -42,12 +44,6 @@ GC_API_ void gc_finish_for_thread(struct mutator *mut);
 GC_API_ void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
-GC_API_ void* gc_allocate_small(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
-GC_API_ void* gc_allocate_large(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
-static inline void* gc_allocate(struct mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
-// FIXME: remove :P
-static inline void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
 
 enum gc_allocator_kind {
   GC_ALLOCATOR_INLINE_BUMP_POINTER,
   GC_ALLOCATOR_INLINE_FREELIST,
@@ -65,11 +61,54 @@ static inline size_t gc_allocator_allocation_limit_offset(void) GC_ALWAYS_INLINE
 static inline size_t gc_allocator_freelist_offset(size_t size) GC_ALWAYS_INLINE;
 
-static inline void gc_allocator_inline_success(struct mutator *mut,
-                                               struct gc_ref obj,
-                                               uintptr_t aligned_size);
-static inline void gc_allocator_inline_failure(struct mutator *mut,
-                                               uintptr_t aligned_size);
+static inline size_t gc_allocator_alloc_table_alignment(void) GC_ALWAYS_INLINE;
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) GC_ALWAYS_INLINE;
+static inline uint8_t gc_allocator_alloc_table_end_pattern(void) GC_ALWAYS_INLINE;
+static inline int gc_allocator_needs_clear(void) GC_ALWAYS_INLINE;
+
+static inline void gc_clear_fresh_allocation(struct gc_ref obj,
+                                             size_t size) GC_ALWAYS_INLINE;
+static inline void gc_clear_fresh_allocation(struct gc_ref obj,
+                                             size_t size) {
+  if (!gc_allocator_needs_clear()) return;
+  memset(gc_ref_heap_object(obj), 0, size);
+}
+
+static inline void gc_update_alloc_table(struct mutator *mut,
+                                         struct gc_ref obj,
+                                         size_t size) GC_ALWAYS_INLINE;
+static inline void gc_update_alloc_table(struct mutator *mut,
+                                         struct gc_ref obj,
+                                         size_t size) {
+  size_t alignment = gc_allocator_alloc_table_alignment();
+  if (!alignment) return;
+
+  uintptr_t addr = gc_ref_value(obj);
+  uintptr_t base = addr & ~(alignment - 1);
+  size_t granule_size = gc_allocator_small_granule_size();
+  uintptr_t granule = (addr & (alignment - 1)) / granule_size;
+  uint8_t *alloc = (uint8_t*)(base + granule);
+
+  uint8_t begin_pattern = gc_allocator_alloc_table_begin_pattern();
+  uint8_t end_pattern = gc_allocator_alloc_table_end_pattern();
+  if (end_pattern) {
+    size_t granules = size / granule_size;
+    if (granules == 1) {
+      alloc[0] = begin_pattern | end_pattern;
+    } else {
+      alloc[0] = begin_pattern;
+      if (granules > 2)
+        memset(alloc + 1, 0, granules - 2);
+      alloc[granules - 1] = end_pattern;
+    }
+  } else {
+    alloc[0] = begin_pattern;
+  }
+}
+
+GC_API_ void* gc_allocate_small(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_large(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
+
 static inline void*
 gc_allocate_bump_pointer(struct mutator *mut, size_t size) GC_ALWAYS_INLINE;
@@ -89,14 +128,14 @@ static inline void* gc_allocate_bump_pointer(struct mutator *mut, size_t size) {
   uintptr_t limit = *limit_loc;
   uintptr_t new_hp = hp + size;
-  if (GC_UNLIKELY (new_hp > limit)) {
-    gc_allocator_inline_failure(mut, size);
+  if (GC_UNLIKELY (new_hp > limit))
     return gc_allocate_small(mut, size);
-  }
-  gc_allocator_inline_success(mut, gc_ref(hp), size);
   *hp_loc = new_hp;
+  gc_clear_fresh_allocation(gc_ref(hp), size);
+  gc_update_alloc_table(mut, gc_ref(hp), size);
   return (void*)hp;
 }
@@ -114,9 +153,14 @@ static inline void* gc_allocate_freelist(struct mutator *mut, size_t size) {
     return gc_allocate_small(mut, size);
 
   *freelist_loc = *(void**)head;
+  gc_clear_fresh_allocation(gc_ref_from_heap_object(head), size);
+  gc_update_alloc_table(mut, gc_ref_from_heap_object(head), size);
   return head;
 }
 
+static inline void* gc_allocate(struct mutator *mut, size_t bytes) GC_ALWAYS_INLINE;
 static inline void* gc_allocate(struct mutator *mut, size_t size) {
   GC_ASSERT(size != 0);
   if (size > gc_allocator_large_threshold())
@@ -134,4 +178,37 @@ static inline void* gc_allocate(struct mutator *mut, size_t size) {
   }
 }
 
+// FIXME: remove :P
+static inline void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
+
+enum gc_write_barrier_kind {
+  GC_WRITE_BARRIER_NONE,
+  GC_WRITE_BARRIER_CARD
+};
+
+static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void);
+static inline size_t gc_small_write_barrier_card_table_alignment(void);
+static inline size_t gc_small_write_barrier_card_size(void);
+
+static inline void gc_small_write_barrier(struct gc_ref obj, struct gc_edge edge,
+                                          struct gc_ref new_val) GC_ALWAYS_INLINE;
+static inline void gc_small_write_barrier(struct gc_ref obj, struct gc_edge edge,
+                                          struct gc_ref new_val) {
+  switch (gc_small_write_barrier_kind()) {
+  case GC_WRITE_BARRIER_NONE:
+    return;
+  case GC_WRITE_BARRIER_CARD: {
+    size_t card_table_alignment = gc_small_write_barrier_card_table_alignment();
+    size_t card_size = gc_small_write_barrier_card_size();
+    uintptr_t addr = gc_ref_value(obj);
+    uintptr_t base = addr & ~(card_table_alignment - 1);
+    uintptr_t card = (addr & (card_table_alignment - 1)) / card_size;
+    atomic_store_explicit((uint8_t*)(base + card), 1, memory_order_relaxed);
+    return;
+  }
+  default:
+    abort();
+  }
+}
+
 #endif // GC_API_H_
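The begin/end pattern logic in gc_update_alloc_table is easiest to see with concrete numbers. Below is a standalone sketch, not code from this commit: the granule size and the two byte patterns are made-up stand-ins for whippet's METADATA_BYTE_YOUNG and METADATA_BYTE_END, and the table is a local array rather than the slab header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum {
  GRANULE_SIZE = 16,     // bytes per granule; illustrative value
  TABLE_GRANULES = 64,   // granules covered by this toy table
  PATTERN_BEGIN = 1,     // stand-in for a "begin" byte like METADATA_BYTE_YOUNG
  PATTERN_END = 16       // stand-in for an "end" byte like METADATA_BYTE_END
};

// Same shape as gc_update_alloc_table's end-pattern branch: one byte per
// granule; the first granule gets the begin pattern, the last gets the
// end pattern, and interior granules are zeroed.
static void update_alloc_table(uint8_t *table, size_t first_granule,
                               size_t size) {
  size_t granules = size / GRANULE_SIZE;
  uint8_t *alloc = table + first_granule;
  if (granules == 1) {
    alloc[0] = PATTERN_BEGIN | PATTERN_END;
  } else {
    alloc[0] = PATTERN_BEGIN;
    if (granules > 2)
      memset(alloc + 1, 0, granules - 2);
    alloc[granules - 1] = PATTERN_END;
  }
}

int main(void) {
  uint8_t table[TABLE_GRANULES] = {0};
  update_alloc_table(table, 3, 3 * GRANULE_SIZE);  // 48-byte object at granule 3
  for (size_t i = 0; i < 8; i++)
    printf("granule %zu: %u\n", i, (unsigned)table[i]);
  // granule 3 -> 1 (begin), granule 4 -> 0, granule 5 -> 16 (end):
  // object extents are recoverable from the table alone.
  return 0;
}

In the real API the metadata bytes live at the start of the aligned region, which is why gc_update_alloc_table derives the byte address as base + granule from the object address alone.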

mt-gcbench.c

@@ -195,6 +195,13 @@ static void allocate_garbage(struct thread *t) {
   }
 }
 
+static void set_field(Node *obj, Node **field, Node *val) {
+  gc_small_write_barrier(gc_ref_from_heap_object(obj),
+                         gc_edge(field),
+                         gc_ref_from_heap_object(val));
+  *field = val;
+}
+
 // Build tree top down, assigning to older objects.
 static void populate(struct thread *t, int depth, Node *node) {
   struct mutator *mut = t->mut;
@@ -210,8 +217,8 @@
   NodeHandle r = { allocate_node(mut) };
   PUSH_HANDLE(mut, r);
 
-  set_field(HANDLE_REF(self), (void**)&HANDLE_REF(self)->left, HANDLE_REF(l));
-  set_field(HANDLE_REF(self), (void**)&HANDLE_REF(self)->right, HANDLE_REF(r));
+  set_field(HANDLE_REF(self), &HANDLE_REF(self)->left, HANDLE_REF(l));
+  set_field(HANDLE_REF(self), &HANDLE_REF(self)->right, HANDLE_REF(r));
 
   // i is 0 because the memory is zeroed.
   HANDLE_REF(self)->j = depth;
@@ -236,8 +243,8 @@ static Node* make_tree(struct thread *t, int depth) {
   allocate_garbage(t);
   Node *result = allocate_node(mut);
-  init_field(result, (void**)&result->left, HANDLE_REF(left));
-  init_field(result, (void**)&result->right, HANDLE_REF(right));
+  result->left = HANDLE_REF(left);
+  result->right = HANDLE_REF(right);
 
   // i is 0 because the memory is zeroed.
   result->j = depth;
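For context, the benchmark changes above amount to the following embedder-side idiom: plain stores immediately after allocation (no allocation or safepoint can have intervened, so the object is as young as anything it can point to), and gc_small_write_barrier before mutating an object that may already have survived a collection. A minimal sketch, assuming a hypothetical Pair type; the gc_* names are the ones this commit adds to gc-api.h:

#include "gc-api.h"

struct Pair { struct Pair *car, *cdr; };

static struct Pair* make_pair(struct mutator *mut,
                              struct Pair *car, struct Pair *cdr) {
  struct Pair *p = gc_allocate(mut, sizeof(*p));
  // Freshly allocated: plain stores, no barrier needed.
  p->car = car;
  p->cdr = cdr;
  return p;
}

static void set_car(struct Pair *p, struct Pair *val) {
  // p may be old and val young: notify the collector before the store.
  gc_small_write_barrier(gc_ref_from_heap_object(p),
                         gc_edge(&p->car),
                         gc_ref_from_heap_object(val));
  p->car = val;
}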

quads.c
@@ -51,7 +51,7 @@ static Quad* make_tree(struct mutator *mut, int depth) {
   Quad *result = allocate_quad(mut);
   for (size_t i = 0; i < 4; i++)
-    init_field(result, (void**)&result->kids[i], HANDLE_REF(kids[i]));
+    result->kids[i] = HANDLE_REF(kids[i]);
 
   for (size_t i = 0; i < 4; i++)
     POP_HANDLE(mut);

semi.h (30 lines changed)

@@ -57,14 +57,19 @@ static inline size_t gc_allocator_freelist_offset(size_t size) {
   abort();
 }
 
-static inline void gc_allocator_inline_success(struct mutator *mut,
-                                               struct gc_ref obj,
-                                               uintptr_t aligned_size) {
-  // FIXME: Allow allocator to avoid clearing memory?
-  clear_memory(gc_ref_value(obj), aligned_size);
+static inline int gc_allocator_needs_clear(void) {
+  return 1;
 }
+
+static inline size_t gc_allocator_alloc_table_alignment(void) {
+  return 0;
+}
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+  abort();
+}
+static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
+  abort();
+}
-static inline void gc_allocator_inline_failure(struct mutator *mut,
-                                               uintptr_t aligned_size) {}
 
 static inline struct heap* mutator_heap(struct mutator *mut) {
   return &mut->heap;
@@ -247,11 +252,14 @@ static inline void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
-static inline void init_field(void *obj, void **addr, void *val) {
-  *addr = val;
+static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
+  return GC_WRITE_BARRIER_NONE;
 }
-static inline void set_field(void *obj, void **addr, void *val) {
-  *addr = val;
+static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+  abort();
 }
+static inline size_t gc_small_write_barrier_card_size(void) {
+  abort();
+}
 
 static int initialize_semi_space(struct semi_space *space, size_t size) {
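Note what these constant attribute functions buy: gc-api.h's generic fast path specializes per collector at compile time. A hand-inlined sketch of what the bump-pointer path reduces to under semi.h's answers (needs_clear() == 1, alloc table disabled); toy code under those assumptions, not code from the commit:

#include <stdint.h>
#include <string.h>

// semi.h's attribute answers, as the compiler sees them after inlining.
static inline int needs_clear(void) { return 1; }
static inline size_t alloc_table_alignment(void) { return 0; }

static inline void* bump_alloc(uintptr_t *hp, uintptr_t limit, size_t size) {
  uintptr_t addr = *hp;
  if (addr + size > limit)
    return NULL;                    // the real code calls gc_allocate_small here
  *hp = addr + size;
  if (needs_clear())                // folds to: always memset
    memset((void*)addr, 0, size);
  if (alloc_table_alignment()) {
    /* folds away entirely: no alloc table for semi */
  }
  return (void*)addr;
}

This is why the commit can delete the gc_allocator_inline_success hook, which semi previously used to clear memory: the declarative gc_allocator_needs_clear attribute expresses the same thing while letting the clear inline into the mutator.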

whippet.h
@@ -1852,24 +1852,32 @@ static inline void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
   return gc_allocate(mut, size);
 }
 
-static inline void mark_space_write_barrier(void *obj) {
-  // Unconditionally mark the card the object is in.  Precondition: obj
-  // is in the mark space (is not a large object).
-  atomic_store_explicit(object_remset_byte(obj), 1, memory_order_relaxed);
+static inline enum gc_write_barrier_kind gc_small_write_barrier_kind(void) {
+  if (GC_GENERATIONAL)
+    return GC_WRITE_BARRIER_CARD;
+  return GC_WRITE_BARRIER_NONE;
 }
+static inline size_t gc_small_write_barrier_card_table_alignment(void) {
+  GC_ASSERT(GC_GENERATIONAL);
+  return SLAB_SIZE;
+}
+static inline size_t gc_small_write_barrier_card_size(void) {
+  GC_ASSERT(GC_GENERATIONAL);
+  return GRANULES_PER_REMSET_BYTE * GRANULE_SIZE;
+}
 
-// init_field is an optimization for the case in which there is no
-// intervening allocation or safepoint between allocating an object and
-// setting the value of a field in the object.  For the purposes of
-// generational collection, we can omit the barrier in that case,
-// because we know the source object is in the nursery.  It is always
-// correct to replace it with set_field.
-static inline void init_field(void *obj, void **addr, void *val) {
-  *addr = val;
+static inline size_t gc_allocator_alloc_table_alignment(void) {
+  return SLAB_SIZE;
 }
-static inline void set_field(void *obj, void **addr, void *val) {
-  if (GC_GENERATIONAL) mark_space_write_barrier(obj);
-  *addr = val;
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+  return METADATA_BYTE_YOUNG;
 }
+static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
+  return METADATA_BYTE_END;
+}
+
+static inline int gc_allocator_needs_clear(void) {
+  return 0;
+}
 
 #define FOR_EACH_GC_OPTION(M) \
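Finally, the card geometry whippet.h exposes here maps an object address to a remembered-set byte with one mask and one divide. A self-contained sketch with made-up geometry (whippet's real values are SLAB_SIZE for the alignment and GRANULES_PER_REMSET_BYTE * GRANULE_SIZE for the card size), writing into a local table rather than the slab header so it runs anywhere:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum {
  CARD_TABLE_ALIGNMENT = 1 << 22,  // assumed slab size: 4 MB
  CARD_SIZE = 256                  // assumed bytes of heap per card byte
};

// Same computation as gc_small_write_barrier's CARD case.
static _Atomic uint8_t card_table[CARD_TABLE_ALIGNMENT / CARD_SIZE];

static void mark_card(uintptr_t addr) {
  uintptr_t card = (addr & (CARD_TABLE_ALIGNMENT - 1)) / CARD_SIZE;
  atomic_store_explicit(&card_table[card], 1, memory_order_relaxed);
}

int main(void) {
  mark_card(123 * CARD_SIZE + 17);  // an object in the middle of card 123
  printf("card 123: %u\n", (unsigned)card_table[123]);
  return 0;
}

Because the card table sits at a fixed offset inside the aligned slab, the mutator finds it from the object address alone, with no lookup through the heap structure; that is what lets this barrier, like the allocation-table update, inline down to a handful of instructions.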