
Separate tagging from collector

The collector now has an abstract interface onto the embedder.  The
embedder has to supply some functionality, such as tracing and
forwarding.  This is a pretty big change in terms of lines, but it's
supposed to have no functional or performance change.
Andy Wingo 2022-08-12 16:44:38 +02:00
parent cacc28b577
commit fb71c4c363
20 changed files with 452 additions and 306 deletions
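In outline, the new boundary looks like this: the collector-facing API (gc-api.h) keeps allocation, while object tracing and forwarding become hooks the embedder supplies (gc-embedder-api.h, implemented for the benchmarks by simple-gc-embedder.h). A minimal sketch of how the collectors now call into the embedder, matching the call sites in semi.h and whippet.h in this diff; obj, tracer_visit, and trace_data are stand-ins for whatever the collector has at hand:

// Visit each edge of an object and/or learn its size; either the
// callback or the size out-parameter may be NULL.
size_t size;
gc_trace_object(obj, tracer_visit, trace_data, &size);

// Non-atomic forwarding, as used by the semi-space collector: zero
// means "not forwarded", otherwise the value is the new address.
uintptr_t forwarded = gc_object_forwarded_nonatomic(obj);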

9
bdw.h

@ -54,7 +54,7 @@ enum gc_inline_kind {
}; };
static void* allocate_small_slow(void **freelist, size_t idx, static void* allocate_small_slow(void **freelist, size_t idx,
enum gc_inline_kind kind) NEVER_INLINE; enum gc_inline_kind kind) GC_NEVER_INLINE;
static void* allocate_small_slow(void **freelist, size_t idx, static void* allocate_small_slow(void **freelist, size_t idx,
enum gc_inline_kind kind) { enum gc_inline_kind kind) {
size_t bytes = gc_inline_freelist_object_size(idx); size_t bytes = gc_inline_freelist_object_size(idx);
@ -80,8 +80,7 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
return head; return head;
} }
static inline void* allocate(struct mutator *mut, enum alloc_kind kind, static inline void* gc_allocate(struct mutator *mut, size_t size) {
size_t size) {
size_t idx = gc_inline_bytes_to_freelist_index(size); size_t idx = gc_inline_bytes_to_freelist_index(size);
if (UNLIKELY(idx >= GC_INLINE_FREELIST_COUNT)) if (UNLIKELY(idx >= GC_INLINE_FREELIST_COUNT))
@ -90,8 +89,8 @@ static inline void* allocate(struct mutator *mut, enum alloc_kind kind,
return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL); return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
} }
static inline void* allocate_pointerless(struct mutator *mut, static inline void* gc_allocate_pointerless(struct mutator *mut,
enum alloc_kind kind, size_t size) { size_t size) {
// Because the BDW API requires us to implement a custom marker so // Because the BDW API requires us to implement a custom marker so
// that the pointerless freelist gets traced, even though it's in a // that the pointerless freelist gets traced, even though it's in a
// pointerless region, we punt on thread-local pointerless freelists. // pointerless region, we punt on thread-local pointerless freelists.

gc-api.h

@ -1,67 +1,13 @@
#ifndef GC_API_H_ #ifndef GC_API_H_
#define GC_API_H_ #define GC_API_H_
#include "gc-config.h"
#include "gc-assert.h"
#include "gc-ref.h"
#include "gc-edge.h"
#include <stdint.h> #include <stdint.h>
#ifndef GC_DEBUG
#define GC_DEBUG 0
#endif
#define GC_ALWAYS_INLINE __attribute__((always_inline))
#define GC_NEVER_INLINE __attribute__((noinline))
#define GC_UNLIKELY(e) __builtin_expect(e, 0)
#define GC_LIKELY(e) __builtin_expect(e, 1)
#if GC_DEBUG
#define GC_ASSERT(x) do { if (GC_UNLIKELY(!(x))) __builtin_trap(); } while (0)
#else
#define GC_ASSERT(x) do { } while (0)
#endif
struct gc_ref {
uintptr_t value;
};
static inline struct gc_ref gc_ref(uintptr_t value) {
return (struct gc_ref){value};
}
static inline uintptr_t gc_ref_value(struct gc_ref ref) {
return ref.value;
}
static inline struct gc_ref gc_ref_null(void) {
return gc_ref(0);
}
static inline int gc_ref_is_heap_object(struct gc_ref ref) {
return ref.value != 0;
}
static inline struct gc_ref gc_ref_from_heap_object_or_null(void *obj) {
return gc_ref((uintptr_t) obj);
}
static inline struct gc_ref gc_ref_from_heap_object(void *obj) {
GC_ASSERT(obj);
return gc_ref_from_heap_object_or_null(obj);
}
static inline void* gc_ref_heap_object(struct gc_ref ref) {
GC_ASSERT(gc_ref_is_heap_object(ref));
return (void *) gc_ref_value(ref);
}
struct gc_edge {
struct gc_ref *dst;
};
static inline struct gc_edge gc_edge(void* addr) {
return (struct gc_edge){addr};
}
static inline struct gc_ref gc_edge_ref(struct gc_edge edge) {
return *edge.dst;
}
static inline void gc_edge_update(struct gc_edge edge, struct gc_ref ref) {
*edge.dst = ref;
}
// FIXME: prefix with gc_ // FIXME: prefix with gc_
struct heap; struct heap;
struct mutator; struct mutator;
@ -91,8 +37,8 @@ GC_API_ void gc_finish_for_thread(struct mutator *mut);
GC_API_ void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*), GC_API_ void* gc_call_without_gc(struct mutator *mut, void* (*f)(void*),
void *data) GC_NEVER_INLINE; void *data) GC_NEVER_INLINE;
struct gc_header { GC_API_ inline void* gc_allocate(struct mutator *mut, size_t bytes);
uintptr_t tag; // FIXME: remove :P
}; GC_API_ inline void* gc_allocate_pointerless(struct mutator *mut, size_t bytes);
#endif // GC_API_H_ #endif // GC_API_H_

15
gc-assert.h Normal file

@ -0,0 +1,15 @@
#ifndef GC_ASSERT_H
#define GC_ASSERT_H
#include "gc-config.h"
#define GC_UNLIKELY(e) __builtin_expect(e, 0)
#define GC_LIKELY(e) __builtin_expect(e, 1)
#if GC_DEBUG
#define GC_ASSERT(x) do { if (GC_UNLIKELY(!(x))) __builtin_trap(); } while (0)
#else
#define GC_ASSERT(x) do { } while (0)
#endif
#endif // GC_ASSERT_H

8
gc-config.h Normal file

@ -0,0 +1,8 @@
#ifndef GC_CONFIG_H
#define GC_CONFIG_H
#ifndef GC_DEBUG
#define GC_DEBUG 0
#endif
#endif // GC_CONFIG_H

20
gc-edge.h Normal file

@ -0,0 +1,20 @@
#ifndef GC_EDGE_H
#define GC_EDGE_H
#include "gc-ref.h"
struct gc_edge {
struct gc_ref *dst;
};
static inline struct gc_edge gc_edge(void* addr) {
return (struct gc_edge){addr};
}
static inline struct gc_ref gc_edge_ref(struct gc_edge edge) {
return *edge.dst;
}
static inline void gc_edge_update(struct gc_edge edge, struct gc_ref ref) {
*edge.dst = ref;
}
#endif // GC_EDGE_H
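
An edge is simply the address of a reference-holding field, so an embedder's visit functions wrap field addresses with gc_edge and the collector writes relocated references back through gc_edge_update. A small illustrative sketch; Pair is a hypothetical embedder type, not something defined in this commit:

typedef struct Pair {
  struct gc_header header;   // tag word, per simple-tagging-scheme.h below
  struct gc_ref car;
  struct gc_ref cdr;
} Pair;

static inline void visit_pair_fields(Pair *p,
                                     void (*visit)(struct gc_edge edge,
                                                   void *visit_data),
                                     void *visit_data) {
  visit(gc_edge(&p->car), visit_data);
  visit(gc_edge(&p->cdr), visit_data);
}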

28
gc-embedder-api.h Normal file

@ -0,0 +1,28 @@
#ifndef GC_EMBEDDER_API_H
#define GC_EMBEDDER_API_H
#include "gc-edge.h"
#include "gc-forwarding.h"
#ifndef GC_EMBEDDER_API
#define GC_EMBEDDER_API static
#endif
GC_EMBEDDER_API inline void gc_trace_object(void *object,
void (*trace_edge)(struct gc_edge edge,
void *trace_data),
void *trace_data,
size_t *size) GC_ALWAYS_INLINE;
GC_EMBEDDER_API inline uintptr_t gc_object_forwarded_nonatomic(void *object);
GC_EMBEDDER_API inline void gc_object_forward_nonatomic(void *object, uintptr_t new_addr);
GC_EMBEDDER_API inline struct gc_atomic_forward gc_atomic_forward_begin(void *obj);
GC_EMBEDDER_API inline void gc_atomic_forward_acquire(struct gc_atomic_forward *);
GC_EMBEDDER_API inline int gc_atomic_forward_retry_busy(struct gc_atomic_forward *);
GC_EMBEDDER_API inline void gc_atomic_forward_abort(struct gc_atomic_forward *);
GC_EMBEDDER_API inline void gc_atomic_forward_commit(struct gc_atomic_forward *,
uintptr_t new_addr);
GC_EMBEDDER_API inline uintptr_t gc_atomic_forward_address(struct gc_atomic_forward *);
#endif // GC_EMBEDDER_API_H
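
For one object kind, satisfying gc_trace_object amounts to calling that kind's visit and size helpers; simple-gc-embedder.h below does the same thing generically over all kinds via FOR_EACH_HEAP_OBJECT_KIND. A single-kind sketch, where Node, visit_node_fields, and node_size come from the benchmark's own type definitions rather than from this header:

static inline void gc_trace_object(void *object,
                                   void (*trace_edge)(struct gc_edge edge,
                                                      void *trace_data),
                                   void *trace_data,
                                   size_t *size) {
  // Both outputs are optional: trace_edge is NULL when the caller only
  // wants the size, and size is NULL when it only wants to trace.
  if (trace_edge)
    visit_node_fields((Node*)object, trace_edge, trace_data);
  if (size)
    *size = node_size((Node*)object);
}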

20
gc-forwarding.h Normal file

@ -0,0 +1,20 @@
#ifndef GC_FORWARDING_H
#define GC_FORWARDING_H
#include <stdint.h>
enum gc_forwarding_state {
GC_FORWARDING_STATE_FORWARDED,
GC_FORWARDING_STATE_BUSY,
GC_FORWARDING_STATE_ACQUIRED,
GC_FORWARDING_STATE_NOT_FORWARDED,
GC_FORWARDING_STATE_ABORTED
};
struct gc_atomic_forward {
void *object;
uintptr_t data;
enum gc_forwarding_state state;
};
#endif // GC_FORWARDING_H
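
These states encode the handshake a parallel evacuator performs: begin a forwarding attempt, acquire the object if it is not yet forwarded, then either commit the new address or abort; a BUSY result means another thread is mid-copy and must be spun on with retry. A condensed sketch of the intended call sequence, where forward_or_evacuate and copy_object are hypothetical helpers standing in for collector-specific code (whippet.h below uses this same pattern in mark_space_evacuate_or_mark_object):

// Returns the object's final address, copying it if we win the race.
static uintptr_t forward_or_evacuate(void *obj) {
  struct gc_atomic_forward fwd = gc_atomic_forward_begin(obj);
  if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
    gc_atomic_forward_acquire(&fwd);
  switch (fwd.state) {
  case GC_FORWARDING_STATE_ACQUIRED: {
    // We own the copy: make it, then publish the new address.  If no
    // copy can be made, gc_atomic_forward_abort(&fwd) releases the object.
    uintptr_t new_addr = copy_object(obj);
    gc_atomic_forward_commit(&fwd, new_addr);
    return new_addr;
  }
  case GC_FORWARDING_STATE_BUSY:
    // Another thread is copying; wait until it commits or aborts.
    while (!gc_atomic_forward_retry_busy(&fwd))
      ; // spin
    if (fwd.state == GC_FORWARDING_STATE_FORWARDED)
      return gc_atomic_forward_address(&fwd);
    return (uintptr_t)obj;  // aborted: the object stays in place
  case GC_FORWARDING_STATE_FORWARDED:
    return gc_atomic_forward_address(&fwd);
  default:
    return (uintptr_t)obj;
  }
}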

7
gc-inline.h Normal file

@ -0,0 +1,7 @@
#ifndef GC_INLINE_H_
#define GC_INLINE_H_
#define GC_ALWAYS_INLINE __attribute__((always_inline))
#define GC_NEVER_INLINE __attribute__((noinline))
#endif // GC_INLINE_H_

37
gc-ref.h Normal file

@ -0,0 +1,37 @@
#ifndef GC_REF_H
#define GC_REF_H
#include "gc-assert.h"
#include <stdint.h>
struct gc_ref {
uintptr_t value;
};
static inline struct gc_ref gc_ref(uintptr_t value) {
return (struct gc_ref){value};
}
static inline uintptr_t gc_ref_value(struct gc_ref ref) {
return ref.value;
}
static inline struct gc_ref gc_ref_null(void) {
return gc_ref(0);
}
static inline int gc_ref_is_heap_object(struct gc_ref ref) {
return ref.value != 0;
}
static inline struct gc_ref gc_ref_from_heap_object_or_null(void *obj) {
return gc_ref((uintptr_t) obj);
}
static inline struct gc_ref gc_ref_from_heap_object(void *obj) {
GC_ASSERT(obj);
return gc_ref_from_heap_object_or_null(obj);
}
static inline void* gc_ref_heap_object(struct gc_ref ref) {
GC_ASSERT(gc_ref_is_heap_object(ref));
return (void *) gc_ref_value(ref);
}
#endif // GC_REF_H

heap-objects.h

@ -1,8 +1,8 @@
#ifndef HEAP_OBJECTS_H #ifndef HEAP_OBJECTS_H
#define HEAP_OBJECTS_H #define HEAP_OBJECTS_H
#include "inline.h" #include "gc-inline.h"
#include "gc-api.h" #include "gc-edge.h"
#define DECLARE_NODE_TYPE(name, Name, NAME) \ #define DECLARE_NODE_TYPE(name, Name, NAME) \
struct Name; \ struct Name; \
@ -17,10 +17,10 @@ enum alloc_kind {
#undef DEFINE_ENUM #undef DEFINE_ENUM
#define DEFINE_METHODS(name, Name, NAME) \ #define DEFINE_METHODS(name, Name, NAME) \
static inline size_t name##_size(Name *obj) ALWAYS_INLINE; \ static inline size_t name##_size(Name *obj) GC_ALWAYS_INLINE; \
static inline void visit_##name##_fields(Name *obj,\ static inline void visit_##name##_fields(Name *obj,\
void (*visit)(struct gc_edge edge, void *visit_data), \ void (*visit)(struct gc_edge edge, void *visit_data), \
void *visit_data) ALWAYS_INLINE; void *visit_data) GC_ALWAYS_INLINE;
FOR_EACH_HEAP_OBJECT_KIND(DEFINE_METHODS) FOR_EACH_HEAP_OBJECT_KIND(DEFINE_METHODS)
#undef DEFINE_METHODS #undef DEFINE_METHODS

inline.h

@ -1,7 +0,0 @@
#ifndef INLINE_H
#define INLINE_H
#define ALWAYS_INLINE __attribute__((always_inline))
#define NEVER_INLINE __attribute__((noinline))
#endif // INLINE_H

mt-gcbench.c

@ -44,10 +44,17 @@
#include <stdlib.h> #include <stdlib.h>
#include <sys/time.h> #include <sys/time.h>
#include "assert.h" // Tracer will be specialized with respect to tags defined in this header.
#include "mt-gcbench-types.h" #include "mt-gcbench-types.h"
#include "assert.h"
#include "simple-allocator.h"
#include "simple-gc-embedder.h"
#include "gc-api.h"
#include "gc.h" #include "gc.h"
#include "inline.h"
#include "gc-inline.h"
#define MAX_THREAD_COUNT 256 #define MAX_THREAD_COUNT 256
@ -100,6 +107,7 @@ static inline void
visit_hole_fields(Hole *obj, visit_hole_fields(Hole *obj,
void (*visit)(struct gc_edge edge, void *visit_data), void (*visit)(struct gc_edge edge, void *visit_data),
void *visit_data) { void *visit_data) {
abort();
} }
typedef HANDLE_TO(Node) NodeHandle; typedef HANDLE_TO(Node) NodeHandle;
@ -107,22 +115,22 @@ typedef HANDLE_TO(DoubleArray) DoubleArrayHandle;
static Node* allocate_node(struct mutator *mut) { static Node* allocate_node(struct mutator *mut) {
// memset to 0 by the collector. // memset to 0 by the collector.
return allocate(mut, ALLOC_KIND_NODE, sizeof (Node)); return gc_allocate_with_kind(mut, ALLOC_KIND_NODE, sizeof (Node));
} }
static DoubleArray* allocate_double_array(struct mutator *mut, static DoubleArray* allocate_double_array(struct mutator *mut,
size_t size) { size_t size) {
// May be uninitialized. // May be uninitialized.
size_t bytes = sizeof(DoubleArray) + sizeof (double) * size;
DoubleArray *ret = DoubleArray *ret =
allocate_pointerless(mut, ALLOC_KIND_DOUBLE_ARRAY, gc_allocate_pointerless_with_kind(mut, ALLOC_KIND_DOUBLE_ARRAY, bytes);
sizeof(DoubleArray) + sizeof (double) * size);
ret->length = size; ret->length = size;
return ret; return ret;
} }
static Hole* allocate_hole(struct mutator *mut, size_t size) { static Hole* allocate_hole(struct mutator *mut, size_t size) {
Hole *ret = allocate(mut, ALLOC_KIND_HOLE, size_t bytes = sizeof(Hole) + sizeof (uintptr_t) * size;
sizeof(Hole) + sizeof (uintptr_t) * size); Hole *ret = gc_allocate_with_kind(mut, ALLOC_KIND_HOLE, bytes);
ret->length = size; ret->length = size;
return ret; return ret;
} }
@ -289,8 +297,8 @@ static void time_construction(struct thread *t, int depth) {
POP_HANDLE(mut); POP_HANDLE(mut);
} }
static void* call_with_stack_base(void* (*)(uintptr_t*, void*), void*) NEVER_INLINE; static void* call_with_stack_base(void* (*)(uintptr_t*, void*), void*) GC_NEVER_INLINE;
static void* call_with_stack_base_inner(void* (*)(uintptr_t*, void*), uintptr_t*, void*) NEVER_INLINE; static void* call_with_stack_base_inner(void* (*)(uintptr_t*, void*), uintptr_t*, void*) GC_NEVER_INLINE;
static void* call_with_stack_base_inner(void* (*f)(uintptr_t *stack_base, void *arg), static void* call_with_stack_base_inner(void* (*f)(uintptr_t *stack_base, void *arg),
uintptr_t *stack_base, void *arg) { uintptr_t *stack_base, void *arg) {
return f(stack_base, arg); return f(stack_base, arg);

parallel-tracer.h

@ -8,7 +8,7 @@
#include "assert.h" #include "assert.h"
#include "debug.h" #include "debug.h"
#include "inline.h" #include "gc-inline.h"
#include "spin.h" #include "spin.h"
// The Chase-Lev work-stealing deque, as initially described in "Dynamic // The Chase-Lev work-stealing deque, as initially described in "Dynamic
@ -448,10 +448,10 @@ static void tracer_release(struct heap *heap) {
} }
struct gcobj; struct gcobj;
static inline void tracer_visit(struct gc_edge edge, void *trace_data) ALWAYS_INLINE; static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
static inline void trace_one(struct gcobj *obj, void *trace_data) ALWAYS_INLINE; static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
static inline int trace_edge(struct heap *heap, static inline int trace_edge(struct heap *heap,
struct gc_edge edge) ALWAYS_INLINE; struct gc_edge edge) GC_ALWAYS_INLINE;
static inline void static inline void
tracer_share(struct local_tracer *trace) { tracer_share(struct local_tracer *trace) {

quads.c

@ -4,6 +4,8 @@
#include "assert.h" #include "assert.h"
#include "quads-types.h" #include "quads-types.h"
#include "simple-allocator.h"
#include "simple-gc-embedder.h"
#include "gc.h" #include "gc.h"
typedef struct Quad { typedef struct Quad {
@ -24,7 +26,7 @@ typedef HANDLE_TO(Quad) QuadHandle;
static Quad* allocate_quad(struct mutator *mut) { static Quad* allocate_quad(struct mutator *mut) {
// memset to 0 by the collector. // memset to 0 by the collector.
return allocate(mut, ALLOC_KIND_QUAD, sizeof (Quad)); return gc_allocate_with_kind(mut, ALLOC_KIND_QUAD, sizeof (Quad));
} }
/* Get the current time in microseconds */ /* Get the current time in microseconds */
@ -106,7 +108,7 @@ static size_t tree_size(size_t depth) {
#define MAX_THREAD_COUNT 256 #define MAX_THREAD_COUNT 256
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
if (argc != 3) { if (argc != 4) {
fprintf(stderr, "usage: %s DEPTH MULTIPLIER PARALLELISM\n", argv[0]); fprintf(stderr, "usage: %s DEPTH MULTIPLIER PARALLELISM\n", argv[0]);
return 1; return 1;
} }

68
semi.h

@ -52,8 +52,8 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
memset((char*)addr, 0, size); memset((char*)addr, 0, size);
} }
static void collect(struct mutator *mut) NEVER_INLINE; static void collect(struct mutator *mut) GC_NEVER_INLINE;
static void collect_for_alloc(struct mutator *mut, size_t bytes) NEVER_INLINE; static void collect_for_alloc(struct mutator *mut, size_t bytes) GC_NEVER_INLINE;
static void visit(struct gc_edge edge, void *visit_data); static void visit(struct gc_edge edge, void *visit_data);
@ -93,18 +93,9 @@ static void flip(struct semi_space *space) {
space->count++; space->count++;
} }
static void* copy(struct semi_space *space, uintptr_t kind, void *obj) { static void* copy(struct semi_space *space, void *obj) {
size_t size; size_t size;
switch (kind) { gc_trace_object(obj, NULL, NULL, &size);
#define COMPUTE_SIZE(name, Name, NAME) \
case ALLOC_KIND_##NAME: \
size = name##_size(obj); \
break;
FOR_EACH_HEAP_OBJECT_KIND(COMPUTE_SIZE)
#undef COMPUTE_SIZE
default:
abort ();
}
void *new_obj = (void*)space->hp; void *new_obj = (void*)space->hp;
memcpy(new_obj, obj, size); memcpy(new_obj, obj, size);
*(uintptr_t*) obj = space->hp; *(uintptr_t*) obj = space->hp;
@ -113,31 +104,14 @@ static void* copy(struct semi_space *space, uintptr_t kind, void *obj) {
} }
static uintptr_t scan(struct heap *heap, uintptr_t grey) { static uintptr_t scan(struct heap *heap, uintptr_t grey) {
void *obj = (void*)grey; size_t size;
uintptr_t kind = *(uintptr_t*) obj; gc_trace_object((void*)grey, visit, heap, &size);
switch (kind) { return grey + align_up(size, ALIGNMENT);
#define SCAN_OBJECT(name, Name, NAME) \
case ALLOC_KIND_##NAME: \
visit_##name##_fields((Name*)obj, visit, heap); \
return grey + align_up(name##_size((Name*)obj), ALIGNMENT);
FOR_EACH_HEAP_OBJECT_KIND(SCAN_OBJECT)
#undef SCAN_OBJECT
default:
abort ();
}
} }
static void* forward(struct semi_space *space, void *obj) { static void* forward(struct semi_space *space, void *obj) {
uintptr_t header_word = *(uintptr_t*)obj; uintptr_t forwarded = gc_object_forwarded_nonatomic(obj);
switch (header_word) { return forwarded ? (void*)forwarded : copy(space, obj);
#define CASE_ALLOC_KIND(name, Name, NAME) \
case ALLOC_KIND_##NAME:
FOR_EACH_HEAP_OBJECT_KIND(CASE_ALLOC_KIND)
#undef CASE_ALLOC_KIND
return copy(space, header_word, obj);
default:
return (void*)header_word;
}
} }
static void visit_semi_space(struct heap *heap, struct semi_space *space, static void visit_semi_space(struct heap *heap, struct semi_space *space,
@ -198,8 +172,7 @@ static void collect_for_alloc(struct mutator *mut, size_t bytes) {
} }
static const size_t LARGE_OBJECT_THRESHOLD = 8192; static const size_t LARGE_OBJECT_THRESHOLD = 8192;
static void* allocate_large(struct mutator *mut, enum alloc_kind kind, static void* allocate_large(struct mutator *mut, size_t size) {
size_t size) {
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
struct large_object_space *space = heap_large_object_space(heap); struct large_object_space *space = heap_large_object_space(heap);
struct semi_space *semi_space = heap_semi_space(heap); struct semi_space *semi_space = heap_semi_space(heap);
@ -222,14 +195,12 @@ static void* allocate_large(struct mutator *mut, enum alloc_kind kind,
abort(); abort();
} }
*(uintptr_t*)ret = kind;
return ret; return ret;
} }
static inline void* allocate(struct mutator *mut, enum alloc_kind kind, static inline void* gc_allocate(struct mutator *mut, size_t size) {
size_t size) {
if (size >= LARGE_OBJECT_THRESHOLD) if (size >= LARGE_OBJECT_THRESHOLD)
return allocate_large(mut, kind, size); return allocate_large(mut, size);
struct semi_space *space = mutator_semi_space(mut); struct semi_space *space = mutator_semi_space(mut);
while (1) { while (1) {
@ -240,18 +211,13 @@ static inline void* allocate(struct mutator *mut, enum alloc_kind kind,
continue; continue;
} }
space->hp = new_hp; space->hp = new_hp;
void *ret = (void *)addr; // FIXME: Allow allocator to avoid clearing memory?
uintptr_t *header_word = ret; clear_memory(addr, size);
*header_word = kind; return (void *)addr;
// FIXME: Allow allocator to avoid initializing pointerless memory?
// if (kind == NODE)
clear_memory(addr + sizeof(uintptr_t), size - sizeof(uintptr_t));
return ret;
} }
} }
static inline void* allocate_pointerless(struct mutator *mut, static inline void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
enum alloc_kind kind, size_t size) { return gc_allocate(mut, size);
return allocate(mut, kind, size);
} }
static inline void init_field(void *obj, void **addr, void *val) { static inline void init_field(void *obj, void **addr, void *val) {

serial-tracer.h

@ -52,7 +52,7 @@ trace_queue_put(struct trace_queue *q, size_t idx, struct gcobj *x) {
q->buf[idx & (q->size - 1)] = x; q->buf[idx & (q->size - 1)] = x;
} }
static int trace_queue_grow(struct trace_queue *q) NEVER_INLINE; static int trace_queue_grow(struct trace_queue *q) GC_NEVER_INLINE;
static int static int
trace_queue_grow(struct trace_queue *q) { trace_queue_grow(struct trace_queue *q) {
@ -138,10 +138,10 @@ static void tracer_release(struct heap *heap) {
} }
struct gcobj; struct gcobj;
static inline void tracer_visit(struct gc_edge edge, void *trace_data) ALWAYS_INLINE; static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
static inline void trace_one(struct gcobj *obj, void *trace_data) ALWAYS_INLINE; static inline void trace_one(struct gcobj *obj, void *trace_data) GC_ALWAYS_INLINE;
static inline int trace_edge(struct heap *heap, static inline int trace_edge(struct heap *heap,
struct gc_edge edge) ALWAYS_INLINE; struct gc_edge edge) GC_ALWAYS_INLINE;
static inline void static inline void
tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) { tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) {

21
simple-allocator.h Normal file

@ -0,0 +1,21 @@
#ifndef SIMPLE_ALLOCATOR_H
#define SIMPLE_ALLOCATOR_H
#include "simple-tagging-scheme.h"
#include "gc-api.h"
static inline void*
gc_allocate_with_kind(struct mutator *mut, enum alloc_kind kind, size_t bytes) {
void *obj = gc_allocate(mut, bytes);
*tag_word(obj) = tag_live(kind);
return obj;
}
static inline void*
gc_allocate_pointerless_with_kind(struct mutator *mut, enum alloc_kind kind, size_t bytes) {
void *obj = gc_allocate_pointerless(mut, bytes);
*tag_word(obj) = tag_live(kind);
return obj;
}
#endif // SIMPLE_ALLOCATOR_H
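
These wrappers are what the benchmarks call from now on; mt-gcbench.c above, for example, allocates nodes with gc_allocate_with_kind(mut, ALLOC_KIND_NODE, sizeof (Node)), so the tag word is written on the embedder side instead of inside each collector's allocator.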

98
simple-gc-embedder.h Normal file

@ -0,0 +1,98 @@
#include <stdatomic.h>
#include "simple-tagging-scheme.h"
#include "gc-embedder-api.h"
static inline void gc_trace_object(void *object,
void (*trace_edge)(struct gc_edge edge,
void *trace_data),
void *trace_data,
size_t *size) {
switch (tag_live_alloc_kind(*tag_word(object))) {
#define SCAN_OBJECT(name, Name, NAME) \
case ALLOC_KIND_##NAME: \
if (trace_edge) \
visit_##name##_fields((Name*)object, trace_edge, trace_data); \
if (size) \
*size = name##_size(object); \
break;
FOR_EACH_HEAP_OBJECT_KIND(SCAN_OBJECT)
#undef SCAN_OBJECT
default:
abort ();
}
}
static inline uintptr_t gc_object_forwarded_nonatomic(void *object) {
uintptr_t tag = *tag_word(object);
return (tag & gcobj_not_forwarded_bit) ? 0 : tag;
}
static inline void gc_object_forward_nonatomic(void *object,
uintptr_t new_addr) {
*tag_word(object) = new_addr;
}
static inline struct gc_atomic_forward
gc_atomic_forward_begin(void *object) {
uintptr_t tag = atomic_load_explicit(tag_word(object), memory_order_acquire);
enum gc_forwarding_state state;
if (tag == gcobj_busy)
state = GC_FORWARDING_STATE_BUSY;
else if (tag & gcobj_not_forwarded_bit)
state = GC_FORWARDING_STATE_NOT_FORWARDED;
else
state = GC_FORWARDING_STATE_FORWARDED;
return (struct gc_atomic_forward){ object, tag, state };
}
static inline int
gc_atomic_forward_retry_busy(struct gc_atomic_forward *fwd) {
GC_ASSERT(fwd->state == GC_FORWARDING_STATE_BUSY);
uintptr_t tag = atomic_load_explicit(tag_word(fwd->object),
memory_order_acquire);
if (tag == gcobj_busy)
return 0;
if (tag & gcobj_not_forwarded_bit)
fwd->state = GC_FORWARDING_STATE_ABORTED;
else {
fwd->state = GC_FORWARDING_STATE_FORWARDED;
fwd->data = tag;
}
return 1;
}
static inline void
gc_atomic_forward_acquire(struct gc_atomic_forward *fwd) {
GC_ASSERT(fwd->state == GC_FORWARDING_STATE_NOT_FORWARDED);
if (atomic_compare_exchange_strong(tag_word(fwd->object), &fwd->data,
gcobj_busy))
fwd->state = GC_FORWARDING_STATE_ACQUIRED;
else if (fwd->data == gcobj_busy)
fwd->state = GC_FORWARDING_STATE_BUSY;
else {
GC_ASSERT((fwd->data & gcobj_not_forwarded_bit) == 0);
fwd->state = GC_FORWARDING_STATE_FORWARDED;
}
}
static inline void
gc_atomic_forward_abort(struct gc_atomic_forward *fwd) {
GC_ASSERT(fwd->state == GC_FORWARDING_STATE_ACQUIRED);
atomic_store_explicit(tag_word(fwd->object), fwd->data, memory_order_release);
fwd->state = GC_FORWARDING_STATE_ABORTED;
}
static inline void
gc_atomic_forward_commit(struct gc_atomic_forward *fwd, uintptr_t new_addr) {
GC_ASSERT(fwd->state == GC_FORWARDING_STATE_ACQUIRED);
*tag_word((void*)new_addr) = fwd->data;
atomic_store_explicit(tag_word(fwd->object), new_addr, memory_order_release);
fwd->state = GC_FORWARDING_STATE_FORWARDED;
}
static inline uintptr_t
gc_atomic_forward_address(struct gc_atomic_forward *fwd) {
GC_ASSERT(fwd->state == GC_FORWARDING_STATE_FORWARDED);
return fwd->data;
}

29
simple-tagging-scheme.h Normal file

@ -0,0 +1,29 @@
#ifndef SIMPLE_TAGGING_SCHEME_H
#define SIMPLE_TAGGING_SCHEME_H
#include <stdint.h>
struct gc_header {
uintptr_t tag;
};
// Alloc kind is in bits 1-7, for live objects.
static const uintptr_t gcobj_alloc_kind_mask = 0x7f;
static const uintptr_t gcobj_alloc_kind_shift = 1;
static const uintptr_t gcobj_forwarded_mask = 0x1;
static const uintptr_t gcobj_not_forwarded_bit = 0x1;
static const uintptr_t gcobj_busy = 0;
static inline uint8_t tag_live_alloc_kind(uintptr_t tag) {
return (tag >> gcobj_alloc_kind_shift) & gcobj_alloc_kind_mask;
}
static inline uintptr_t tag_live(uint8_t alloc_kind) {
return ((uintptr_t)alloc_kind << gcobj_alloc_kind_shift)
| gcobj_not_forwarded_bit;
}
static inline uintptr_t* tag_word(void *object) {
struct gc_header *header = object;
return &header->tag;
}
#endif // SIMPLE_TAGGING_SCHEME_H
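
To make the layout concrete: a set low bit means "live, not forwarded", bits 1-7 carry the alloc kind, and an all-zero word is the transient busy marker used during atomic forwarding. For instance, with an arbitrary kind value of 3:

uintptr_t tag  = tag_live(3);              // == (3 << 1) | 1
uint8_t   kind = tag_live_alloc_kind(tag); // == 3
// Once forwarded, the tag word holds the new address, whose low bit is
// clear because heap objects are at least pointer-aligned; that is how
// gc_object_forwarded_nonatomic above tells the two cases apart.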

265
whippet.h

@ -15,9 +15,8 @@
#include <string.h> #include <string.h>
#include <unistd.h> #include <unistd.h>
#include "assert.h"
#include "debug.h" #include "debug.h"
#include "inline.h" #include "gc-inline.h"
#include "large-object-space.h" #include "large-object-space.h"
#include "precise-roots.h" #include "precise-roots.h"
#if GC_PARALLEL_TRACE #if GC_PARALLEL_TRACE
@ -194,7 +193,7 @@ static uint8_t *object_metadata_byte(void *obj) {
#define GRANULES_PER_BLOCK (BLOCK_SIZE / GRANULE_SIZE) #define GRANULES_PER_BLOCK (BLOCK_SIZE / GRANULE_SIZE)
#define GRANULES_PER_REMSET_BYTE (GRANULES_PER_BLOCK / REMSET_BYTES_PER_BLOCK) #define GRANULES_PER_REMSET_BYTE (GRANULES_PER_BLOCK / REMSET_BYTES_PER_BLOCK)
static uint8_t *object_remset_byte(void *obj) { static uint8_t *object_remset_byte(void *obj) {
ASSERT(!heap_object_is_large(obj)); GC_ASSERT(!heap_object_is_large(obj));
uintptr_t addr = (uintptr_t) obj; uintptr_t addr = (uintptr_t) obj;
uintptr_t base = addr & ~(SLAB_SIZE - 1); uintptr_t base = addr & ~(SLAB_SIZE - 1);
uintptr_t granule = (addr & (SLAB_SIZE - 1)) >> GRANULE_SIZE_LOG_2; uintptr_t granule = (addr & (SLAB_SIZE - 1)) >> GRANULE_SIZE_LOG_2;
@ -225,7 +224,7 @@ static uintptr_t block_summary_next(struct block_summary *summary) {
} }
static void block_summary_set_next(struct block_summary *summary, static void block_summary_set_next(struct block_summary *summary,
uintptr_t next) { uintptr_t next) {
ASSERT((next & (BLOCK_SIZE - 1)) == 0); GC_ASSERT((next & (BLOCK_SIZE - 1)) == 0);
summary->next_and_flags = summary->next_and_flags =
(summary->next_and_flags & (BLOCK_SIZE - 1)) | next; (summary->next_and_flags & (BLOCK_SIZE - 1)) | next;
} }
@ -268,29 +267,7 @@ static inline size_t size_to_granules(size_t size) {
return (size + GRANULE_SIZE - 1) >> GRANULE_SIZE_LOG_2; return (size + GRANULE_SIZE - 1) >> GRANULE_SIZE_LOG_2;
} }
// Alloc kind is in bits 1-7, for live objects. struct gcobj;
static const uintptr_t gcobj_alloc_kind_mask = 0x7f;
static const uintptr_t gcobj_alloc_kind_shift = 1;
static const uintptr_t gcobj_forwarded_mask = 0x1;
static const uintptr_t gcobj_not_forwarded_bit = 0x1;
static inline uint8_t tag_live_alloc_kind(uintptr_t tag) {
return (tag >> gcobj_alloc_kind_shift) & gcobj_alloc_kind_mask;
}
static inline uintptr_t tag_live(uint8_t alloc_kind) {
return ((uintptr_t)alloc_kind << gcobj_alloc_kind_shift)
| gcobj_not_forwarded_bit;
}
static inline uintptr_t tag_forwarded(struct gcobj *new_addr) {
return (uintptr_t)new_addr;
}
struct gcobj {
union {
uintptr_t tag;
uintptr_t words[0];
void *pointers[0];
};
};
struct evacuation_allocator { struct evacuation_allocator {
size_t allocated; // atomically size_t allocated; // atomically
@ -396,18 +373,12 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
memset((char*)addr, 0, size); memset((char*)addr, 0, size);
} }
static void collect(struct mutator *mut) NEVER_INLINE; static void collect(struct mutator *mut) GC_NEVER_INLINE;
static int heap_object_is_large(struct gcobj *obj) { static int heap_object_is_large(struct gcobj *obj) {
switch (tag_live_alloc_kind(obj->tag)) { size_t size;
#define IS_LARGE(name, Name, NAME) \ gc_trace_object(obj, NULL, NULL, &size);
case ALLOC_KIND_##NAME: \ return size > LARGE_OBJECT_THRESHOLD;
return name##_size((Name*)obj) > LARGE_OBJECT_THRESHOLD;
break;
FOR_EACH_HEAP_OBJECT_KIND(IS_LARGE)
#undef IS_LARGE
}
abort();
} }
static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) { static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) {
@ -436,7 +407,7 @@ static inline int mark_space_mark_object(struct mark_space *space,
static uintptr_t make_evacuation_allocator_cursor(uintptr_t block, static uintptr_t make_evacuation_allocator_cursor(uintptr_t block,
size_t allocated) { size_t allocated) {
ASSERT(allocated < (BLOCK_SIZE - 1) * (uint64_t) BLOCK_SIZE); GC_ASSERT(allocated < (BLOCK_SIZE - 1) * (uint64_t) BLOCK_SIZE);
return (block & ~(BLOCK_SIZE - 1)) | (allocated / BLOCK_SIZE); return (block & ~(BLOCK_SIZE - 1)) | (allocated / BLOCK_SIZE);
} }
@ -453,17 +424,17 @@ static void prepare_evacuation_allocator(struct evacuation_allocator *alloc,
static void clear_remaining_metadata_bytes_in_block(uintptr_t block, static void clear_remaining_metadata_bytes_in_block(uintptr_t block,
uintptr_t allocated) { uintptr_t allocated) {
ASSERT((allocated & (GRANULE_SIZE - 1)) == 0); GC_ASSERT((allocated & (GRANULE_SIZE - 1)) == 0);
uintptr_t base = block + allocated; uintptr_t base = block + allocated;
uintptr_t limit = block + BLOCK_SIZE; uintptr_t limit = block + BLOCK_SIZE;
uintptr_t granules = (limit - base) >> GRANULE_SIZE_LOG_2; uintptr_t granules = (limit - base) >> GRANULE_SIZE_LOG_2;
ASSERT(granules <= GRANULES_PER_BLOCK); GC_ASSERT(granules <= GRANULES_PER_BLOCK);
memset(object_metadata_byte((void*)base), 0, granules); memset(object_metadata_byte((void*)base), 0, granules);
} }
static void finish_evacuation_allocator_block(uintptr_t block, static void finish_evacuation_allocator_block(uintptr_t block,
uintptr_t allocated) { uintptr_t allocated) {
ASSERT(allocated <= BLOCK_SIZE); GC_ASSERT(allocated <= BLOCK_SIZE);
struct block_summary *summary = block_summary_for_addr(block); struct block_summary *summary = block_summary_for_addr(block);
block_summary_set_flag(summary, BLOCK_NEEDS_SWEEP); block_summary_set_flag(summary, BLOCK_NEEDS_SWEEP);
size_t fragmentation = (BLOCK_SIZE - allocated) >> GRANULE_SIZE_LOG_2; size_t fragmentation = (BLOCK_SIZE - allocated) >> GRANULE_SIZE_LOG_2;
@ -489,13 +460,13 @@ static void finish_evacuation_allocator(struct evacuation_allocator *alloc,
allocated = alloc->limit; allocated = alloc->limit;
while (allocated >= BLOCK_SIZE) { while (allocated >= BLOCK_SIZE) {
uintptr_t block = pop_block(targets); uintptr_t block = pop_block(targets);
ASSERT(block); GC_ASSERT(block);
allocated -= BLOCK_SIZE; allocated -= BLOCK_SIZE;
} }
if (allocated) { if (allocated) {
// Finish off the last partially-filled block. // Finish off the last partially-filled block.
uintptr_t block = pop_block(targets); uintptr_t block = pop_block(targets);
ASSERT(block); GC_ASSERT(block);
finish_evacuation_allocator_block(block, allocated); finish_evacuation_allocator_block(block, allocated);
} }
size_t remaining = atomic_load_explicit(&targets->count, memory_order_acquire); size_t remaining = atomic_load_explicit(&targets->count, memory_order_acquire);
@ -536,7 +507,7 @@ static struct gcobj *evacuation_allocate(struct mark_space *space,
uintptr_t base = seq * BLOCK_SIZE; uintptr_t base = seq * BLOCK_SIZE;
while ((base ^ next) & ~block_mask) { while ((base ^ next) & ~block_mask) {
ASSERT(base < next); GC_ASSERT(base < next);
if (base + BLOCK_SIZE > prev) { if (base + BLOCK_SIZE > prev) {
// The allocation straddles a block boundary, and the cursor has // The allocation straddles a block boundary, and the cursor has
// caught up so that we identify the block for the previous // caught up so that we identify the block for the previous
@ -549,10 +520,10 @@ static struct gcobj *evacuation_allocate(struct mark_space *space,
base += BLOCK_SIZE; base += BLOCK_SIZE;
if (base >= alloc->limit) { if (base >= alloc->limit) {
// Ran out of blocks! // Ran out of blocks!
ASSERT(!block); GC_ASSERT(!block);
return NULL; return NULL;
} }
ASSERT(block); GC_ASSERT(block);
// This store can race with other allocators, but that's OK as long // This store can race with other allocators, but that's OK as long
// as it never advances the cursor beyond the allocation pointer, // as it never advances the cursor beyond the allocation pointer,
// which it won't because we updated the allocation pointer already. // which it won't because we updated the allocation pointer already.
@ -579,37 +550,28 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
((byte & METADATA_BYTE_PINNED) == 0)) { ((byte & METADATA_BYTE_PINNED) == 0)) {
// This is an evacuating collection, and we are attempting to // This is an evacuating collection, and we are attempting to
// evacuate this block, and this particular object isn't pinned. // evacuate this block, and this particular object isn't pinned.
// First, see if someone evacuated this object already. struct gc_atomic_forward fwd = gc_atomic_forward_begin(obj);
uintptr_t header_word = atomic_load_explicit(&obj->tag,
memory_order_relaxed); if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
uintptr_t busy_header_word = 0; gc_atomic_forward_acquire(&fwd);
if (header_word != busy_header_word &&
(header_word & gcobj_not_forwarded_bit) == 0) { switch (fwd.state) {
// The object has been evacuated already. Update the edge; case GC_FORWARDING_STATE_NOT_FORWARDED:
// whoever forwarded the object will make sure it's eventually case GC_FORWARDING_STATE_ABORTED:
// traced. // Impossible.
gc_edge_update(edge, gc_ref(header_word)); abort();
return 0; case GC_FORWARDING_STATE_ACQUIRED: {
}
// Otherwise try to claim it for evacuation.
if (header_word != busy_header_word &&
atomic_compare_exchange_strong(&obj->tag, &header_word,
busy_header_word)) {
// We claimed the object successfully; evacuating is up to us. // We claimed the object successfully; evacuating is up to us.
size_t object_granules = mark_space_live_object_granules(metadata); size_t object_granules = mark_space_live_object_granules(metadata);
struct gcobj *new_obj = evacuation_allocate(space, object_granules); struct gcobj *new_obj = evacuation_allocate(space, object_granules);
if (new_obj) { if (new_obj) {
// We were able to reserve space in which to evacuate this object. // Copy object contents before committing, as we don't know what
// Commit the evacuation by overwriting the tag. // part of the object (if any) will be overwritten by the
uintptr_t new_header_word = tag_forwarded(new_obj); // commit.
atomic_store_explicit(&obj->tag, new_header_word, memcpy(new_obj, obj, object_granules * GRANULE_SIZE);
memory_order_release); gc_atomic_forward_commit(&fwd, (uintptr_t)new_obj);
// Now copy the object contents, update extent metadata, and // Now update extent metadata, and indicate to the caller that
// indicate to the caller that the object's fields need to be // the object's fields need to be traced.
// traced.
new_obj->tag = header_word;
memcpy(&new_obj->words[1], &obj->words[1],
object_granules * GRANULE_SIZE - sizeof(header_word));
uint8_t *new_metadata = object_metadata_byte(new_obj); uint8_t *new_metadata = object_metadata_byte(new_obj);
memcpy(new_metadata + 1, metadata + 1, object_granules - 1); memcpy(new_metadata + 1, metadata + 1, object_granules - 1);
gc_edge_update(edge, gc_ref_from_heap_object(new_obj)); gc_edge_update(edge, gc_ref_from_heap_object(new_obj));
@ -619,27 +581,33 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
} else { } else {
// Well shucks; allocation failed, marking the end of // Well shucks; allocation failed, marking the end of
// opportunistic evacuation. No future evacuation of this // opportunistic evacuation. No future evacuation of this
// object will succeed. Restore the original header word and // object will succeed. Mark in place instead.
// mark instead. gc_atomic_forward_abort(&fwd);
atomic_store_explicit(&obj->tag, header_word,
memory_order_release);
} }
} else { break;
}
case GC_FORWARDING_STATE_BUSY:
// Someone else claimed this object first. Spin until new address // Someone else claimed this object first. Spin until new address
// known, or evacuation aborts. // known, or evacuation aborts.
for (size_t spin_count = 0;; spin_count++) { for (size_t spin_count = 0;; spin_count++) {
header_word = atomic_load_explicit(&obj->tag, memory_order_acquire); if (gc_atomic_forward_retry_busy(&fwd))
if (header_word)
break; break;
yield_for_spin(spin_count); yield_for_spin(spin_count);
} }
if ((header_word & gcobj_not_forwarded_bit) == 0) if (fwd.state == GC_FORWARDING_STATE_ABORTED)
gc_edge_update(edge, gc_ref(header_word)); // Remote evacuation aborted; remote will mark and enqueue.
// Either way, the other party is responsible for adding the return 0;
// object to the mark queue. ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
// Fall through.
case GC_FORWARDING_STATE_FORWARDED:
// The object has been evacuated already. Update the edge;
// whoever forwarded the object will make sure it's eventually
// traced.
gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
return 0; return 0;
} }
} }
uint8_t mask = METADATA_BYTE_YOUNG | METADATA_BYTE_MARK_0 uint8_t mask = METADATA_BYTE_YOUNG | METADATA_BYTE_MARK_0
| METADATA_BYTE_MARK_1 | METADATA_BYTE_MARK_2; | METADATA_BYTE_MARK_1 | METADATA_BYTE_MARK_2;
*metadata = (byte & ~mask) | space->marked_mask; *metadata = (byte & ~mask) | space->marked_mask;
@ -662,7 +630,7 @@ static inline int trace_edge(struct heap *heap, struct gc_edge edge) {
if (!gc_ref_is_heap_object(ref)) if (!gc_ref_is_heap_object(ref))
return 0; return 0;
struct gcobj *obj = gc_ref_heap_object(ref); struct gcobj *obj = gc_ref_heap_object(ref);
if (LIKELY(mark_space_contains(heap_mark_space(heap), obj))) { if (GC_LIKELY(mark_space_contains(heap_mark_space(heap), obj))) {
if (heap_mark_space(heap)->evacuating) if (heap_mark_space(heap)->evacuating)
return mark_space_evacuate_or_mark_object(heap_mark_space(heap), edge, return mark_space_evacuate_or_mark_object(heap_mark_space(heap), edge,
ref); ref);
@ -676,16 +644,7 @@ static inline int trace_edge(struct heap *heap, struct gc_edge edge) {
} }
static inline void trace_one(struct gcobj *obj, void *mark_data) { static inline void trace_one(struct gcobj *obj, void *mark_data) {
switch (tag_live_alloc_kind(obj->tag)) { gc_trace_object(obj, tracer_visit, mark_data, NULL);
#define SCAN_OBJECT(name, Name, NAME) \
case ALLOC_KIND_##NAME: \
visit_##name##_fields((Name*)obj, tracer_visit, mark_data); \
break;
FOR_EACH_HEAP_OBJECT_KIND(SCAN_OBJECT)
#undef SCAN_OBJECT
default:
abort ();
}
} }
static int heap_has_multiple_mutators(struct heap *heap) { static int heap_has_multiple_mutators(struct heap *heap) {
@ -730,23 +689,23 @@ static void remove_mutator(struct heap *heap, struct mutator *mut) {
} }
static void request_mutators_to_stop(struct heap *heap) { static void request_mutators_to_stop(struct heap *heap) {
ASSERT(!mutators_are_stopping(heap)); GC_ASSERT(!mutators_are_stopping(heap));
atomic_store_explicit(&heap->collecting, 1, memory_order_relaxed); atomic_store_explicit(&heap->collecting, 1, memory_order_relaxed);
} }
static void allow_mutators_to_continue(struct heap *heap) { static void allow_mutators_to_continue(struct heap *heap) {
ASSERT(mutators_are_stopping(heap)); GC_ASSERT(mutators_are_stopping(heap));
ASSERT(heap->active_mutator_count == 0); GC_ASSERT(heap->active_mutator_count == 0);
heap->active_mutator_count++; heap->active_mutator_count++;
atomic_store_explicit(&heap->collecting, 0, memory_order_relaxed); atomic_store_explicit(&heap->collecting, 0, memory_order_relaxed);
ASSERT(!mutators_are_stopping(heap)); GC_ASSERT(!mutators_are_stopping(heap));
pthread_cond_broadcast(&heap->mutator_cond); pthread_cond_broadcast(&heap->mutator_cond);
} }
static void push_unavailable_block(struct mark_space *space, uintptr_t block) { static void push_unavailable_block(struct mark_space *space, uintptr_t block) {
struct block_summary *summary = block_summary_for_addr(block); struct block_summary *summary = block_summary_for_addr(block);
ASSERT(!block_summary_has_flag(summary, BLOCK_NEEDS_SWEEP)); GC_ASSERT(!block_summary_has_flag(summary, BLOCK_NEEDS_SWEEP));
ASSERT(!block_summary_has_flag(summary, BLOCK_UNAVAILABLE)); GC_ASSERT(!block_summary_has_flag(summary, BLOCK_UNAVAILABLE));
block_summary_set_flag(summary, BLOCK_UNAVAILABLE); block_summary_set_flag(summary, BLOCK_UNAVAILABLE);
madvise((void*)block, BLOCK_SIZE, MADV_DONTNEED); madvise((void*)block, BLOCK_SIZE, MADV_DONTNEED);
push_block(&space->unavailable, block); push_block(&space->unavailable, block);
@ -757,7 +716,7 @@ static uintptr_t pop_unavailable_block(struct mark_space *space) {
if (!block) if (!block)
return 0; return 0;
struct block_summary *summary = block_summary_for_addr(block); struct block_summary *summary = block_summary_for_addr(block);
ASSERT(block_summary_has_flag(summary, BLOCK_UNAVAILABLE)); GC_ASSERT(block_summary_has_flag(summary, BLOCK_UNAVAILABLE));
block_summary_clear_flag(summary, BLOCK_UNAVAILABLE); block_summary_clear_flag(summary, BLOCK_UNAVAILABLE);
return block; return block;
} }
@ -768,7 +727,7 @@ static uintptr_t pop_empty_block(struct mark_space *space) {
static int maybe_push_evacuation_target(struct mark_space *space, static int maybe_push_evacuation_target(struct mark_space *space,
uintptr_t block, double reserve) { uintptr_t block, double reserve) {
ASSERT(!block_summary_has_flag(block_summary_for_addr(block), GC_ASSERT(!block_summary_has_flag(block_summary_for_addr(block),
BLOCK_NEEDS_SWEEP)); BLOCK_NEEDS_SWEEP));
size_t targets = atomic_load_explicit(&space->evacuation_targets.count, size_t targets = atomic_load_explicit(&space->evacuation_targets.count,
memory_order_acquire); memory_order_acquire);
@ -795,7 +754,7 @@ static int push_evacuation_target_if_possible(struct mark_space *space,
} }
static void push_empty_block(struct mark_space *space, uintptr_t block) { static void push_empty_block(struct mark_space *space, uintptr_t block) {
ASSERT(!block_summary_has_flag(block_summary_for_addr(block), GC_ASSERT(!block_summary_has_flag(block_summary_for_addr(block),
BLOCK_NEEDS_SWEEP)); BLOCK_NEEDS_SWEEP));
push_block(&space->empty, block); push_block(&space->empty, block);
} }
@ -811,7 +770,7 @@ static void mark_space_reacquire_memory(struct mark_space *space,
atomic_fetch_sub(&space->pending_unavailable_bytes, bytes) - bytes; atomic_fetch_sub(&space->pending_unavailable_bytes, bytes) - bytes;
while (pending + BLOCK_SIZE <= 0) { while (pending + BLOCK_SIZE <= 0) {
uintptr_t block = pop_unavailable_block(space); uintptr_t block = pop_unavailable_block(space);
ASSERT(block); GC_ASSERT(block);
if (push_evacuation_target_if_needed(space, block)) if (push_evacuation_target_if_needed(space, block))
continue; continue;
push_empty_block(space, block); push_empty_block(space, block);
@ -859,7 +818,7 @@ static int sweep_until_memory_released(struct mutator *mut) {
static void heap_reset_large_object_pages(struct heap *heap, size_t npages) { static void heap_reset_large_object_pages(struct heap *heap, size_t npages) {
size_t previous = heap->large_object_pages; size_t previous = heap->large_object_pages;
heap->large_object_pages = npages; heap->large_object_pages = npages;
ASSERT(npages <= previous); GC_ASSERT(npages <= previous);
size_t bytes = (previous - npages) << size_t bytes = (previous - npages) <<
heap_large_object_space(heap)->page_size_log2; heap_large_object_space(heap)->page_size_log2;
mark_space_reacquire_memory(heap_mark_space(heap), bytes); mark_space_reacquire_memory(heap_mark_space(heap), bytes);
@ -888,7 +847,7 @@ static void mutator_mark_buf_grow(struct mutator_mark_buf *buf) {
static void mutator_mark_buf_push(struct mutator_mark_buf *buf, static void mutator_mark_buf_push(struct mutator_mark_buf *buf,
struct gcobj *val) { struct gcobj *val) {
if (UNLIKELY(buf->size == buf->capacity)) if (GC_UNLIKELY(buf->size == buf->capacity))
mutator_mark_buf_grow(buf); mutator_mark_buf_grow(buf);
buf->objects[buf->size++] = val; buf->objects[buf->size++] = val;
} }
@ -908,7 +867,7 @@ static void mutator_mark_buf_destroy(struct mutator_mark_buf *buf) {
static void enqueue_mutator_for_tracing(struct mutator *mut) { static void enqueue_mutator_for_tracing(struct mutator *mut) {
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
ASSERT(mut->next == NULL); GC_ASSERT(mut->next == NULL);
struct mutator *next = struct mutator *next =
atomic_load_explicit(&heap->mutator_trace_list, memory_order_acquire); atomic_load_explicit(&heap->mutator_trace_list, memory_order_acquire);
do { do {
@ -948,7 +907,7 @@ static int mutator_should_mark_while_stopping(struct mutator *mut) {
// Mark the roots of a mutator that is stopping for GC. We can't // Mark the roots of a mutator that is stopping for GC. We can't
// enqueue them directly, so we send them to the controller in a buffer. // enqueue them directly, so we send them to the controller in a buffer.
static void mark_stopping_mutator_roots(struct mutator *mut) { static void mark_stopping_mutator_roots(struct mutator *mut) {
ASSERT(mutator_should_mark_while_stopping(mut)); GC_ASSERT(mutator_should_mark_while_stopping(mut));
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
struct mutator_mark_buf *local_roots = &mut->mark_buf; struct mutator_mark_buf *local_roots = &mut->mark_buf;
for (struct handle *h = mut->roots; h; h = h->next) { for (struct handle *h = mut->roots; h; h = h->next) {
@ -1026,16 +985,16 @@ static void trace_global_roots(struct heap *heap) {
static inline int static inline int
heap_object_is_young(struct heap *heap, struct gcobj *obj) { heap_object_is_young(struct heap *heap, struct gcobj *obj) {
if (UNLIKELY(!mark_space_contains(heap_mark_space(heap), obj))) { if (GC_UNLIKELY(!mark_space_contains(heap_mark_space(heap), obj))) {
// No lospace nursery, for the moment. // No lospace nursery, for the moment.
return 0; return 0;
} }
ASSERT(!heap_object_is_large(obj)); GC_ASSERT(!heap_object_is_large(obj));
return (*object_metadata_byte(obj)) & METADATA_BYTE_YOUNG; return (*object_metadata_byte(obj)) & METADATA_BYTE_YOUNG;
} }
static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) { static inline uint64_t load_eight_aligned_bytes(uint8_t *mark) {
ASSERT(((uintptr_t)mark & 7) == 0); GC_ASSERT(((uintptr_t)mark & 7) == 0);
uint8_t * __attribute__((aligned(8))) aligned_mark = mark; uint8_t * __attribute__((aligned(8))) aligned_mark = mark;
uint64_t word; uint64_t word;
memcpy(&word, aligned_mark, 8); memcpy(&word, aligned_mark, 8);
@ -1073,7 +1032,7 @@ static void mark_space_trace_card(struct mark_space *space,
size_t granule = granule_base + granule_offset; size_t granule = granule_base + granule_offset;
uintptr_t addr = first_addr_in_slab + granule * GRANULE_SIZE; uintptr_t addr = first_addr_in_slab + granule * GRANULE_SIZE;
struct gcobj *obj = (struct gcobj*)addr; struct gcobj *obj = (struct gcobj*)addr;
ASSERT(object_metadata_byte(obj) == &slab->metadata[granule]); GC_ASSERT(object_metadata_byte(obj) == &slab->metadata[granule]);
tracer_enqueue_root(&heap->tracer, obj); tracer_enqueue_root(&heap->tracer, obj);
} }
} }
@ -1081,7 +1040,7 @@ static void mark_space_trace_card(struct mark_space *space,
static void mark_space_trace_remembered_set(struct mark_space *space, static void mark_space_trace_remembered_set(struct mark_space *space,
struct heap *heap) { struct heap *heap) {
ASSERT(!space->evacuating); GC_ASSERT(!space->evacuating);
for (size_t s = 0; s < space->nslabs; s++) { for (size_t s = 0; s < space->nslabs; s++) {
struct slab *slab = &space->slabs[s]; struct slab *slab = &space->slabs[s];
uint8_t *remset = slab->remembered_set; uint8_t *remset = slab->remembered_set;
@ -1116,10 +1075,10 @@ static void trace_generational_roots(struct heap *heap) {
} }
} }
static void pause_mutator_for_collection(struct heap *heap) NEVER_INLINE; static void pause_mutator_for_collection(struct heap *heap) GC_NEVER_INLINE;
static void pause_mutator_for_collection(struct heap *heap) { static void pause_mutator_for_collection(struct heap *heap) {
ASSERT(mutators_are_stopping(heap)); GC_ASSERT(mutators_are_stopping(heap));
ASSERT(heap->active_mutator_count); GC_ASSERT(heap->active_mutator_count);
heap->active_mutator_count--; heap->active_mutator_count--;
if (heap->active_mutator_count == 0) if (heap->active_mutator_count == 0)
pthread_cond_signal(&heap->collector_cond); pthread_cond_signal(&heap->collector_cond);
@ -1139,10 +1098,10 @@ static void pause_mutator_for_collection(struct heap *heap) {
heap->active_mutator_count++; heap->active_mutator_count++;
} }
static void pause_mutator_for_collection_with_lock(struct mutator *mut) NEVER_INLINE; static void pause_mutator_for_collection_with_lock(struct mutator *mut) GC_NEVER_INLINE;
static void pause_mutator_for_collection_with_lock(struct mutator *mut) { static void pause_mutator_for_collection_with_lock(struct mutator *mut) {
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
ASSERT(mutators_are_stopping(heap)); GC_ASSERT(mutators_are_stopping(heap));
finish_sweeping_in_block(mut); finish_sweeping_in_block(mut);
if (mutator_should_mark_while_stopping(mut)) if (mutator_should_mark_while_stopping(mut))
// No need to collect results in mark buf; we can enqueue roots directly. // No need to collect results in mark buf; we can enqueue roots directly.
@ -1152,10 +1111,10 @@ static void pause_mutator_for_collection_with_lock(struct mutator *mut) {
pause_mutator_for_collection(heap); pause_mutator_for_collection(heap);
} }
static void pause_mutator_for_collection_without_lock(struct mutator *mut) NEVER_INLINE; static void pause_mutator_for_collection_without_lock(struct mutator *mut) GC_NEVER_INLINE;
static void pause_mutator_for_collection_without_lock(struct mutator *mut) { static void pause_mutator_for_collection_without_lock(struct mutator *mut) {
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
ASSERT(mutators_are_stopping(heap)); GC_ASSERT(mutators_are_stopping(heap));
finish_sweeping(mut); finish_sweeping(mut);
if (mutator_should_mark_while_stopping(mut)) if (mutator_should_mark_while_stopping(mut))
mark_stopping_mutator_roots(mut); mark_stopping_mutator_roots(mut);
@ -1310,7 +1269,7 @@ static enum gc_kind determine_collection_kind(struct heap *heap) {
} else { } else {
DEBUG("keeping on with minor GC\n"); DEBUG("keeping on with minor GC\n");
// Nursery has adequate space; keep trucking with minor GCs. // Nursery has adequate space; keep trucking with minor GCs.
ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE); GC_ASSERT(previous_gc_kind == GC_KIND_MINOR_IN_PLACE);
gc_kind = GC_KIND_MINOR_IN_PLACE; gc_kind = GC_KIND_MINOR_IN_PLACE;
} }
@ -1391,7 +1350,7 @@ static void prepare_for_evacuation(struct heap *heap) {
// they have been removed from the pool and have the UNAVAILABLE flag // they have been removed from the pool and have the UNAVAILABLE flag
// set, or because they are on the empties or evacuation target // set, or because they are on the empties or evacuation target
// lists. When evacuation starts, the empties list should be empty. // lists. When evacuation starts, the empties list should be empty.
ASSERT(empties == target_blocks); GC_ASSERT(empties == target_blocks);
// Now select a number of blocks that is likely to fill the space in // Now select a number of blocks that is likely to fill the space in
// the target blocks. Prefer candidate blocks with fewer survivors // the target blocks. Prefer candidate blocks with fewer survivors
@ -1560,7 +1519,7 @@ static uintptr_t mark_space_next_block_to_sweep(struct mark_space *space) {
} }
static void finish_block(struct mutator *mut) { static void finish_block(struct mutator *mut) {
ASSERT(mut->block); GC_ASSERT(mut->block);
struct block_summary *block = block_summary_for_addr(mut->block); struct block_summary *block = block_summary_for_addr(mut->block);
struct mark_space *space = heap_mark_space(mutator_heap(mut)); struct mark_space *space = heap_mark_space(mutator_heap(mut));
atomic_fetch_add(&space->granules_freed_by_last_collection, atomic_fetch_add(&space->granules_freed_by_last_collection,
@ -1572,7 +1531,7 @@ static void finish_block(struct mutator *mut) {
// trying to allocate into it for a minor GC. Sweep it next time to // trying to allocate into it for a minor GC. Sweep it next time to
// clear any garbage allocated in this cycle and mark it as // clear any garbage allocated in this cycle and mark it as
// "venerable" (i.e., old). // "venerable" (i.e., old).
ASSERT(!block_summary_has_flag(block, BLOCK_VENERABLE)); GC_ASSERT(!block_summary_has_flag(block, BLOCK_VENERABLE));
if (!block_summary_has_flag(block, BLOCK_VENERABLE_AFTER_SWEEP) && if (!block_summary_has_flag(block, BLOCK_VENERABLE_AFTER_SWEEP) &&
block->free_granules < GRANULES_PER_BLOCK * space->venerable_threshold) block->free_granules < GRANULES_PER_BLOCK * space->venerable_threshold)
block_summary_set_flag(block, BLOCK_VENERABLE_AFTER_SWEEP); block_summary_set_flag(block, BLOCK_VENERABLE_AFTER_SWEEP);
@ -1590,7 +1549,7 @@ static size_t next_hole_in_block(struct mutator *mut) {
uintptr_t sweep_mask = heap_mark_space(mutator_heap(mut))->sweep_mask; uintptr_t sweep_mask = heap_mark_space(mutator_heap(mut))->sweep_mask;
while (sweep != limit) { while (sweep != limit) {
ASSERT((sweep & (GRANULE_SIZE - 1)) == 0); GC_ASSERT((sweep & (GRANULE_SIZE - 1)) == 0);
uint8_t* metadata = object_metadata_byte((struct gcobj*)sweep); uint8_t* metadata = object_metadata_byte((struct gcobj*)sweep);
size_t limit_granules = (limit - sweep) >> GRANULE_SIZE_LOG_2; size_t limit_granules = (limit - sweep) >> GRANULE_SIZE_LOG_2;
@ -1613,12 +1572,12 @@ static size_t next_hole_in_block(struct mutator *mut) {
} }
size_t free_granules = next_mark(metadata, limit_granules, sweep_mask); size_t free_granules = next_mark(metadata, limit_granules, sweep_mask);
ASSERT(free_granules); GC_ASSERT(free_granules);
ASSERT(free_granules <= limit_granules); GC_ASSERT(free_granules <= limit_granules);
struct block_summary *summary = block_summary_for_addr(sweep); struct block_summary *summary = block_summary_for_addr(sweep);
summary->hole_count++; summary->hole_count++;
ASSERT(free_granules <= GRANULES_PER_BLOCK - summary->free_granules); GC_ASSERT(free_granules <= GRANULES_PER_BLOCK - summary->free_granules);
summary->free_granules += free_granules; summary->free_granules += free_granules;
size_t free_bytes = free_granules * GRANULE_SIZE; size_t free_bytes = free_granules * GRANULE_SIZE;
@ -1645,7 +1604,7 @@ static void finish_hole(struct mutator *mut) {
} }
static int maybe_release_swept_empty_block(struct mutator *mut) { static int maybe_release_swept_empty_block(struct mutator *mut) {
ASSERT(mut->block); GC_ASSERT(mut->block);
struct mark_space *space = heap_mark_space(mutator_heap(mut)); struct mark_space *space = heap_mark_space(mutator_heap(mut));
uintptr_t block = mut->block; uintptr_t block = mut->block;
if (atomic_load_explicit(&space->pending_unavailable_bytes, if (atomic_load_explicit(&space->pending_unavailable_bytes,
@ -1696,7 +1655,7 @@ static size_t next_hole(struct mutator *mut) {
mut->alloc = mut->sweep = mut->block = 0; mut->alloc = mut->sweep = mut->block = 0;
empties_countdown--; empties_countdown--;
} }
ASSERT(mut->block == 0); GC_ASSERT(mut->block == 0);
while (1) { while (1) {
uintptr_t block = mark_space_next_block_to_sweep(space); uintptr_t block = mark_space_next_block_to_sweep(space);
if (block) { if (block) {
@ -1797,8 +1756,7 @@ static void trigger_collection(struct mutator *mut) {
heap_unlock(heap); heap_unlock(heap);
} }
static void* allocate_large(struct mutator *mut, enum alloc_kind kind, static void* allocate_large(struct mutator *mut, size_t granules) {
size_t granules) {
struct heap *heap = mutator_heap(mut); struct heap *heap = mutator_heap(mut);
struct large_object_space *space = heap_large_object_space(heap); struct large_object_space *space = heap_large_object_space(heap);
@ -1821,14 +1779,11 @@ static void* allocate_large(struct mutator *mut, enum alloc_kind kind,
abort(); abort();
} }
*(uintptr_t*)ret = tag_live(kind);
return ret; return ret;
} }
static void* allocate_small_slow(struct mutator *mut, enum alloc_kind kind, static void* allocate_small_slow(struct mutator *mut, size_t granules) GC_NEVER_INLINE;
size_t granules) NEVER_INLINE; static void* allocate_small_slow(struct mutator *mut, size_t granules) {
static void* allocate_small_slow(struct mutator *mut, enum alloc_kind kind,
size_t granules) {
while (1) { while (1) {
size_t hole = next_hole(mut); size_t hole = next_hole(mut);
if (hole >= granules) { if (hole >= granules) {
@ -1843,9 +1798,8 @@ static void* allocate_small_slow(struct mutator *mut, enum alloc_kind kind,
return ret; return ret;
} }
static inline void* allocate_small(struct mutator *mut, enum alloc_kind kind, static inline void* allocate_small(struct mutator *mut, size_t granules) {
size_t granules) { GC_ASSERT(granules > 0); // allocating 0 granules would be silly
ASSERT(granules > 0); // allocating 0 granules would be silly
uintptr_t alloc = mut->alloc; uintptr_t alloc = mut->alloc;
uintptr_t sweep = mut->sweep; uintptr_t sweep = mut->sweep;
uintptr_t new_alloc = alloc + granules * GRANULE_SIZE; uintptr_t new_alloc = alloc + granules * GRANULE_SIZE;
@ -1854,9 +1808,8 @@ static inline void* allocate_small(struct mutator *mut, enum alloc_kind kind,
mut->alloc = new_alloc; mut->alloc = new_alloc;
obj = (struct gcobj *)alloc; obj = (struct gcobj *)alloc;
} else { } else {
obj = allocate_small_slow(mut, kind, granules); obj = allocate_small_slow(mut, granules);
} }
obj->tag = tag_live(kind);
uint8_t *metadata = object_metadata_byte(obj); uint8_t *metadata = object_metadata_byte(obj);
if (granules == 1) { if (granules == 1) {
metadata[0] = METADATA_BYTE_YOUNG | METADATA_BYTE_END; metadata[0] = METADATA_BYTE_YOUNG | METADATA_BYTE_END;
@ -1869,24 +1822,20 @@ static inline void* allocate_small(struct mutator *mut, enum alloc_kind kind,
return obj; return obj;
} }
static inline void* allocate_medium(struct mutator *mut, enum alloc_kind kind, static inline void* allocate_medium(struct mutator *mut, size_t granules) {
size_t granules) { return allocate_small(mut, granules);
return allocate_small(mut, kind, granules);
} }
static inline void* allocate(struct mutator *mut, enum alloc_kind kind, static inline void* gc_allocate(struct mutator *mut, size_t size) {
size_t size) {
size_t granules = size_to_granules(size); size_t granules = size_to_granules(size);
if (granules <= MEDIUM_OBJECT_GRANULE_THRESHOLD) if (granules <= MEDIUM_OBJECT_GRANULE_THRESHOLD)
return allocate_small(mut, kind, granules); return allocate_small(mut, granules);
if (granules <= LARGE_OBJECT_GRANULE_THRESHOLD) if (granules <= LARGE_OBJECT_GRANULE_THRESHOLD)
return allocate_medium(mut, kind, granules); return allocate_medium(mut, granules);
return allocate_large(mut, kind, granules); return allocate_large(mut, granules);
} }
static inline void* allocate_pointerless(struct mutator *mut, static inline void* gc_allocate_pointerless(struct mutator *mut, size_t size) {
enum alloc_kind kind, return gc_allocate(mut, size);
size_t size) {
return allocate(mut, kind, size);
} }
static inline void mark_space_write_barrier(void *obj) { static inline void mark_space_write_barrier(void *obj) {
@ -1940,8 +1889,8 @@ struct options {
}; };
static size_t parse_size_t(double value) { static size_t parse_size_t(double value) {
ASSERT(value >= 0); GC_ASSERT(value >= 0);
ASSERT(value <= (size_t) -1); GC_ASSERT(value <= (size_t) -1);
return value; return value;
} }
@ -2093,7 +2042,7 @@ static void gc_finish_for_thread(struct mutator *mut) {
} }
static void deactivate_mutator(struct heap *heap, struct mutator *mut) { static void deactivate_mutator(struct heap *heap, struct mutator *mut) {
ASSERT(mut->next == NULL); GC_ASSERT(mut->next == NULL);
heap_lock(heap); heap_lock(heap);
mut->next = heap->deactivated_mutators; mut->next = heap->deactivated_mutators;
heap->deactivated_mutators = mut; heap->deactivated_mutators = mut;