Mirror of https://git.savannah.gnu.org/git/guile.git, synced 2025-05-13 17:20:21 +02:00
Excise struct gcobj
parent 6ecf226570
commit 2199d5f48d
3 changed files with 93 additions and 105 deletions
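The pattern throughout this commit: code that used to traffic in bare `struct gcobj *` pointers now passes `struct gc_ref`, a one-word value type, and the stray forward declarations of `struct gcobj` disappear. As a minimal sketch of the helpers the diff leans on (the real definitions live in the gc-ref header; the `value` field name and the exact null convention are assumptions here):

    // Sketch only: a gc_ref wraps an address as a value type, so it can be
    // passed and stored without committing to any object representation.
    struct gc_ref { uintptr_t value; };

    static inline struct gc_ref gc_ref(uintptr_t value) {
      return (struct gc_ref){ value };
    }
    static inline uintptr_t gc_ref_value(struct gc_ref ref) { return ref.value; }
    static inline struct gc_ref gc_ref_null(void) { return gc_ref(0); }
    static inline int gc_ref_is_heap_object(struct gc_ref ref) {
      return ref.value != 0;
    }
    static inline void* gc_ref_heap_object(struct gc_ref ref) {
      return (void*)ref.value;
    }
    static inline struct gc_ref gc_ref_from_heap_object(void *obj) {
      return gc_ref((uintptr_t)obj);
    }

With that in mind, the three files below read as a mechanical but careful substitution: NULL tests become gc_ref_is_heap_object, NULL returns become gc_ref_null(), and casts between object pointers and addresses are confined to the gc_ref constructors.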
parallel-tracer.h

@@ -18,12 +18,10 @@
 // for Weak Memory Models" (Lê et al, PPoPP'13)
 // (http://www.di.ens.fr/%7Ezappa/readings/ppopp13.pdf).
 
-struct gcobj;
-
 struct trace_buf {
   unsigned log_size;
   size_t size;
-  struct gcobj **data;
+  uintptr_t *data;
 };
 
 // Min size: 8 kB on 64-bit systems, 4 kB on 32-bit.
@@ -35,7 +33,7 @@ static int
 trace_buf_init(struct trace_buf *buf, unsigned log_size) {
   ASSERT(log_size >= trace_buf_min_log_size);
   ASSERT(log_size <= trace_buf_max_log_size);
-  size_t size = (1 << log_size) * sizeof(struct gcobj *);
+  size_t size = (1 << log_size) * sizeof(uintptr_t);
   void *mem = mmap(NULL, size, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (mem == MAP_FAILED) {
@@ -56,7 +54,7 @@ trace_buf_size(struct trace_buf *buf) {
 
 static inline size_t
 trace_buf_byte_size(struct trace_buf *buf) {
-  return trace_buf_size(buf) * sizeof(struct gcobj *);
+  return trace_buf_size(buf) * sizeof(uintptr_t);
 }
 
 static void
@@ -75,16 +73,16 @@ trace_buf_destroy(struct trace_buf *buf) {
   }
 }
 
-static inline struct gcobj *
+static inline struct gc_ref
 trace_buf_get(struct trace_buf *buf, size_t i) {
-  return atomic_load_explicit(&buf->data[i & (buf->size - 1)],
-                              memory_order_relaxed);
+  return gc_ref(atomic_load_explicit(&buf->data[i & (buf->size - 1)],
+                                     memory_order_relaxed));
 }
 
 static inline void
-trace_buf_put(struct trace_buf *buf, size_t i, struct gcobj * o) {
+trace_buf_put(struct trace_buf *buf, size_t i, struct gc_ref ref) {
   return atomic_store_explicit(&buf->data[i & (buf->size - 1)],
-                               o,
+                               gc_ref_value(ref),
                                memory_order_relaxed);
 }
 
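Note that the buffer's backing array is now `uintptr_t *` rather than `struct gc_ref *`: the slots are read and written with C11 relaxed atomics, which are simplest on a plain machine word, so the gc_ref wrapper is applied and stripped only at the trace_buf_get/trace_buf_put boundary. A standalone illustration of that boundary (the `slot` variable is hypothetical, and this builds on the gc_ref sketch above):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t slot;   // one hypothetical buffer slot

    static inline struct gc_ref slot_get(void) {
      // raw word out of the atomic load, wrapped as a gc_ref
      return gc_ref(atomic_load_explicit(&slot, memory_order_relaxed));
    }
    static inline void slot_put(struct gc_ref ref) {
      // gc_ref unwrapped to a raw word for the atomic store
      atomic_store_explicit(&slot, gc_ref_value(ref), memory_order_relaxed);
    }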
@@ -158,7 +156,7 @@ trace_deque_grow(struct trace_deque *q, int cur, size_t b, size_t t) {
 }
 
 static void
-trace_deque_push(struct trace_deque *q, struct gcobj * x) {
+trace_deque_push(struct trace_deque *q, struct gc_ref x) {
   size_t b = LOAD_RELAXED(&q->bottom);
   size_t t = LOAD_ACQUIRE(&q->top);
   int active = LOAD_RELAXED(&q->active);
@@ -172,7 +170,7 @@ trace_deque_push(struct trace_deque *q, struct gcobj * x) {
 }
 
 static void
-trace_deque_push_many(struct trace_deque *q, struct gcobj **objv, size_t count) {
+trace_deque_push_many(struct trace_deque *q, struct gc_ref *objv, size_t count) {
   size_t b = LOAD_RELAXED(&q->bottom);
   size_t t = LOAD_ACQUIRE(&q->top);
   int active = LOAD_RELAXED(&q->active);
@@ -186,7 +184,7 @@ trace_deque_push_many(struct trace_deque *q, struct gcobj **objv, size_t count)
   STORE_RELAXED(&q->bottom, b + count);
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_deque_try_pop(struct trace_deque *q) {
   size_t b = LOAD_RELAXED(&q->bottom);
   b = b - 1;
@@ -194,7 +192,7 @@ trace_deque_try_pop(struct trace_deque *q) {
   STORE_RELAXED(&q->bottom, b);
   atomic_thread_fence(memory_order_seq_cst);
   size_t t = LOAD_RELAXED(&q->top);
-  struct gcobj * x;
+  struct gc_ref x;
   if (t <= b) { // Non-empty queue.
     x = trace_buf_get(&q->bufs[active], b);
     if (t == b) { // Single last element in queue.
@@ -202,32 +200,32 @@ trace_deque_try_pop(struct trace_deque *q) {
                                                    memory_order_seq_cst,
                                                    memory_order_relaxed))
         // Failed race.
-        x = NULL;
+        x = gc_ref_null();
       STORE_RELAXED(&q->bottom, b + 1);
     }
   } else { // Empty queue.
-    x = NULL;
+    x = gc_ref_null();
     STORE_RELAXED(&q->bottom, b + 1);
   }
   return x;
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_deque_steal(struct trace_deque *q) {
   while (1) {
     size_t t = LOAD_ACQUIRE(&q->top);
     atomic_thread_fence(memory_order_seq_cst);
     size_t b = LOAD_ACQUIRE(&q->bottom);
     if (t >= b)
-      return NULL;
+      return gc_ref_null();
     int active = LOAD_CONSUME(&q->active);
-    struct gcobj *x = x = trace_buf_get(&q->bufs[active], t);
+    struct gc_ref ref = trace_buf_get(&q->bufs[active], t);
    if (!atomic_compare_exchange_strong_explicit(&q->top, &t, t + 1,
                                                  memory_order_seq_cst,
                                                  memory_order_relaxed))
       // Failed race.
       continue;
-    return x;
+    return ref;
   }
 }
 
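The push/pop/steal trio above keeps the discipline of the Lê et al. paper cited at the top of the file: the owning worker pushes and pops at the bottom of its deque, thieves advance the top with a compare-and-swap, and the only owner/thief race is over the final element, settled by the same CAS on top. A usage sketch from the owner's side (the `process` callback is hypothetical):

    // Drain the owner's own deque LIFO; thieves concurrently take the
    // oldest entries FIFO through trace_deque_steal.
    static void drain_own_deque(struct trace_deque *q,
                                void (*process)(struct gc_ref)) {
      for (struct gc_ref ref = trace_deque_try_pop(q);
           gc_ref_is_heap_object(ref);
           ref = trace_deque_try_pop(q))
        process(ref);
    }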
@@ -251,7 +249,7 @@ trace_deque_can_steal(struct trace_deque *q) {
 struct local_trace_queue {
   size_t read;
   size_t write;
-  struct gcobj * data[LOCAL_TRACE_QUEUE_SIZE];
+  struct gc_ref data[LOCAL_TRACE_QUEUE_SIZE];
 };
 
 static inline void
@@ -275,10 +273,10 @@ local_trace_queue_full(struct local_trace_queue *q) {
   return local_trace_queue_size(q) >= LOCAL_TRACE_QUEUE_SIZE;
 }
 static inline void
-local_trace_queue_push(struct local_trace_queue *q, struct gcobj * v) {
+local_trace_queue_push(struct local_trace_queue *q, struct gc_ref v) {
   q->data[q->write++ & LOCAL_TRACE_QUEUE_MASK] = v;
 }
-static inline struct gcobj *
+static inline struct gc_ref
 local_trace_queue_pop(struct local_trace_queue *q) {
   return q->data[q->read++ & LOCAL_TRACE_QUEUE_MASK];
 }
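The local queue is a fixed-size power-of-two ring: `read` and `write` are free-running counters, so the element count is simply `write - read` (correct across wraparound, by unsigned arithmetic), and a counter maps to a slot with `& LOCAL_TRACE_QUEUE_MASK`. A worked example, assuming LOCAL_TRACE_QUEUE_SIZE is 1024 so the mask is 1023 (the actual constant is defined elsewhere in this file):

    #include <stddef.h>

    static void ring_index_example(void) {
      size_t read = 5000, write = 5700;  // free-running counters
      size_t count = write - read;       // 700 entries in flight
      size_t slot  = write & 1023;       // 5700 % 1024 == 580
      (void)count; (void)slot;
    }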
@@ -447,7 +445,6 @@ static void tracer_release(struct gc_heap *heap) {
     trace_deque_release(&tracer->workers[i].deque);
 }
 
-struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
@@ -466,12 +463,11 @@ tracer_visit(struct gc_edge edge, void *trace_data) {
   if (trace_edge(trace->heap, edge)) {
     if (local_trace_queue_full(&trace->local))
       tracer_share(trace);
-    local_trace_queue_push(&trace->local,
-                           gc_ref_heap_object(gc_edge_ref(edge)));
+    local_trace_queue_push(&trace->local, gc_edge_ref(edge));
   }
 }
 
-static struct gcobj *
+static struct gc_ref
 tracer_steal_from_worker(struct tracer *tracer, size_t id) {
   ASSERT(id < tracer->worker_count);
   return trace_deque_steal(&tracer->workers[id].deque);
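tracer_visit now enqueues `gc_edge_ref(edge)` directly, dropping the intermediate conversion to a raw heap-object pointer. An edge, in this API, is the location of a slot holding a reference; a sketch of the shape the diff assumes (the field name is an assumption; see the gc-api headers for the real definition):

    // Sketch: an edge is the address of a gc_ref-valued slot, and
    // gc_edge_ref reads the reference currently stored there.
    struct gc_edge { struct gc_ref *dst; };

    static inline struct gc_ref gc_edge_ref(struct gc_edge edge) {
      return *edge.dst;
    }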
@@ -483,21 +479,21 @@ tracer_can_steal_from_worker(struct tracer *tracer, size_t id) {
   return trace_deque_can_steal(&tracer->workers[id].deque);
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_worker_steal_from_any(struct trace_worker *worker, struct tracer *tracer) {
   size_t steal_id = worker->steal_id;
   for (size_t i = 0; i < tracer->worker_count; i++) {
     steal_id = (steal_id + 1) % tracer->worker_count;
     DEBUG("tracer #%zu: stealing from #%zu\n", worker->id, steal_id);
-    struct gcobj * obj = tracer_steal_from_worker(tracer, steal_id);
-    if (obj) {
+    struct gc_ref obj = tracer_steal_from_worker(tracer, steal_id);
+    if (gc_ref_is_heap_object(obj)) {
       DEBUG("tracer #%zu: stealing got %p\n", worker->id, obj);
       worker->steal_id = steal_id;
       return obj;
     }
   }
   DEBUG("tracer #%zu: failed to steal\n", worker->id);
-  return 0;
+  return gc_ref_null();
 }
 
 static int
@@ -544,19 +540,19 @@ trace_worker_check_termination(struct trace_worker *worker,
   }
 }
 
-static struct gcobj *
+static struct gc_ref
 trace_worker_steal(struct local_tracer *trace) {
   struct tracer *tracer = heap_tracer(trace->heap);
   struct trace_worker *worker = trace->worker;
 
   while (1) {
     DEBUG("tracer #%zu: trying to steal\n", worker->id);
-    struct gcobj *obj = trace_worker_steal_from_any(worker, tracer);
-    if (obj)
+    struct gc_ref obj = trace_worker_steal_from_any(worker, tracer);
+    if (gc_ref_is_heap_object(obj))
       return obj;
 
     if (trace_worker_check_termination(worker, tracer))
-      return NULL;
+      return gc_ref_null();
   }
 }
 
@@ -571,15 +567,15 @@ trace_worker_trace(struct trace_worker *worker) {
   size_t n = 0;
   DEBUG("tracer #%zu: running trace loop\n", worker->id);
   while (1) {
-    void *obj;
+    struct gc_ref ref;
     if (!local_trace_queue_empty(&trace.local)) {
-      obj = local_trace_queue_pop(&trace.local);
+      ref = local_trace_queue_pop(&trace.local);
     } else {
-      obj = trace_worker_steal(&trace);
-      if (!obj)
+      ref = trace_worker_steal(&trace);
+      if (!gc_ref_is_heap_object(ref))
         break;
     }
-    trace_one(gc_ref_from_heap_object(obj), &trace);
+    trace_one(ref, &trace);
     n++;
   }
   DEBUG("tracer #%zu: done tracing, %zu objects traced\n", worker->id, n);
@@ -588,13 +584,13 @@
 }
 
 static inline void
-tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) {
+tracer_enqueue_root(struct tracer *tracer, struct gc_ref ref) {
   struct trace_deque *worker0_deque = &tracer->workers[0].deque;
-  trace_deque_push(worker0_deque, obj);
+  trace_deque_push(worker0_deque, ref);
 }
 
 static inline void
-tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objv,
+tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objv,
                      size_t count) {
   struct trace_deque *worker0_deque = &tracer->workers[0].deque;
   trace_deque_push_many(worker0_deque, objv, count);

serial-tracer.h

@@ -8,22 +8,20 @@
 #include "debug.h"
 #include "gc-api.h"
 
-struct gcobj;
-
 struct trace_queue {
   size_t size;
   size_t read;
   size_t write;
-  struct gcobj **buf;
+  struct gc_ref *buf;
 };
 
 static const size_t trace_queue_max_size =
-  (1ULL << (sizeof(struct gcobj *) * 8 - 1)) / sizeof(struct gcobj *);
+  (1ULL << (sizeof(struct gc_ref) * 8 - 1)) / sizeof(struct gc_ref);
 static const size_t trace_queue_release_byte_threshold = 1 * 1024 * 1024;
 
-static struct gcobj **
+static struct gc_ref *
 trace_queue_alloc(size_t size) {
-  void *mem = mmap(NULL, size * sizeof(struct gcobj *), PROT_READ|PROT_WRITE,
+  void *mem = mmap(NULL, size * sizeof(struct gc_ref), PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (mem == MAP_FAILED) {
     perror("Failed to grow trace queue");
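The max-size constant caps the queue at half the address space, expressed in entries rather than bytes. Working the arithmetic through (assuming `struct gc_ref` is a single word, as in the sketch above):

    // 64-bit: sizeof(struct gc_ref) == 8, so
    //   trace_queue_max_size == (1ULL << 63) / 8 == 1ULL << 60 entries,
    //   i.e. at most 2^63 bytes of queue.
    // 32-bit: sizeof(struct gc_ref) == 4, so
    //   trace_queue_max_size == (1ULL << 31) / 4 == 1UL << 29 entries.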
@@ -35,20 +33,20 @@ trace_queue_alloc(size_t size) {
 
 static int
 trace_queue_init(struct trace_queue *q) {
-  q->size = getpagesize() / sizeof(struct gcobj *);
+  q->size = getpagesize() / sizeof(struct gc_ref);
   q->read = 0;
   q->write = 0;
   q->buf = trace_queue_alloc(q->size);
   return !!q->buf;
 }
 
-static inline struct gcobj *
+static inline struct gc_ref
 trace_queue_get(struct trace_queue *q, size_t idx) {
   return q->buf[idx & (q->size - 1)];
 }
 
 static inline void
-trace_queue_put(struct trace_queue *q, size_t idx, struct gcobj *x) {
+trace_queue_put(struct trace_queue *q, size_t idx, struct gc_ref x) {
   q->buf[idx & (q->size - 1)] = x;
 }
 
@@ -57,14 +55,14 @@ static int trace_queue_grow(struct trace_queue *q) GC_NEVER_INLINE;
 static int
 trace_queue_grow(struct trace_queue *q) {
   size_t old_size = q->size;
-  struct gcobj **old_buf = q->buf;
+  struct gc_ref *old_buf = q->buf;
   if (old_size >= trace_queue_max_size) {
     DEBUG("trace queue already at max size of %zu bytes", old_size);
     return 0;
   }
 
   size_t new_size = old_size * 2;
-  struct gcobj **new_buf = trace_queue_alloc(new_size);
+  struct gc_ref *new_buf = trace_queue_alloc(new_size);
   if (!new_buf)
     return 0;
 
@@ -74,7 +72,7 @@ trace_queue_grow(struct trace_queue *q) {
   for (size_t i = q->read; i < q->write; i++)
     new_buf[i & new_mask] = old_buf[i & old_mask];
 
-  munmap(old_buf, old_size * sizeof(struct gcobj *));
+  munmap(old_buf, old_size * sizeof(struct gc_ref));
 
   q->size = new_size;
   q->buf = new_buf;
@@ -82,7 +80,7 @@ trace_queue_grow(struct trace_queue *q) {
 }
 
 static inline void
-trace_queue_push(struct trace_queue *q, struct gcobj *p) {
+trace_queue_push(struct trace_queue *q, struct gc_ref p) {
   if (UNLIKELY(q->write - q->read == q->size)) {
     if (!trace_queue_grow(q))
       GC_CRASH();
@@ -91,7 +89,7 @@ trace_queue_push(struct trace_queue *q, struct gcobj *p) {
 }
 
 static inline void
-trace_queue_push_many(struct trace_queue *q, struct gcobj **pv, size_t count) {
+trace_queue_push_many(struct trace_queue *q, struct gc_ref *pv, size_t count) {
   while (q->size - (q->write - q->read) < count) {
     if (!trace_queue_grow(q))
       GC_CRASH();
@@ -100,16 +98,16 @@ trace_queue_push_many(struct trace_queue *q, struct gcobj **pv, size_t count) {
     trace_queue_put(q, q->write++, pv[i]);
 }
 
-static inline struct gcobj*
+static inline struct gc_ref
 trace_queue_pop(struct trace_queue *q) {
   if (UNLIKELY(q->read == q->write))
-    return NULL;
+    return gc_ref_null();
   return trace_queue_get(q, q->read++);
 }
 
 static void
 trace_queue_release(struct trace_queue *q) {
-  size_t byte_size = q->size * sizeof(struct gcobj *);
+  size_t byte_size = q->size * sizeof(struct gc_ref);
   if (byte_size >= trace_queue_release_byte_threshold)
     madvise(q->buf, byte_size, MADV_DONTNEED);
   q->read = q->write = 0;
@@ -117,7 +115,7 @@ trace_queue_release(struct trace_queue *q) {
 
 static void
 trace_queue_destroy(struct trace_queue *q) {
-  size_t byte_size = q->size * sizeof(struct gcobj *);
+  size_t byte_size = q->size * sizeof(struct gc_ref);
   munmap(q->buf, byte_size);
 }
 
@@ -137,18 +135,17 @@ static void tracer_release(struct gc_heap *heap) {
   trace_queue_release(&heap_tracer(heap)->queue);
 }
 
-struct gcobj;
 static inline void tracer_visit(struct gc_edge edge, void *trace_data) GC_ALWAYS_INLINE;
 static inline void trace_one(struct gc_ref ref, void *trace_data) GC_ALWAYS_INLINE;
 static inline int trace_edge(struct gc_heap *heap,
                              struct gc_edge edge) GC_ALWAYS_INLINE;
 
 static inline void
-tracer_enqueue_root(struct tracer *tracer, struct gcobj *obj) {
+tracer_enqueue_root(struct tracer *tracer, struct gc_ref obj) {
   trace_queue_push(&tracer->queue, obj);
 }
 static inline void
-tracer_enqueue_roots(struct tracer *tracer, struct gcobj **objs,
+tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objs,
                      size_t count) {
   trace_queue_push_many(&tracer->queue, objs, count);
 }
@@ -156,14 +153,16 @@ static inline void
 tracer_visit(struct gc_edge edge, void *trace_data) {
   struct gc_heap *heap = trace_data;
   if (trace_edge(heap, edge))
-    tracer_enqueue_root(heap_tracer(heap),
-                        gc_ref_heap_object(gc_edge_ref(edge)));
+    tracer_enqueue_root(heap_tracer(heap), gc_edge_ref(edge));
 }
 static inline void
tracer_trace(struct gc_heap *heap) {
-  struct gcobj *obj;
-  while ((obj = trace_queue_pop(&heap_tracer(heap)->queue)))
-    trace_one(gc_ref_from_heap_object(obj), heap);
+  do {
+    struct gc_ref obj = trace_queue_pop(&heap_tracer(heap)->queue);
+    if (!gc_ref_is_heap_object(obj))
+      break;
+    trace_one(obj, heap);
+  } while (1);
 }
 
 #endif // SERIAL_TRACER_H
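The reshaped loop in tracer_trace is forced by the type change: a `struct gc_ref` is a value, not a pointer, so it cannot sit in a `while` condition the way the old pointer did. Side by side, with `q` as hypothetical shorthand for `&heap_tracer(heap)->queue`:

    // Before: a pointer tests directly in the loop condition.
    while ((obj = trace_queue_pop(q)))
      trace_one(gc_ref_from_heap_object(obj), heap);

    // After: pop, test for the null ref explicitly, then break.
    do {
      struct gc_ref obj = trace_queue_pop(q);
      if (!gc_ref_is_heap_object(obj))
        break;
      trace_one(obj, heap);
    } while (1);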
whippet.c (65 changed lines)

@@ -182,13 +182,16 @@ static struct slab *object_slab(void *obj) {
   return (struct slab*) base;
 }
 
-static uint8_t *object_metadata_byte(void *obj) {
-  uintptr_t addr = (uintptr_t) obj;
+static uint8_t *metadata_byte_for_addr(uintptr_t addr) {
   uintptr_t base = addr & ~(SLAB_SIZE - 1);
   uintptr_t granule = (addr & (SLAB_SIZE - 1)) >> GRANULE_SIZE_LOG_2;
   return (uint8_t*) (base + granule);
 }
 
+static uint8_t *metadata_byte_for_object(struct gc_ref ref) {
+  return metadata_byte_for_addr(gc_ref_value(ref));
+}
+
 #define GRANULES_PER_BLOCK (BLOCK_SIZE / GRANULE_SIZE)
 #define GRANULES_PER_REMSET_BYTE (GRANULES_PER_BLOCK / REMSET_BYTES_PER_BLOCK)
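metadata_byte_for_addr maps an object address to its per-granule metadata byte at the base of the enclosing slab, so it needs only an address; metadata_byte_for_object just unwraps a gc_ref first. A worked example, assuming a 4 MB slab and 16-byte granules, i.e. GRANULE_SIZE_LOG_2 == 4 (constants assumed for illustration):

    // addr    = 0x7f0000401230
    // base    = addr & ~(SLAB_SIZE - 1)        == 0x7f0000400000
    // granule = (addr & (SLAB_SIZE - 1)) >> 4  == 0x1230 >> 4 == 0x123
    // byte    = (uint8_t*)(base + granule)     == 0x7f0000400123
    // One metadata byte per 16-byte granule, stored in the slab's own
    // leading granules.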
@@ -258,8 +261,6 @@ static inline size_t size_to_granules(size_t size) {
   return (size + GRANULE_SIZE - 1) >> GRANULE_SIZE_LOG_2;
 }
 
-struct gcobj;
-
 struct evacuation_allocator {
   size_t allocated; // atomically
   size_t limit;
@@ -329,7 +330,7 @@ struct gc_heap {
 struct gc_mutator_mark_buf {
   size_t size;
   size_t capacity;
-  struct gcobj **objects;
+  struct gc_ref *objects;
 };
 
 struct gc_mutator {
@@ -366,10 +367,6 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
 
 static void collect(struct gc_mutator *mut) GC_NEVER_INLINE;
 
-static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) {
-  return object_metadata_byte(obj);
-}
-
 static size_t mark_space_live_object_granules(uint8_t *metadata) {
   size_t n = 0;
   while ((metadata[n] & METADATA_BYTE_END) == 0)
@@ -379,8 +376,7 @@ static size_t mark_space_live_object_granules(uint8_t *metadata) {
 
 static inline int mark_space_mark_object(struct mark_space *space,
                                          struct gc_ref ref) {
-  struct gcobj *obj = gc_ref_heap_object(ref);
-  uint8_t *loc = object_metadata_byte(obj);
+  uint8_t *loc = metadata_byte_for_object(ref);
   uint8_t byte = *loc;
   if (byte & space->marked_mask)
     return 0;
@@ -414,7 +410,7 @@ static void clear_remaining_metadata_bytes_in_block(uintptr_t block,
   uintptr_t limit = block + BLOCK_SIZE;
   uintptr_t granules = (limit - base) >> GRANULE_SIZE_LOG_2;
   GC_ASSERT(granules <= GRANULES_PER_BLOCK);
-  memset(object_metadata_byte((void*)base), 0, granules);
+  memset(metadata_byte_for_addr(base), 0, granules);
 }
 
 static void finish_evacuation_allocator_block(uintptr_t block,
@@ -524,7 +520,7 @@ static struct gc_ref evacuation_allocate(struct mark_space *space,
 static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
                                                      struct gc_edge edge,
                                                      struct gc_ref old_ref) {
-  uint8_t *metadata = object_metadata_byte(gc_ref_heap_object(old_ref));
+  uint8_t *metadata = metadata_byte_for_object(old_ref);
   uint8_t byte = *metadata;
   if (byte & space->marked_mask)
     return 0;
@@ -557,7 +553,7 @@ static inline int mark_space_evacuate_or_mark_object(struct mark_space *space,
       gc_atomic_forward_commit(&fwd, new_ref);
       // Now update extent metadata, and indicate to the caller that
       // the object's fields need to be traced.
-      uint8_t *new_metadata = object_metadata_byte(gc_ref_heap_object(new_ref));
+      uint8_t *new_metadata = metadata_byte_for_object(new_ref);
       memcpy(new_metadata + 1, metadata + 1, object_granules - 1);
       gc_edge_update(edge, new_ref);
       metadata = new_metadata;
@@ -809,10 +805,10 @@ static void heap_reset_large_object_pages(struct gc_heap *heap, size_t npages) {
 
 static void mutator_mark_buf_grow(struct gc_mutator_mark_buf *buf) {
   size_t old_capacity = buf->capacity;
-  size_t old_bytes = old_capacity * sizeof(struct gcobj*);
+  size_t old_bytes = old_capacity * sizeof(struct gc_ref);
 
   size_t new_bytes = old_bytes ? old_bytes * 2 : getpagesize();
-  size_t new_capacity = new_bytes / sizeof(struct gcobj*);
+  size_t new_capacity = new_bytes / sizeof(struct gc_ref);
 
   void *mem = mmap(NULL, new_bytes, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
@@ -829,21 +825,21 @@ static void mutator_mark_buf_grow(struct gc_mutator_mark_buf *buf) {
 }
 
 static void mutator_mark_buf_push(struct gc_mutator_mark_buf *buf,
-                                  struct gcobj *val) {
+                                  struct gc_ref ref) {
   if (GC_UNLIKELY(buf->size == buf->capacity))
     mutator_mark_buf_grow(buf);
-  buf->objects[buf->size++] = val;
+  buf->objects[buf->size++] = ref;
 }
 
 static void mutator_mark_buf_release(struct gc_mutator_mark_buf *buf) {
-  size_t bytes = buf->size * sizeof(struct gcobj*);
+  size_t bytes = buf->size * sizeof(struct gc_ref);
   if (bytes >= getpagesize())
     madvise(buf->objects, align_up(bytes, getpagesize()), MADV_DONTNEED);
   buf->size = 0;
 }
 
 static void mutator_mark_buf_destroy(struct gc_mutator_mark_buf *buf) {
-  size_t bytes = buf->capacity * sizeof(struct gcobj*);
+  size_t bytes = buf->capacity * sizeof(struct gc_ref);
   if (bytes)
     munmap(buf->objects, bytes);
 }
@@ -898,15 +894,13 @@ void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
 static void trace_and_enqueue_locally(struct gc_edge edge, void *data) {
   struct gc_mutator *mut = data;
   if (trace_edge(mutator_heap(mut), edge))
-    mutator_mark_buf_push(&mut->mark_buf,
-                          gc_ref_heap_object(gc_edge_ref(edge)));
+    mutator_mark_buf_push(&mut->mark_buf, gc_edge_ref(edge));
 }
 
 static void trace_and_enqueue_globally(struct gc_edge edge, void *data) {
   struct gc_heap *heap = data;
   if (trace_edge(heap, edge))
-    tracer_enqueue_root(&heap->tracer,
-                        gc_ref_heap_object(gc_edge_ref(edge)));
+    tracer_enqueue_root(&heap->tracer, gc_edge_ref(edge));
 }
 
 // Mark the roots of a mutator that is stopping for GC. We can't
@@ -951,8 +945,8 @@ static void trace_mutator_roots_after_stop(struct gc_heap *heap) {
   int active_mutators_already_marked = heap_should_mark_while_stopping(heap);
   while (mut) {
     if (active_mutators_already_marked)
-      tracer_enqueue_roots(&heap->tracer,
-                           mut->mark_buf.objects, mut->mark_buf.size);
+      tracer_enqueue_roots(&heap->tracer, mut->mark_buf.objects,
+                           mut->mark_buf.size);
     else
       trace_mutator_roots_with_lock(mut);
     struct gc_mutator *next = mut->next;
@@ -1009,9 +1003,8 @@ static void mark_space_trace_card(struct mark_space *space,
         mark_bytes &= ~(((uint64_t)0xff) << (granule_offset * 8));
         size_t granule = granule_base + granule_offset;
         uintptr_t addr = first_addr_in_slab + granule * GRANULE_SIZE;
-        struct gcobj *obj = (struct gcobj*)addr;
-        GC_ASSERT(object_metadata_byte(obj) == &slab->metadata[granule]);
-        tracer_enqueue_root(&heap->tracer, obj);
+        GC_ASSERT(metadata_byte_for_addr(addr) == &slab->metadata[granule]);
+        tracer_enqueue_root(&heap->tracer, gc_ref(addr));
       }
     }
   }
@@ -1528,7 +1521,7 @@ static size_t next_hole_in_block(struct gc_mutator *mut) {
 
   while (sweep != limit) {
     GC_ASSERT((sweep & (GRANULE_SIZE - 1)) == 0);
-    uint8_t* metadata = object_metadata_byte((struct gcobj*)sweep);
+    uint8_t* metadata = metadata_byte_for_addr(sweep);
     size_t limit_granules = (limit - sweep) >> GRANULE_SIZE_LOG_2;
 
     // Except for when we first get a block, mut->sweep is positioned
@@ -1574,7 +1567,7 @@ static void finish_hole(struct gc_mutator *mut) {
     struct block_summary *summary = block_summary_for_addr(mut->block);
     summary->holes_with_fragmentation++;
     summary->fragmentation_granules += granules;
-    uint8_t *metadata = object_metadata_byte((void*)mut->alloc);
+    uint8_t *metadata = metadata_byte_for_addr(mut->alloc);
     memset(metadata, 0, granules);
     mut->alloc = mut->sweep;
   }
@@ -1766,10 +1759,10 @@ void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
   uintptr_t alloc = mut->alloc;
   uintptr_t sweep = mut->sweep;
   uintptr_t new_alloc = alloc + size;
-  struct gcobj *obj;
+  struct gc_ref ret;
   if (new_alloc <= sweep) {
     mut->alloc = new_alloc;
-    obj = (struct gcobj *)alloc;
+    ret = gc_ref(alloc);
   } else {
     size_t granules = size >> GRANULE_SIZE_LOG_2;
     while (1) {
@@ -1781,11 +1774,11 @@ void* gc_allocate_small(struct gc_mutator *mut, size_t size) {
       if (!hole)
         trigger_collection(mut);
     }
-    obj = (struct gcobj*)mut->alloc;
+    ret = gc_ref(mut->alloc);
     mut->alloc += size;
   }
-  gc_update_alloc_table(mut, gc_ref_from_heap_object(obj), size);
-  return obj;
+  gc_update_alloc_table(mut, ret, size);
+  return gc_ref_heap_object(ret);
 }
 
 void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
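Note how gc_allocate_small now computes with gc_ref internally and converts back to a raw pointer only at the public boundary, through gc_ref_heap_object. From an embedder's point of view nothing changes; a hypothetical usage sketch:

    // Embedder-side allocation: the public API still deals in void*.
    // `struct pair` and its layout are hypothetical.
    struct pair { void *car; void *cdr; };

    static struct pair* allocate_pair(struct gc_mutator *mut) {
      struct pair *p = gc_allocate_small(mut, sizeof(struct pair));
      p->car = p->cdr = NULL;
      return p;
    }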