mirror of
https://git.savannah.gnu.org/git/guile.git
synced 2025-05-13 17:20:21 +02:00
Move to mark queue, is it an improvement?
This commit is contained in:
parent
d2828975a5
commit
45405efe56
1 changed files with 66 additions and 47 deletions
113
serial-marker.h
113
serial-marker.h
|
@ -7,22 +7,23 @@
|
||||||
#include "assert.h"
|
#include "assert.h"
|
||||||
#include "debug.h"
|
#include "debug.h"
|
||||||
|
|
||||||
/* A FIFO of object addresses awaiting marking.  `read` and `write`
   are monotonically increasing logical positions; the physical slot
   for a position is `pos & (size - 1)`, so `size` must always be a
   power of two.  The queue is full when write - read == size.  */
struct mark_queue {
  size_t size;     /* Capacity in elements; always a power of two.  */
  size_t read;     /* Logical position of the next element to pop.  */
  size_t write;    /* Logical position of the next free slot.  */
  uintptr_t *buf;  /* mmap'd ring buffer of `size` elements.  */
};
|
||||||
|
|
||||||
/* Largest allowed element count: the count whose byte size occupies
   half the address space.  Growth stops once this cap is reached.  */
static const size_t mark_queue_max_size =
  (1ULL << (sizeof(uintptr_t) * 8 - 1)) / sizeof(uintptr_t);

/* Buffers at least this many bytes large have their pages returned
   to the OS when the queue is released between collections.  */
static const size_t mark_queue_release_byte_threshold = 1 * 1024 * 1024;
|
||||||
|
|
||||||
static void*
|
static void*
|
||||||
mark_stack_alloc(size_t size) {
|
mark_queue_alloc(size_t size) {
|
||||||
void *mem = mmap(NULL, size, PROT_READ|PROT_WRITE,
|
void *mem = mmap(NULL, size * sizeof(uintptr_t), PROT_READ|PROT_WRITE,
|
||||||
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
|
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
|
||||||
if (mem == MAP_FAILED) {
|
if (mem == MAP_FAILED) {
|
||||||
perror("Failed to grow mark stack");
|
perror("Failed to grow mark queue");
|
||||||
DEBUG("Failed to allocate %zu bytes", size);
|
DEBUG("Failed to allocate %zu bytes", size);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -30,67 +31,85 @@ mark_stack_alloc(size_t size) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
mark_stack_init(struct mark_stack *stack) {
|
mark_queue_init(struct mark_queue *q) {
|
||||||
stack->size = getpagesize();
|
q->size = getpagesize();
|
||||||
stack->next = 0;
|
q->read = 0;
|
||||||
stack->buf = mark_stack_alloc(stack->size);
|
q->write = 0;
|
||||||
return !!stack->buf;
|
q->buf = mark_queue_alloc(q->size);
|
||||||
|
return !!q->buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline uintptr_t
|
||||||
|
mark_queue_get(struct mark_queue *q, size_t idx) {
|
||||||
|
return q->buf[idx & (q->size - 1)];
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
mark_queue_put(struct mark_queue *q, size_t idx, uintptr_t x) {
|
||||||
|
q->buf[idx & (q->size - 1)] = x;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
mark_stack_grow(struct mark_stack *stack) {
|
mark_queue_grow(struct mark_queue *q) {
|
||||||
uintptr_t size = stack->size;
|
uintptr_t old_size = q->size;
|
||||||
if (size >= mark_stack_max_size) {
|
size_t old_read = q->read;
|
||||||
DEBUG("mark stack already at max size of %zu bytes", size);
|
size_t old_write = q->write;
|
||||||
|
uintptr_t *old_buf = q->buf;
|
||||||
|
if (old_size >= mark_queue_max_size) {
|
||||||
|
DEBUG("mark queue already at max size of %zu bytes", old_size);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
size *= 2;
|
uintptr_t new_size = old_size * 2;
|
||||||
uintptr_t *buf = mark_stack_alloc(size);
|
size_t new_read = 0;
|
||||||
if (!buf)
|
size_t new_write = 0;
|
||||||
|
uintptr_t *new_buf = mark_queue_alloc(new_size);
|
||||||
|
if (!new_buf)
|
||||||
return 0;
|
return 0;
|
||||||
memcpy(buf, stack->buf, stack->next * sizeof(uintptr_t));
|
|
||||||
munmap(stack->buf, stack->size * sizeof(uintptr_t));
|
while (old_read < old_write)
|
||||||
stack->size = size;
|
new_buf[new_write++] = mark_queue_get(q, old_read++);
|
||||||
stack->buf = buf;
|
|
||||||
|
munmap(old_buf, old_size * sizeof(uintptr_t));
|
||||||
|
|
||||||
|
q->size = new_size;
|
||||||
|
q->read = new_read;
|
||||||
|
q->write = new_write;
|
||||||
|
q->buf = new_buf;
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
mark_stack_push(struct mark_stack *stack, void *p) {
|
mark_queue_push(struct mark_queue *q, void *p) {
|
||||||
size_t next = stack->next;
|
if (UNLIKELY(q->write - q->read == q->size)) {
|
||||||
if (UNLIKELY(next == stack->size)) {
|
if (!mark_queue_grow(q))
|
||||||
if (!mark_stack_grow(stack))
|
|
||||||
abort();
|
abort();
|
||||||
}
|
}
|
||||||
stack->buf[next] = (uintptr_t)p;
|
mark_queue_put(q, q->write++, (uintptr_t)p);
|
||||||
stack->next = next + 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void*
|
static inline void*
|
||||||
mark_stack_pop(struct mark_stack *stack) {
|
mark_queue_pop(struct mark_queue *q) {
|
||||||
size_t next = stack->next;
|
if (UNLIKELY(q->read == q->write))
|
||||||
if (UNLIKELY(next == 0))
|
|
||||||
return NULL;
|
return NULL;
|
||||||
uintptr_t ret = stack->buf[next - 1];
|
return (void*)mark_queue_get(q, q->read++);
|
||||||
stack->next = next - 1;
|
|
||||||
return (void*)ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
mark_stack_release(struct mark_stack *stack) {
|
mark_queue_release(struct mark_queue *q) {
|
||||||
size_t byte_size = stack->size * sizeof(uintptr_t);
|
size_t byte_size = q->size * sizeof(uintptr_t);
|
||||||
if (byte_size >= mark_stack_release_byte_threshold)
|
if (byte_size >= mark_queue_release_byte_threshold)
|
||||||
madvise(stack->buf, byte_size, MADV_DONTNEED);
|
madvise(q->buf, byte_size, MADV_DONTNEED);
|
||||||
|
q->read = q->write = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
mark_stack_destroy(struct mark_stack *stack) {
|
mark_queue_destroy(struct mark_queue *q) {
|
||||||
size_t byte_size = stack->size * sizeof(uintptr_t);
|
size_t byte_size = q->size * sizeof(uintptr_t);
|
||||||
munmap(stack->buf, byte_size);
|
munmap(q->buf, byte_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct marker {
|
struct marker {
|
||||||
struct mark_stack stack;
|
struct mark_queue queue;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct context;
|
struct context;
|
||||||
|
@ -98,11 +117,11 @@ static inline struct marker* context_marker(struct context *cx);
|
||||||
|
|
||||||
static int
|
static int
|
||||||
marker_init(struct context *cx) {
|
marker_init(struct context *cx) {
|
||||||
return mark_stack_init(&context_marker(cx)->stack);
|
return mark_queue_init(&context_marker(cx)->queue);
|
||||||
}
|
}
|
||||||
/* The serial marker needs no per-collection setup.  */
static void marker_prepare(struct context *cx) {}
|
||||||
static void marker_release(struct context *cx) {
|
static void marker_release(struct context *cx) {
|
||||||
mark_stack_release(&context_marker(cx)->stack);
|
mark_queue_release(&context_marker(cx)->queue);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct gcobj;
|
struct gcobj;
|
||||||
|
@ -117,7 +136,7 @@ marker_visit(struct context *cx, void **loc) {
|
||||||
struct gcobj *obj = *loc;
|
struct gcobj *obj = *loc;
|
||||||
if (obj) {
|
if (obj) {
|
||||||
__builtin_prefetch(obj);
|
__builtin_prefetch(obj);
|
||||||
mark_stack_push(&context_marker(cx)->stack, obj);
|
mark_queue_push(&context_marker(cx)->queue, obj);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
static inline void
|
static inline void
|
||||||
|
@ -128,7 +147,7 @@ static inline void
|
||||||
marker_trace(struct context *cx,
|
marker_trace(struct context *cx,
|
||||||
void (*process)(struct context *, struct gcobj *)) {
|
void (*process)(struct context *, struct gcobj *)) {
|
||||||
struct gcobj *obj;
|
struct gcobj *obj;
|
||||||
while ((obj = mark_stack_pop(&context_marker(cx)->stack)))
|
while ((obj = mark_queue_pop(&context_marker(cx)->queue)))
|
||||||
if (mark_object(obj))
|
if (mark_object(obj))
|
||||||
process(cx, obj);
|
process(cx, obj);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue