
Add gc_ prefix to tracer API

Andy Wingo, 2024-07-08 17:15:00 +02:00
parent b4543ad641
commit 921c012b51
4 changed files with 63 additions and 62 deletions
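The change is a mechanical rename: the tracer entry points that collector code calls gain a gc_ prefix, struct tracer becomes struct gc_tracer, and the heap_tracer() accessor keeps its name but now returns a struct gc_tracer*. Condensed from the tracer-interface hunks below (not a verbatim copy of the header; inline attributes such as GC_ALWAYS_INLINE are elided), the renamed surface is roughly:

struct gc_tracer;                                                       /* was: struct tracer */
static int gc_tracer_init(struct gc_heap *heap, size_t parallelism);    /* was: tracer_init */
static void gc_tracer_prepare(struct gc_heap *heap);                    /* was: tracer_prepare */
static void gc_tracer_release(struct gc_heap *heap);                    /* was: tracer_release */
static inline void gc_tracer_enqueue_root(struct gc_tracer *tracer,
                                          struct gc_ref obj);           /* was: tracer_enqueue_root */
static inline void gc_tracer_enqueue_roots(struct gc_tracer *tracer,
                                           struct gc_ref *objs,
                                           size_t count);               /* was: tracer_enqueue_roots */
static inline void gc_tracer_enqueue(struct gc_ref ref, struct gc_heap *heap,
                                     void *trace_data);                 /* was: tracer_enqueue */
static inline void gc_tracer_trace(struct gc_heap *heap);               /* was: tracer_trace */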


@@ -35,7 +35,7 @@ struct trace_worker {
#define TRACE_WORKERS_MAX_COUNT 8
struct tracer {
struct gc_tracer {
atomic_size_t active_tracers;
size_t worker_count;
long epoch;
@@ -52,7 +52,7 @@ struct local_tracer {
static int
trace_worker_init(struct trace_worker *worker, struct gc_heap *heap,
struct tracer *tracer, size_t id) {
struct gc_tracer *tracer, size_t id) {
worker->heap = heap;
worker->id = id;
worker->steal_id = 0;
@@ -66,7 +66,7 @@ static void trace_worker_trace(struct trace_worker *worker);
static void*
trace_worker_thread(void *data) {
struct trace_worker *worker = data;
struct tracer *tracer = heap_tracer(worker->heap);
struct gc_tracer *tracer = heap_tracer(worker->heap);
long trace_epoch = 0;
pthread_mutex_lock(&worker->lock);
@@ -92,8 +92,8 @@ trace_worker_spawn(struct trace_worker *worker) {
}
static int
tracer_init(struct gc_heap *heap, size_t parallelism) {
struct tracer *tracer = heap_tracer(heap);
gc_tracer_init(struct gc_heap *heap, size_t parallelism) {
struct gc_tracer *tracer = heap_tracer(heap);
atomic_init(&tracer->active_tracers, 0);
tracer->epoch = 0;
pthread_mutex_init(&tracer->lock, NULL);
@@ -116,19 +116,19 @@ tracer_init(struct gc_heap *heap, size_t parallelism) {
return 1;
}
static void tracer_prepare(struct gc_heap *heap) {
struct tracer *tracer = heap_tracer(heap);
static void gc_tracer_prepare(struct gc_heap *heap) {
struct gc_tracer *tracer = heap_tracer(heap);
for (size_t i = 0; i < tracer->worker_count; i++)
tracer->workers[i].steal_id = 0;
}
static void tracer_release(struct gc_heap *heap) {
struct tracer *tracer = heap_tracer(heap);
static void gc_tracer_release(struct gc_heap *heap) {
struct gc_tracer *tracer = heap_tracer(heap);
for (size_t i = 0; i < tracer->worker_count; i++)
shared_worklist_release(&tracer->workers[i].deque);
}
static inline void
tracer_unpark_all_workers(struct tracer *tracer) {
tracer_unpark_all_workers(struct gc_tracer *tracer) {
long old_epoch =
atomic_fetch_add_explicit(&tracer->epoch, 1, memory_order_acq_rel);
long epoch = old_epoch + 1;
@@ -138,7 +138,7 @@ tracer_unpark_all_workers(struct tracer *tracer) {
}
static inline void
tracer_maybe_unpark_workers(struct tracer *tracer) {
tracer_maybe_unpark_workers(struct gc_tracer *tracer) {
size_t active =
atomic_load_explicit(&tracer->active_tracers, memory_order_acquire);
if (active < tracer->worker_count)
@@ -159,7 +159,7 @@ tracer_share(struct local_tracer *trace) {
}
static inline void
tracer_enqueue(struct gc_ref ref, struct gc_heap *heap, void *trace_data) {
gc_tracer_enqueue(struct gc_ref ref, struct gc_heap *heap, void *trace_data) {
struct local_tracer *trace = trace_data;
if (local_worklist_full(&trace->local))
tracer_share(trace);
@@ -167,19 +167,19 @@ tracer_enqueue(struct gc_ref ref, struct gc_heap *heap, void *trace_data) {
}
static struct gc_ref
tracer_steal_from_worker(struct tracer *tracer, size_t id) {
tracer_steal_from_worker(struct gc_tracer *tracer, size_t id) {
ASSERT(id < tracer->worker_count);
return shared_worklist_steal(&tracer->workers[id].deque);
}
static int
tracer_can_steal_from_worker(struct tracer *tracer, size_t id) {
tracer_can_steal_from_worker(struct gc_tracer *tracer, size_t id) {
ASSERT(id < tracer->worker_count);
return shared_worklist_can_steal(&tracer->workers[id].deque);
}
static struct gc_ref
trace_worker_steal_from_any(struct trace_worker *worker, struct tracer *tracer) {
trace_worker_steal_from_any(struct trace_worker *worker, struct gc_tracer *tracer) {
for (size_t i = 0; i < tracer->worker_count; i++) {
DEBUG("tracer #%zu: stealing from #%zu\n", worker->id, worker->steal_id);
struct gc_ref obj = tracer_steal_from_worker(tracer, worker->steal_id);
@@ -196,7 +196,7 @@ trace_worker_steal_from_any(struct trace_worker *worker, struct tracer *tracer)
static int
trace_worker_can_steal_from_any(struct trace_worker *worker,
struct tracer *tracer) {
struct gc_tracer *tracer) {
DEBUG("tracer #%zu: checking if any worker has tasks\n", worker->id);
for (size_t i = 0; i < tracer->worker_count; i++) {
int res = tracer_can_steal_from_worker(tracer, worker->steal_id);
@@ -217,7 +217,7 @@ trace_worker_should_continue(struct trace_worker *worker) {
if (worker->id != 0)
return 0;
struct tracer *tracer = heap_tracer(worker->heap);
struct gc_tracer *tracer = heap_tracer(worker->heap);
for (size_t spin_count = 0;; spin_count++) {
if (atomic_load_explicit(&tracer->active_tracers,
@@ -248,7 +248,7 @@ trace_worker_should_continue(struct trace_worker *worker) {
static struct gc_ref
trace_worker_steal(struct local_tracer *trace) {
struct trace_worker *worker = trace->worker;
struct tracer *tracer = heap_tracer(worker->heap);
struct gc_tracer *tracer = heap_tracer(worker->heap);
// It could be that the worker's local trace queue has simply
// overflowed. In that case avoid contention by trying to pop
@@ -271,7 +271,7 @@ trace_worker_steal(struct local_tracer *trace) {
static void
trace_worker_trace(struct trace_worker *worker) {
struct gc_heap *heap = worker->heap;
struct tracer *tracer = heap_tracer(heap);
struct gc_tracer *tracer = heap_tracer(heap);
atomic_fetch_add_explicit(&tracer->active_tracers, 1, memory_order_acq_rel);
struct local_tracer trace;
@@ -303,21 +303,21 @@ trace_worker_trace(struct trace_worker *worker) {
}
static inline void
tracer_enqueue_root(struct tracer *tracer, struct gc_ref ref) {
gc_tracer_enqueue_root(struct gc_tracer *tracer, struct gc_ref ref) {
struct shared_worklist *worker0_deque = &tracer->workers[0].deque;
shared_worklist_push(worker0_deque, ref);
}
static inline void
tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objv,
size_t count) {
gc_tracer_enqueue_roots(struct gc_tracer *tracer, struct gc_ref *objv,
size_t count) {
struct shared_worklist *worker0_deque = &tracer->workers[0].deque;
shared_worklist_push_many(worker0_deque, objv, count);
}
static inline void
tracer_trace(struct gc_heap *heap) {
struct tracer *tracer = heap_tracer(heap);
gc_tracer_trace(struct gc_heap *heap) {
struct gc_tracer *tracer = heap_tracer(heap);
DEBUG("starting trace; %zu workers\n", tracer->worker_count);


@@ -9,34 +9,34 @@
#include "simple-worklist.h"
#include "tracer.h"
struct tracer {
struct gc_tracer {
struct simple_worklist worklist;
};
static int
tracer_init(struct gc_heap *heap, size_t parallelism) {
gc_tracer_init(struct gc_heap *heap, size_t parallelism) {
return simple_worklist_init(&heap_tracer(heap)->worklist);
}
static void tracer_prepare(struct gc_heap *heap) {}
static void tracer_release(struct gc_heap *heap) {
static void gc_tracer_prepare(struct gc_heap *heap) {}
static void gc_tracer_release(struct gc_heap *heap) {
simple_worklist_release(&heap_tracer(heap)->worklist);
}
static inline void
tracer_enqueue_root(struct tracer *tracer, struct gc_ref obj) {
gc_tracer_enqueue_root(struct gc_tracer *tracer, struct gc_ref obj) {
simple_worklist_push(&tracer->worklist, obj);
}
static inline void
tracer_enqueue_roots(struct tracer *tracer, struct gc_ref *objs,
size_t count) {
gc_tracer_enqueue_roots(struct gc_tracer *tracer, struct gc_ref *objs,
size_t count) {
simple_worklist_push_many(&tracer->worklist, objs, count);
}
static inline void
tracer_enqueue(struct gc_ref ref, struct gc_heap *heap, void *trace_data) {
tracer_enqueue_root(heap_tracer(heap), ref);
gc_tracer_enqueue(struct gc_ref ref, struct gc_heap *heap, void *trace_data) {
gc_tracer_enqueue_root(heap_tracer(heap), ref);
}
static inline void
tracer_trace(struct gc_heap *heap) {
gc_tracer_trace(struct gc_heap *heap) {
do {
struct gc_ref obj = simple_worklist_pop(&heap_tracer(heap)->worklist);
if (!gc_ref_is_heap_object(obj))


@@ -11,7 +11,7 @@ struct gc_heap;
////////////////////////////////////////////////////////////////////////
// Initialize the tracer when the heap is created.
static inline struct tracer* heap_tracer(struct gc_heap *heap);
static inline struct gc_tracer* heap_tracer(struct gc_heap *heap);
// Visit all fields in an object.
static inline void trace_one(struct gc_ref ref, struct gc_heap *heap,
@@ -25,31 +25,32 @@ static inline int trace_edge(struct gc_heap *heap,
/// To be implemented by tracer.
////////////////////////////////////////////////////////////////////////
// The tracer struct itself should be defined in the implementation.
struct tracer;
// The tracer struct itself should be defined by the tracer
// implementation.
struct gc_tracer;
// Initialize the tracer when the heap is created.
static int tracer_init(struct gc_heap *heap, size_t parallelism);
static int gc_tracer_init(struct gc_heap *heap, size_t parallelism);
// Initialize the tracer for a new GC cycle.
static void tracer_prepare(struct gc_heap *heap);
static void gc_tracer_prepare(struct gc_heap *heap);
// Release any resources allocated during the trace.
static void tracer_release(struct gc_heap *heap);
static void gc_tracer_release(struct gc_heap *heap);
// Add root objects to the trace. Call before tracer_trace.
static inline void tracer_enqueue_root(struct tracer *tracer,
struct gc_ref obj);
static inline void tracer_enqueue_roots(struct tracer *tracer,
struct gc_ref *objs,
size_t count);
static inline void gc_tracer_enqueue_root(struct gc_tracer *tracer,
struct gc_ref obj);
static inline void gc_tracer_enqueue_roots(struct gc_tracer *tracer,
struct gc_ref *objs,
size_t count);
// Given that an object has been shaded grey, enqueue for tracing.
static inline void tracer_enqueue(struct gc_ref ref, struct gc_heap *heap,
void *trace_data) GC_ALWAYS_INLINE;
static inline void gc_tracer_enqueue(struct gc_ref ref, struct gc_heap *heap,
void *trace_data) GC_ALWAYS_INLINE;
// Run the full trace.
static inline void tracer_trace(struct gc_heap *heap);
static inline void gc_tracer_trace(struct gc_heap *heap);
////////////////////////////////////////////////////////////////////////
/// Procedures that work with any tracer.
@@ -62,7 +63,7 @@ static inline void tracer_visit(struct gc_edge edge, struct gc_heap *heap,
static inline void
tracer_visit(struct gc_edge edge, struct gc_heap *heap, void *trace_data) {
if (trace_edge(heap, edge))
tracer_enqueue(gc_edge_ref(edge), heap, trace_data);
gc_tracer_enqueue(gc_edge_ref(edge), heap, trace_data);
}
#endif // TRACER_H
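
Taken together, the declarations above form the contract between a collector and either of the two tracer implementations changed above (one single-threaded, one with parallel trace workers). As a rough illustration of the calling convention after the rename (a hypothetical sketch, not code from this commit: the function name and the pre-gathered root array are stand-ins, and stop-the-world, remembered-set, and ephemeron handling are elided), a collector drives one trace like this:

static void example_trace_cycle(struct gc_heap *heap,
                                struct gc_ref *roots, size_t nroots) {
  gc_tracer_prepare(heap);                      /* per-cycle setup */
  gc_tracer_enqueue_roots(heap_tracer(heap),    /* seed the trace worklist */
                          roots, nroots);
  gc_tracer_trace(heap);                        /* run the trace to a fixpoint */
  gc_tracer_release(heap);                      /* drop per-cycle resources */
}

The collector changes in the last file of this diff follow the same sequence inside collect(), with gc_tracer_init() called once from heap_init().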


@@ -311,7 +311,7 @@ struct gc_heap {
long count;
uint8_t last_collection_was_minor;
struct gc_mutator *inactive_mutators;
struct tracer tracer;
struct gc_tracer tracer;
double fragmentation_low_threshold;
double fragmentation_high_threshold;
double minor_gc_yield_threshold;
@@ -351,7 +351,7 @@ struct gc_mutator {
struct gc_mutator *next;
};
static inline struct tracer* heap_tracer(struct gc_heap *heap) {
static inline struct gc_tracer* heap_tracer(struct gc_heap *heap) {
return &heap->tracer;
}
static inline struct mark_space* heap_mark_space(struct gc_heap *heap) {
@@ -1126,7 +1126,7 @@ static void trace_and_enqueue_globally(struct gc_edge edge,
struct gc_heap *heap,
void *unused) {
if (trace_edge(heap, edge))
tracer_enqueue_root(&heap->tracer, gc_edge_ref(edge));
gc_tracer_enqueue_root(&heap->tracer, gc_edge_ref(edge));
}
static inline void do_trace_conservative_ref_and_enqueue_globally(struct gc_conservative_ref ref,
@@ -1135,7 +1135,7 @@ static inline void do_trace_conservative_ref_and_enqueue_globally(struct gc_cons
int possibly_interior) {
struct gc_ref object = trace_conservative_ref(heap, ref, possibly_interior);
if (gc_ref_is_heap_object(object))
tracer_enqueue_root(&heap->tracer, object);
gc_tracer_enqueue_root(&heap->tracer, object);
}
static void trace_possibly_interior_conservative_ref_and_enqueue_globally(struct gc_conservative_ref ref,
@@ -1177,7 +1177,7 @@ static inline void tracer_trace_conservative_ref(struct gc_conservative_ref ref,
int possibly_interior = 0;
struct gc_ref resolved = trace_conservative_ref(heap, ref, possibly_interior);
if (gc_ref_is_heap_object(resolved))
tracer_enqueue(resolved, heap, data);
gc_tracer_enqueue(resolved, heap, data);
}
static inline void trace_one_conservatively(struct gc_ref ref,
@@ -1325,8 +1325,8 @@ static void trace_mutator_roots_after_stop(struct gc_heap *heap) {
// Also collect any already-marked grey objects and put them on the
// global trace queue.
if (active_mutators_already_marked)
tracer_enqueue_roots(&heap->tracer, mut->mark_buf.objects,
mut->mark_buf.size);
gc_tracer_enqueue_roots(&heap->tracer, mut->mark_buf.objects,
mut->mark_buf.size);
else
trace_mutator_roots_with_lock(mut);
// Also unlink mutator_trace_list chain.
@@ -1349,7 +1349,7 @@ static void trace_global_conservative_roots(struct gc_heap *heap) {
}
static void enqueue_generational_root(struct gc_ref ref, struct gc_heap *heap) {
tracer_enqueue_root(&heap->tracer, ref);
gc_tracer_enqueue_root(&heap->tracer, ref);
}
// Note that it's quite possible (and even likely) that any given remset
@@ -1889,7 +1889,7 @@ static void collect(struct gc_mutator *mut,
large_object_space_start_gc(lospace, is_minor);
gc_extern_space_start_gc(exspace, is_minor);
resolve_ephemerons_lazily(heap);
tracer_prepare(heap);
gc_tracer_prepare(heap);
HEAP_EVENT(heap, requesting_stop);
request_mutators_to_stop(heap);
trace_mutator_roots_with_lock_before_stop(mut);
@@ -1906,14 +1906,14 @@ static void collect(struct gc_mutator *mut,
prepare_for_evacuation(heap);
trace_roots_after_stop(heap);
HEAP_EVENT(heap, roots_traced);
tracer_trace(heap);
gc_tracer_trace(heap);
HEAP_EVENT(heap, heap_traced);
resolve_ephemerons_eagerly(heap);
while (enqueue_resolved_ephemerons(heap))
tracer_trace(heap);
gc_tracer_trace(heap);
HEAP_EVENT(heap, ephemerons_traced);
sweep_ephemerons(heap);
tracer_release(heap);
gc_tracer_release(heap);
mark_space_finish_gc(space, gc_kind);
large_object_space_finish_gc(lospace, is_minor);
gc_extern_space_finish_gc(exspace, is_minor);
@@ -2366,7 +2366,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
pthread_cond_init(&heap->collector_cond, NULL);
heap->size = options->common.heap_size;
if (!tracer_init(heap, options->common.parallelism))
if (!gc_tracer_init(heap, options->common.parallelism))
GC_CRASH();
heap->pending_ephemerons_size_factor = 0.005;