diff --git a/libguile/whippet/src/mmc.c b/libguile/whippet/src/mmc.c
index 4278e9254..fe99ee331 100644
--- a/libguile/whippet/src/mmc.c
+++ b/libguile/whippet/src/mmc.c
@@ -470,12 +470,14 @@ trace_root(struct gc_root root, struct gc_heap *heap,
                                    trace_remembered_edge,
                                    heap, worker);
     break;
   case GC_ROOT_KIND_HEAP_PINNED_ROOTS:
+    GC_ASSERT(!heap_nofl_space(heap)->evacuating);
     gc_trace_heap_pinned_roots(root.heap->roots, tracer_visit_pinned_root,
                                trace_conservative_edges_wrapper,
                                heap, worker);
     break;
   case GC_ROOT_KIND_MUTATOR_PINNED_ROOTS:
+    GC_ASSERT(!heap_nofl_space(heap)->evacuating);
     gc_trace_mutator_pinned_roots(root.mutator->roots,
                                   tracer_visit_pinned_root,
                                   trace_conservative_edges_wrapper,
diff --git a/libguile/whippet/src/nofl-space.h b/libguile/whippet/src/nofl-space.h
index cda7e5f92..b10371bee 100644
--- a/libguile/whippet/src/nofl-space.h
+++ b/libguile/whippet/src/nofl-space.h
@@ -1630,6 +1630,8 @@ nofl_space_set_nonempty_mark(struct nofl_space *space, uint8_t *metadata,
                              uint8_t byte, struct gc_ref ref) {
   // FIXME: Check that relaxed atomics are actually worth it.
   if (nofl_space_set_mark(space, metadata, byte)) {
+    GC_ASSERT(nofl_metadata_byte_is_young_or_has_mark(byte,
+                                                      space->survivor_mark));
     nofl_block_set_mark(gc_ref_value(ref));
     return 1;
   }
@@ -1925,10 +1927,11 @@ nofl_space_mark_conservative_ref(struct nofl_space *space,
 
   GC_ASSERT(nofl_metadata_byte_is_young_or_has_mark(resolved.byte,
                                                     space->survivor_mark));
-  nofl_space_set_nonempty_mark(space, resolved.metadata, resolved.byte,
-                               gc_ref(resolved.addr));
+  if (nofl_space_set_nonempty_mark(space, resolved.metadata, resolved.byte,
+                                   gc_ref(resolved.addr)))
+    return gc_ref(resolved.addr);
 
-  return gc_ref(resolved.addr);
+  return gc_ref_null();
 }
 
 static inline size_t
diff --git a/libguile/whippet/src/parallel-tracer.h b/libguile/whippet/src/parallel-tracer.h
index db8afae8a..70edbe7f2 100644
--- a/libguile/whippet/src/parallel-tracer.h
+++ b/libguile/whippet/src/parallel-tracer.h
@@ -184,6 +184,19 @@ tracer_share(struct gc_trace_worker *worker) {
   tracer_maybe_unpark_workers(worker->tracer);
 }
 
+static inline void
+tracer_share_all(struct gc_trace_worker *worker) {
+  LOG("tracer #%zu: found %zu roots\n", worker->id,
+      local_worklist_size (&worker->local));
+  while (!local_worklist_empty (&worker->local)) {
+    struct gc_ref *objv;
+    size_t count =
+      local_worklist_pop_many(&worker->local, &objv, LOCAL_WORKLIST_SIZE);
+    shared_worklist_push_many(&worker->shared, objv, count);
+  }
+  // No unparking; this is used at the end of a roots-only trace.
+}
+
 static inline void
 gc_trace_worker_enqueue(struct gc_trace_worker *worker, struct gc_ref ref) {
   ASSERT(gc_ref_is_heap_object(ref));
@@ -332,6 +345,7 @@ trace_with_data(struct gc_tracer *tracer,
     // result of marking roots isn't ours to deal with. However we do need to
     // synchronize with remote workers to ensure they have completed their
     // work items.
+    tracer_share_all(worker);
     if (worker->id == 0) {
       for (size_t i = 1; i < tracer->worker_count; i++)
         pthread_mutex_lock(&tracer->workers[i].lock);