diff --git a/libguile/whippet/api/mmc-attrs.h b/libguile/whippet/api/mmc-attrs.h
index 116c64036..a9fdcc844 100644
--- a/libguile/whippet/api/mmc-attrs.h
+++ b/libguile/whippet/api/mmc-attrs.h
@@ -35,6 +35,7 @@ static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_
   uint8_t trace_precisely = 0;
   uint8_t trace_none = 8;
   uint8_t trace_conservatively = 16;
+  uint8_t pinned = 16;
   switch (kind) {
   case GC_ALLOCATION_TAGGED:
     return young | trace_precisely;
@@ -43,8 +44,9 @@
     GC_CRASH ();
     return young | trace_conservatively;
   case GC_ALLOCATION_TAGGED_POINTERLESS:
-  case GC_ALLOCATION_UNTAGGED_POINTERLESS:
     return young | trace_none;
+  case GC_ALLOCATION_UNTAGGED_POINTERLESS:
+    return young | trace_none | pinned;
   default:
     GC_CRASH();
   }
diff --git a/libguile/whippet/src/mmc.c b/libguile/whippet/src/mmc.c
index 9b9b00046..65d076b06 100644
--- a/libguile/whippet/src/mmc.c
+++ b/libguile/whippet/src/mmc.c
@@ -617,7 +617,7 @@ grow_heap_if_necessary(struct gc_heap *heap,
   // If we cannot defragment and are making no progress but have a
   // growable heap, expand by 25% to add some headroom.
   size_t needed_headroom =
-    GC_CONSERVATIVE_TRACE
+    nofl_space_heap_has_ambiguous_edges (nofl)
     ? (progress ? 0 : nofl_active_block_count (nofl) * NOFL_BLOCK_SIZE / 4)
     : 0;
   size_t headroom = nofl_empty_block_count(nofl) * NOFL_BLOCK_SIZE;
@@ -1031,8 +1031,6 @@ compute_trace_kind(enum gc_allocation_kind kind) {
   case GC_ALLOCATION_TAGGED:
     return GC_TRACE_PRECISELY;
   case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
-    if (!GC_CONSERVATIVE_TRACE)
-      GC_CRASH ();
     return GC_TRACE_CONSERVATIVELY;
   case GC_ALLOCATION_TAGGED_POINTERLESS:
   case GC_ALLOCATION_UNTAGGED_POINTERLESS:
@@ -1359,7 +1357,7 @@ gc_init(const struct gc_options *options, struct gc_stack_addr stack_base,
   GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_CONSERVATIVE),
                NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY);
   GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_POINTERLESS),
-               NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_NONE);
+               NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_NONE | NOFL_METADATA_BYTE_PINNED);
   GC_ASSERT_EQ(gc_allocator_alloc_table_end_pattern(), NOFL_METADATA_BYTE_END);
   if (GC_GENERATIONAL) {
     GC_ASSERT_EQ(gc_write_barrier_field_table_alignment(), NOFL_SLAB_SIZE);
diff --git a/libguile/whippet/src/nofl-space.h b/libguile/whippet/src/nofl-space.h
index ce62c17d3..82315aaec 100644
--- a/libguile/whippet/src/nofl-space.h
+++ b/libguile/whippet/src/nofl-space.h
@@ -984,7 +984,7 @@ static void
 nofl_clear_pinned_bits_in_block(struct nofl_block_ref block) {
   uint8_t *meta = nofl_metadata_byte_for_addr(block.addr);
   uint64_t mask = broadcast_byte (NOFL_METADATA_BYTE_PINNED);
-  for (size_t i = 0; i < NOFL_GRANULES_PER_BLOCK; i++, meta += 8) {
+  for (size_t i = 0; i < NOFL_GRANULES_PER_BLOCK; i += 8, meta += 8) {
     uint64_t vals = load_eight_aligned_bytes(meta);
     if (vals & mask)
       store_eight_aligned_bytes(meta, vals & ~mask);
@@ -1045,6 +1045,7 @@ nofl_space_set_heap_has_ambiguous_edges (struct nofl_space *space)
     // conservatively-traced.  Ideally we would have another bit here.  For now,
     // race to clear all pinned bits.
     nofl_space_clear_all_pinned_bits (space);
+    space->evacuation_minimum_reserve = space->evacuation_reserve = 0.0;
   }
 }
 
@@ -1357,15 +1358,29 @@ nofl_metadata_byte_trace_kind(struct nofl_space *space, uint8_t byte)
   switch (byte & mask) {
   case NOFL_METADATA_BYTE_TRACE_PRECISELY:
     return GC_TRACE_PRECISELY;
-  case NOFL_METADATA_BYTE_TRACE_NONE:
-    return GC_TRACE_NONE;
   case NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY:
     return GC_TRACE_CONSERVATIVELY;
+  case NOFL_METADATA_BYTE_TRACE_NONE:
+    return GC_TRACE_NONE;
   default:
-    GC_CRASH();
+    /* Untagged pointerless objects are allocated with the PINNED bit,
+       because we can't relocate them, because we don't have a tag word
+       to hold the forwarding state.  Fortunately, this bit pattern is
+       different from NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY; we can
+       just leave it as-is. */
+    GC_ASSERT_EQ (byte & mask,
+                  NOFL_METADATA_BYTE_TRACE_NONE | NOFL_METADATA_BYTE_PINNED);
+    return GC_TRACE_NONE;
   }
 }
 
+static void
+nofl_assert_not_forwarded(struct gc_ref ref)
+{
+  struct gc_atomic_forward fwd = gc_atomic_forward_begin(ref);
+  GC_ASSERT_EQ(fwd.state, GC_FORWARDING_STATE_NOT_FORWARDED);
+}
+
 static void
 nofl_space_verify_sweepable_blocks(struct nofl_space *space,
                                    struct nofl_block_list *list)
@@ -1388,11 +1403,13 @@ nofl_space_verify_sweepable_blocks(struct nofl_space *space,
       granules++;
     GC_ASSERT(meta[granules - 1] & NOFL_METADATA_BYTE_END);
 
-    if (nofl_metadata_byte_trace_kind (space, byte) == GC_TRACE_PRECISELY) {
+    if (nofl_metadata_byte_trace_kind(space, byte) == GC_TRACE_PRECISELY) {
       size_t trace_bytes;
       gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes);
       size_t trace_granules = nofl_size_to_granules(trace_bytes);
       GC_ASSERT_EQ(granules, trace_granules);
+
+      nofl_assert_not_forwarded(obj);
     }
 
     meta += granules;
@@ -1428,11 +1445,13 @@ nofl_space_verify_swept_blocks(struct nofl_space *space,
       granules++;
     GC_ASSERT(meta[granules - 1] & NOFL_METADATA_BYTE_END);
 
-    if (nofl_metadata_byte_trace_kind (space, byte) == GC_TRACE_PRECISELY) {
+    if (nofl_metadata_byte_trace_kind(space, byte) == GC_TRACE_PRECISELY) {
       size_t trace_bytes;
       gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes);
       size_t trace_granules = nofl_size_to_granules(trace_bytes);
       GC_ASSERT_EQ(granules, trace_granules);
+
+      nofl_assert_not_forwarded(obj);
     }
 
     meta += granules;
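
For readers following along, below is a minimal standalone sketch (not part of the patch) of the two invariants the diff leans on: that the TRACE_NONE | PINNED byte pattern stays distinct from TRACE_CONSERVATIVELY under the trace-kind mask, and that the corrected clearing loop must advance its induction variable by 8, since each iteration consumes 8 metadata bytes. The constant values are the ones visible in the diff; the TRACE_KIND_MASK value and the memcpy-based load/store stand-ins are assumptions for illustration, not Whippet's actual internals.

/* Hypothetical sketch, not part of the patch.  Demonstrates the bit
   arithmetic behind the pinned-bit changes above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Constants as they appear in the diff; the mask covering the trace
   bits is an assumption for illustration. */
enum {
  YOUNG = 1,
  TRACE_PRECISELY = 0,
  TRACE_NONE = 8,
  TRACE_CONSERVATIVELY = 16,
  PINNED = 16,              /* shares a bit with TRACE_CONSERVATIVELY */
  TRACE_KIND_MASK = 8 | 16  /* assumed mask over the trace bits */
};

/* Broadcast one byte across a 64-bit word, as broadcast_byte does. */
static uint64_t broadcast(uint8_t b) {
  return (uint64_t)b * 0x0101010101010101ULL;
}

int main(void) {
  /* An untagged pointerless allocation carries TRACE_NONE | PINNED,
     which masks to 24 and so cannot be confused with a
     conservatively-traced object (16). */
  uint8_t untagged_pointerless = YOUNG | TRACE_NONE | PINNED;
  assert((untagged_pointerless & TRACE_KIND_MASK) == (TRACE_NONE | PINNED));
  assert((untagged_pointerless & TRACE_KIND_MASK) != TRACE_CONSERVATIVELY);

  /* The corrected loop: each iteration consumes 8 metadata bytes, so
     the induction variable must also advance by 8. */
  enum { GRANULES = 64 };   /* stand-in for NOFL_GRANULES_PER_BLOCK */
  uint8_t meta[GRANULES];
  memset(meta, YOUNG | TRACE_NONE | PINNED, GRANULES);

  uint64_t mask = broadcast(PINNED);
  for (size_t i = 0; i < GRANULES; i += 8) {
    uint64_t vals;
    memcpy(&vals, &meta[i], 8);    /* load_eight_aligned_bytes stand-in */
    if (vals & mask) {
      vals &= ~mask;
      memcpy(&meta[i], &vals, 8);  /* store_eight_aligned_bytes stand-in */
    }
  }
  for (size_t i = 0; i < GRANULES; i++)
    assert(!(meta[i] & PINNED));

  puts("pinned bits cleared; byte patterns distinct");
  return 0;
}

With the old i++ increment the loop ran NOFL_GRANULES_PER_BLOCK iterations while the pointer advanced 8 bytes per iteration, walking 8 times past the end of the block's metadata; stepping i by 8 visits each metadata byte exactly once.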