1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-07-05 17:20:18 +02:00

Merge remote-tracking branch 'whippet/main' into wip-whippet

This commit is contained in:
Andy Wingo 2025-07-03 10:22:30 +02:00
commit 9dbc673778
3 changed files with 30 additions and 11 deletions

View file

@@ -35,6 +35,7 @@ static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_
uint8_t trace_precisely = 0; uint8_t trace_precisely = 0;
uint8_t trace_none = 8; uint8_t trace_none = 8;
uint8_t trace_conservatively = 16; uint8_t trace_conservatively = 16;
uint8_t pinned = 16;
switch (kind) { switch (kind) {
case GC_ALLOCATION_TAGGED: case GC_ALLOCATION_TAGGED:
return young | trace_precisely; return young | trace_precisely;
@@ -43,8 +44,9 @@ static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_
GC_CRASH (); GC_CRASH ();
return young | trace_conservatively; return young | trace_conservatively;
case GC_ALLOCATION_TAGGED_POINTERLESS: case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS:
return young | trace_none; return young | trace_none;
case GC_ALLOCATION_UNTAGGED_POINTERLESS:
return young | trace_none | pinned;
default: default:
GC_CRASH(); GC_CRASH();
} }

View file

@@ -617,7 +617,7 @@ grow_heap_if_necessary(struct gc_heap *heap,
// If we cannot defragment and are making no progress but have a // If we cannot defragment and are making no progress but have a
// growable heap, expand by 25% to add some headroom. // growable heap, expand by 25% to add some headroom.
size_t needed_headroom = size_t needed_headroom =
GC_CONSERVATIVE_TRACE nofl_space_heap_has_ambiguous_edges (nofl)
? (progress ? 0 : nofl_active_block_count (nofl) * NOFL_BLOCK_SIZE / 4) ? (progress ? 0 : nofl_active_block_count (nofl) * NOFL_BLOCK_SIZE / 4)
: 0; : 0;
size_t headroom = nofl_empty_block_count(nofl) * NOFL_BLOCK_SIZE; size_t headroom = nofl_empty_block_count(nofl) * NOFL_BLOCK_SIZE;
@@ -1031,8 +1031,6 @@ compute_trace_kind(enum gc_allocation_kind kind) {
case GC_ALLOCATION_TAGGED: case GC_ALLOCATION_TAGGED:
return GC_TRACE_PRECISELY; return GC_TRACE_PRECISELY;
case GC_ALLOCATION_UNTAGGED_CONSERVATIVE: case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
if (!GC_CONSERVATIVE_TRACE)
GC_CRASH ();
return GC_TRACE_CONSERVATIVELY; return GC_TRACE_CONSERVATIVELY;
case GC_ALLOCATION_TAGGED_POINTERLESS: case GC_ALLOCATION_TAGGED_POINTERLESS:
case GC_ALLOCATION_UNTAGGED_POINTERLESS: case GC_ALLOCATION_UNTAGGED_POINTERLESS:
@@ -1359,7 +1357,7 @@ gc_init(const struct gc_options *options, struct gc_stack_addr stack_base,
GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_CONSERVATIVE), GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_CONSERVATIVE),
NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY); NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY);
GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_POINTERLESS), GC_ASSERT_EQ(gc_allocator_alloc_table_begin_pattern(GC_ALLOCATION_UNTAGGED_POINTERLESS),
NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_NONE); NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_TRACE_NONE | NOFL_METADATA_BYTE_PINNED);
GC_ASSERT_EQ(gc_allocator_alloc_table_end_pattern(), NOFL_METADATA_BYTE_END); GC_ASSERT_EQ(gc_allocator_alloc_table_end_pattern(), NOFL_METADATA_BYTE_END);
if (GC_GENERATIONAL) { if (GC_GENERATIONAL) {
GC_ASSERT_EQ(gc_write_barrier_field_table_alignment(), NOFL_SLAB_SIZE); GC_ASSERT_EQ(gc_write_barrier_field_table_alignment(), NOFL_SLAB_SIZE);

View file

@@ -984,7 +984,7 @@ static void
nofl_clear_pinned_bits_in_block(struct nofl_block_ref block) { nofl_clear_pinned_bits_in_block(struct nofl_block_ref block) {
uint8_t *meta = nofl_metadata_byte_for_addr(block.addr); uint8_t *meta = nofl_metadata_byte_for_addr(block.addr);
uint64_t mask = broadcast_byte (NOFL_METADATA_BYTE_PINNED); uint64_t mask = broadcast_byte (NOFL_METADATA_BYTE_PINNED);
for (size_t i = 0; i < NOFL_GRANULES_PER_BLOCK; i++, meta += 8) { for (size_t i = 0; i < NOFL_GRANULES_PER_BLOCK; i += 8, meta += 8) {
uint64_t vals = load_eight_aligned_bytes(meta); uint64_t vals = load_eight_aligned_bytes(meta);
if (vals & mask) if (vals & mask)
store_eight_aligned_bytes(meta, vals & ~mask); store_eight_aligned_bytes(meta, vals & ~mask);
@@ -1045,6 +1045,7 @@ nofl_space_set_heap_has_ambiguous_edges (struct nofl_space *space)
// conservatively-traced. Ideally we would have another bit here. For now, // conservatively-traced. Ideally we would have another bit here. For now,
// race to clear all pinned bits. // race to clear all pinned bits.
nofl_space_clear_all_pinned_bits (space); nofl_space_clear_all_pinned_bits (space);
space->evacuation_minimum_reserve = space->evacuation_reserve = 0.0;
} }
} }
@@ -1357,15 +1358,29 @@ nofl_metadata_byte_trace_kind(struct nofl_space *space, uint8_t byte)
switch (byte & mask) { switch (byte & mask) {
case NOFL_METADATA_BYTE_TRACE_PRECISELY: case NOFL_METADATA_BYTE_TRACE_PRECISELY:
return GC_TRACE_PRECISELY; return GC_TRACE_PRECISELY;
case NOFL_METADATA_BYTE_TRACE_NONE:
return GC_TRACE_NONE;
case NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY: case NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY:
return GC_TRACE_CONSERVATIVELY; return GC_TRACE_CONSERVATIVELY;
case NOFL_METADATA_BYTE_TRACE_NONE:
return GC_TRACE_NONE;
default: default:
GC_CRASH(); /* Untagged pointerless objects are allocated with the PINNED bit,
because we can't relocate them, because we don't have a tag word
to hold the forwarding state. Fortunately, this bit pattern is
different from NOFL_METADATA_BYTE_TRACE_CONSERVATIVELY; we can
just leave it as-is. */
GC_ASSERT_EQ (byte & mask,
NOFL_METADATA_BYTE_TRACE_NONE | NOFL_METADATA_BYTE_PINNED);
return GC_TRACE_NONE;
} }
} }
static void
nofl_assert_not_forwarded(struct gc_ref ref)
{
struct gc_atomic_forward fwd = gc_atomic_forward_begin(ref);
GC_ASSERT_EQ(fwd.state, GC_FORWARDING_STATE_NOT_FORWARDED);
}
static void static void
nofl_space_verify_sweepable_blocks(struct nofl_space *space, nofl_space_verify_sweepable_blocks(struct nofl_space *space,
struct nofl_block_list *list) struct nofl_block_list *list)
@@ -1393,6 +1408,8 @@ nofl_space_verify_sweepable_blocks(struct nofl_space *space,
gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes); gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes);
size_t trace_granules = nofl_size_to_granules(trace_bytes); size_t trace_granules = nofl_size_to_granules(trace_bytes);
GC_ASSERT_EQ(granules, trace_granules); GC_ASSERT_EQ(granules, trace_granules);
nofl_assert_not_forwarded(obj);
} }
meta += granules; meta += granules;
@@ -1433,6 +1450,8 @@ nofl_space_verify_swept_blocks(struct nofl_space *space,
gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes); gc_trace_object(obj, NULL, NULL, NULL, &trace_bytes);
size_t trace_granules = nofl_size_to_granules(trace_bytes); size_t trace_granules = nofl_size_to_granules(trace_bytes);
GC_ASSERT_EQ(granules, trace_granules); GC_ASSERT_EQ(granules, trace_granules);
nofl_assert_not_forwarded(obj);
} }
meta += granules; meta += granules;