Mirror of https://git.savannah.gnu.org/git/guile.git (synced 2025-04-30 11:50:28 +02:00)
pcc / copy-space: Allow allocations to fail
This fixes an issue in which minor collection of a nursery full of live data can fail because of fragmentation, whereas really it should just fall back to promotion.
parent 5fdb14cc5e
commit ba65e32b00
2 changed files with 104 additions and 47 deletions
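For intuition on the failure mode the message describes, here is a self-contained toy, not part of the commit and using made-up block and object sizes, showing how copying the same survivors in a different order can need more fixed-size blocks than they occupied before collection started:

#include <stdio.h>
#include <stddef.h>

// Count how many fixed-size blocks a bump-pointer copy needs when objects
// are copied in the given order, assuming no object spans a block boundary.
static size_t blocks_needed(const size_t *sizes, size_t n, size_t block) {
  size_t blocks = 1, used = 0;
  for (size_t i = 0; i < n; i++) {
    if (used + sizes[i] > block) { blocks++; used = 0; }  // start a new block
    used += sizes[i];
  }
  return blocks;
}

int main(void) {
  const size_t block = 1024;  // hypothetical block size
  // The same live data in two copy orders (roughly: allocation order vs.
  // the order objects happen to be reached during evacuation).
  const size_t before[] = { 600, 400, 600, 400, 600, 400 };  // pairs share a block
  const size_t during[] = { 600, 600, 600, 400, 400, 400 };  // 600s cannot share
  size_t n = sizeof(before) / sizeof(before[0]);
  printf("blocks before: %zu\n", blocks_needed(before, n, block));  // prints 3
  printf("blocks during: %zu\n", blocks_needed(during, n, block));  // prints 4
  return 0;
}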
@@ -149,6 +149,17 @@ struct copy_space {
   size_t nslabs;
 };
 
+enum copy_space_forward_result {
+  // We went to forward an edge, but the target was already forwarded, so we
+  // just updated the edge.
+  COPY_SPACE_FORWARD_UPDATED,
+  // We went to forward an edge and evacuated the referent to a new location.
+  COPY_SPACE_FORWARD_EVACUATED,
+  // We went to forward an edge but failed to acquire memory for its new
+  // location.
+  COPY_SPACE_FORWARD_FAILED,
+};
+
 struct copy_space_allocator {
   uintptr_t hp;
   uintptr_t limit;
@@ -473,9 +484,7 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *allo
 static inline struct gc_ref
 copy_space_allocate(struct copy_space_allocator *alloc,
                     struct copy_space *space,
-                    size_t size,
-                    void (*get_more_empty_blocks)(void *data),
-                    void *data) {
+                    size_t size) {
   GC_ASSERT(size > 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, gc_allocator_small_granule_size());
@@ -490,8 +499,8 @@ copy_space_allocate(struct copy_space_allocator *alloc,
       goto done;
     copy_space_allocator_release_full_block(alloc, space);
   }
-  while (!copy_space_allocator_acquire_empty_block(alloc, space))
-    get_more_empty_blocks(data);
+  if (!copy_space_allocator_acquire_empty_block(alloc, space))
+    return gc_ref_null();
   // The newly acquired block is empty and is therefore large enough for
   // a small allocation.
 
@@ -588,12 +597,13 @@ copy_space_gc_during_evacuation(void *data) {
   GC_CRASH();
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
                           struct gc_ref old_ref,
                           struct copy_space_allocator *alloc) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
 
+retry:
   if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
     gc_atomic_forward_acquire(&fwd);
 
@@ -605,33 +615,34 @@ copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
   case GC_FORWARDING_STATE_ACQUIRED: {
     // We claimed the object successfully; evacuating is up to us.
     size_t bytes = gc_atomic_forward_object_size(&fwd);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, bytes,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, bytes);
+    if (gc_ref_is_null(new_ref)) {
+      gc_atomic_forward_abort(&fwd);
+      return COPY_SPACE_FORWARD_FAILED;
+    }
     // Copy object contents before committing, as we don't know what
     // part of the object (if any) will be overwritten by the
     // commit.
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), bytes);
     gc_atomic_forward_commit(&fwd, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
   case GC_FORWARDING_STATE_BUSY:
     // Someone else claimed this object first.  Spin until new address
     // known, or evacuation aborts.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     // The object has been evacuated already.  Update the edge;
     // whoever forwarded the object will make sure it's eventually
     // traced.
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   }
 }
 
@@ -640,6 +651,7 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
                                     struct gc_edge edge,
                                     struct gc_ref old_ref) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
+retry:
   switch (fwd.state) {
   case GC_FORWARDING_STATE_NOT_FORWARDED:
     return 0;
@@ -648,11 +660,10 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
     // known.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
     return 1;
@@ -661,24 +672,24 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
   }
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_nonatomic(struct copy_space *space, struct gc_edge edge,
                              struct gc_ref old_ref,
                              struct copy_space_allocator *alloc) {
   uintptr_t forwarded = gc_object_forwarded_nonatomic(old_ref);
   if (forwarded) {
     gc_edge_update(edge, gc_ref(forwarded));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
  } else {
     size_t size;
     gc_trace_object(old_ref, NULL, NULL, NULL, &size);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, size,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, size);
+    if (gc_ref_is_null(new_ref))
+      return COPY_SPACE_FORWARD_FAILED;
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), size);
     gc_object_forward_nonatomic(old_ref, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
 }
 
@@ -694,7 +705,7 @@ copy_space_forward_if_traced_nonatomic(struct copy_space *space,
   return 0;
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward(struct copy_space *src_space, struct copy_space *dst_space,
                    struct gc_edge edge,
                    struct gc_ref old_ref,
src/pcc.c (80 changed lines)

@@ -287,6 +287,31 @@ static inline int edge_is_from_survivor(struct gc_heap *heap,
   return copy_space_contains_edge_aligned(heap_new_space(heap), edge);
 }
 
+static inline int forward(struct copy_space *src_space,
+                          struct copy_space *dst_space,
+                          struct gc_edge edge,
+                          struct gc_ref ref,
+                          struct copy_space_allocator *dst_alloc) {
+  switch (copy_space_forward(src_space, dst_space, edge, ref, dst_alloc)) {
+  case COPY_SPACE_FORWARD_UPDATED:
+    return 0;
+  case COPY_SPACE_FORWARD_EVACUATED:
+    return 1;
+  case COPY_SPACE_FORWARD_FAILED:
+    // If space is really tight and reordering of objects during evacuation
+    // resulted in more end-of-block fragmentation and thus block use than
+    // before collection started, we can actually run out of memory while
+    // collecting.  We should probably attempt to expand the heap here, at
+    // least by a single block; it's better than the alternatives.  For now,
+    // abort.
+    fprintf(stderr, "Out of memory\n");
+    GC_CRASH();
+    break;
+  default:
+    GC_CRASH();
+  }
+}
+
 static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
                                  struct gc_ref ref,
                                  struct gc_trace_worker_data *data) {
@@ -324,16 +349,32 @@ static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
     // However however, it is hard to distinguish between edges from promoted
     // objects and edges from old objects, so we mostly just rely on an
     // idempotent "log if unlogged" operation instead.
-    int promote = copy_space_should_promote(new_space, ref);
-    struct copy_space *dst_space = promote ? old_space : new_space;
-    struct copy_space_allocator *alloc = promote
-      ? trace_worker_old_space_allocator(data)
-      : trace_worker_new_space_allocator(data);
-    // Update the remembered set for promoted-to-survivor edges.
-    if (!promote && !edge_is_from_survivor(heap, edge)
-        && remember_edge_to_survivor_object(heap, edge))
-      gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
-    return copy_space_forward(new_space, dst_space, edge, ref, alloc);
+    if (!copy_space_should_promote(new_space, ref)) {
+      // Try to leave the object in newspace as a survivor.  If the edge is from
+      // a promoted object, we will need to add it to the remembered set.
+      if (!edge_is_from_survivor(heap, edge)
+          && remember_edge_to_survivor_object(heap, edge)) {
+        // Log the edge even though in rare conditions the referent could end up
+        // being promoted by us (if we run out of newspace) or a remote
+        // evacuation thread (if they run out of newspace).
+        gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
+      }
+      switch (copy_space_forward(new_space, new_space, edge, ref,
+                                 trace_worker_new_space_allocator(data))) {
+      case COPY_SPACE_FORWARD_UPDATED:
+        return 0;
+      case COPY_SPACE_FORWARD_EVACUATED:
+        return 1;
+      case COPY_SPACE_FORWARD_FAILED:
+        // Ran out of newspace!  Fall through to promote instead.
+        break;
+      default:
+        GC_CRASH();
+      }
+    }
+    // Promote the object.
+    return forward(new_space, old_space, edge, ref,
+                   trace_worker_old_space_allocator(data));
   } else {
     // Note that although the target of the edge might not be in lospace, this
     // will do what we want and return 1 if and only if ref is was a young
@@ -354,14 +395,14 @@ static inline int do_trace(struct gc_heap *heap, struct gc_edge edge,
     struct copy_space *new_space = heap_new_space(heap);
     struct copy_space *old_space = heap_old_space(heap);
     if (new_space_contains(heap, ref))
-      return copy_space_forward(new_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(new_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
     if (old_space_contains(heap, ref))
-      return copy_space_forward(old_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(old_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
   } else {
     if (GC_LIKELY(copy_space_contains(heap_mono_space(heap), ref)))
-      return copy_space_forward(heap_mono_space(heap), heap_mono_space(heap),
-                                edge, ref,
-                                trace_worker_mono_space_allocator(data));
+      return forward(heap_mono_space(heap), heap_mono_space(heap),
+                     edge, ref,
+                     trace_worker_mono_space_allocator(data));
   }
@@ -916,12 +957,17 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   if (size > gc_allocator_large_threshold())
     return allocate_large(mut, size);
 
-  struct gc_ref ret =
-    copy_space_allocate(&mut->allocator,
-                        heap_allocation_space(mutator_heap(mut)),
-                        size,
-                        get_more_empty_blocks_for_mutator,
-                        mut);
+  struct gc_ref ret;
+  while (1) {
+    ret = copy_space_allocate(&mut->allocator,
+                              heap_allocation_space(mutator_heap(mut)),
+                              size);
+    if (gc_ref_is_null(ret))
+      trigger_collection(mut, GC_COLLECTION_MINOR);
+    else
+      break;
+  }
+
   gc_clear_fresh_allocation(ret, size);
   return gc_ref_heap_object(ret);
 }
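In sum, copy_space_allocate can now return a null ref instead of forcing more blocks into existence from inside the allocator, the forwarding routines report that as COPY_SPACE_FORWARD_FAILED, and the callers recover: the minor trace falls back to promotion and the mutator slow path triggers a minor collection and retries. The sketch below condenses that control flow for the minor-trace side; it is illustrative only, assumes whippet's internal pcc.c context, and uses a hypothetical name (minor_trace_sketch) alongside identifiers taken from the hunks above. The real code is in do_minor_trace and gc_allocate_slow.

// Hypothetical condensation; see the src/pcc.c hunks above for the real code.
static int minor_trace_sketch(struct gc_heap *heap, struct gc_edge edge,
                              struct gc_ref ref,
                              struct gc_trace_worker_data *data) {
  struct copy_space *new_space = heap_new_space(heap);
  struct copy_space *old_space = heap_old_space(heap);
  switch (copy_space_forward(new_space, new_space, edge, ref,
                             trace_worker_new_space_allocator(data))) {
  case COPY_SPACE_FORWARD_UPDATED:
    return 0;  // target already forwarded; edge fixed up
  case COPY_SPACE_FORWARD_EVACUATED:
    return 1;  // survivor stays in newspace
  case COPY_SPACE_FORWARD_FAILED:
    // Newspace is out of empty blocks: fall back to promotion into oldspace.
    return forward(new_space, old_space, edge, ref,
                   trace_worker_old_space_allocator(data));
  default:
    GC_CRASH();
  }
}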