mirror of https://git.savannah.gnu.org/git/guile.git

Fix ABA problem in the copy space

Same concerns as the previous fix to the nofl space.
Andy Wingo 2024-09-30 12:29:35 +02:00
parent 326e925f4c
commit 691c777e7b
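
Editorial note (not part of the commit message): the block lists here were lock-free Treiber stacks, and their pop is exposed to the classic ABA race. A thread reads the head block A and A's successor B; before its compare-and-swap executes, other threads pop A, pop B, and push A back, so the head is A again but with a different successor. The stalled CAS still sees the expected head A, succeeds, and installs the stale pointer B, corrupting the free list. A minimal sketch of the vulnerable pattern, simplified from the old copy_space_pop_block shown in the diff below (types and names are illustrative, not the project's exact code):

#include <stdatomic.h>
#include <stddef.h>

struct block { struct block *next; };

/* Lock-free pop from a singly linked list of blocks.  The CAS only
   checks that the head pointer is unchanged, not that the successor we
   read is still current, so a concurrent pop/push cycle that reuses the
   same head block lets the CAS succeed with a stale next pointer. */
static struct block*
pop(struct block *_Atomic *list) {
  struct block *head = atomic_load_explicit(list, memory_order_acquire);
  struct block *next;
  do {
    if (!head)
      return NULL;
    next = head->next;  /* may be stale by the time the CAS runs */
  } while (!atomic_compare_exchange_weak(list, &head, next));
  head->next = NULL;
  return head;
}

The change below sidesteps the race for the lists that are popped concurrently with pushes: the empty, partly_full, and paged_out lists become plain copy_space_block_stacks guarded by a pthread mutex, while the full list remains a lock-free copy_space_block_list.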

@@ -1,6 +1,7 @@
 #ifndef COPY_SPACE_H
 #define COPY_SPACE_H
 
+#include <pthread.h>
 #include <stdlib.h>
 #include <sys/mman.h>
@@ -105,13 +106,26 @@ copy_space_object_region(struct gc_ref obj) {
 
 #define COPY_SPACE_PAGE_OUT_QUEUE_SIZE 4
 
+struct copy_space_block_list {
+  struct copy_space_block *head;
+};
+
+struct copy_space_block_stack {
+  struct copy_space_block_list list;
+};
+
+struct copy_space_lock {
+  pthread_mutex_t *lock;
+};
+
 struct copy_space {
-  struct copy_space_block *empty;
-  struct copy_space_block *partly_full;
-  struct copy_space_block *full ALIGNED_TO_AVOID_FALSE_SHARING;
+  pthread_mutex_t lock;
+  struct copy_space_block_stack empty;
+  struct copy_space_block_stack partly_full;
+  struct copy_space_block_list full ALIGNED_TO_AVOID_FALSE_SHARING;
   size_t allocated_bytes;
   size_t fragmentation;
-  struct copy_space_block *paged_out[COPY_SPACE_PAGE_OUT_QUEUE_SIZE]
+  struct copy_space_block_stack paged_out[COPY_SPACE_PAGE_OUT_QUEUE_SIZE]
     ALIGNED_TO_AVOID_FALSE_SHARING;
   ssize_t bytes_to_page_out ALIGNED_TO_AVOID_FALSE_SHARING;
   // The rest of these members are only changed rarely and with the heap
@@ -131,32 +145,72 @@ struct copy_space_allocator {
   struct copy_space_block *block;
 };
 
+static struct copy_space_lock
+copy_space_lock_acquire(pthread_mutex_t *lock) {
+  pthread_mutex_lock(lock);
+  return (struct copy_space_lock){ lock };
+}
+
 static void
-copy_space_push_block(struct copy_space_block **list,
-                      struct copy_space_block *block) {
+copy_space_lock_release(struct copy_space_lock *lock) {
+  GC_ASSERT(lock->lock);
+  pthread_mutex_unlock(lock->lock);
+  lock->lock = NULL;
+}
+
+static struct copy_space_lock
+copy_space_lock(struct copy_space *space) {
+  return copy_space_lock_acquire(&space->lock);
+}
+
+static void
+copy_space_block_list_push(struct copy_space_block_list *list,
+                           struct copy_space_block *block) {
   struct copy_space_block *next =
-    atomic_load_explicit(list, memory_order_acquire);
+    atomic_load_explicit(&list->head, memory_order_acquire);
   do {
     block->next = next;
-  } while (!atomic_compare_exchange_weak(list, &next, block));
+  } while (!atomic_compare_exchange_weak(&list->head, &next, block));
 }
 
 static struct copy_space_block*
-copy_space_pop_block(struct copy_space_block **list) {
+copy_space_block_list_pop(struct copy_space_block_list *list) {
   struct copy_space_block *head =
-    atomic_load_explicit(list, memory_order_acquire);
+    atomic_load_explicit(&list->head, memory_order_acquire);
   struct copy_space_block *next;
   do {
     if (!head)
       return NULL;
-  } while (!atomic_compare_exchange_weak(list, &head, head->next));
+  } while (!atomic_compare_exchange_weak(&list->head, &head, head->next));
   head->next = NULL;
   return head;
 }
 
+static void
+copy_space_block_stack_push(struct copy_space_block_stack *stack,
+                            struct copy_space_block *block,
+                            const struct copy_space_lock *lock) {
+  struct copy_space_block *next = stack->list.head;
+  block->next = next;
+  stack->list.head = block;
+}
+
 static struct copy_space_block*
-copy_space_pop_empty_block(struct copy_space *space) {
-  struct copy_space_block *ret = copy_space_pop_block(&space->empty);
+copy_space_block_stack_pop(struct copy_space_block_stack *stack,
+                           const struct copy_space_lock *lock) {
+  struct copy_space_block *head = stack->list.head;
+  if (head) {
+    stack->list.head = head->next;
+    head->next = NULL;
+  }
+  return head;
+}
+
+static struct copy_space_block*
+copy_space_pop_empty_block(struct copy_space *space,
+                           const struct copy_space_lock *lock) {
+  struct copy_space_block *ret = copy_space_block_stack_pop(&space->empty,
+                                                            lock);
   if (ret)
     ret->allocated = 0;
   return ret;
@@ -164,46 +218,53 @@ copy_space_pop_empty_block(struct copy_space *space) {
 
 static void
 copy_space_push_empty_block(struct copy_space *space,
-                            struct copy_space_block *block) {
-  copy_space_push_block(&space->empty, block);
+                            struct copy_space_block *block,
+                            const struct copy_space_lock *lock) {
+  copy_space_block_stack_push(&space->empty, block, lock);
 }
 
 static struct copy_space_block*
 copy_space_pop_full_block(struct copy_space *space) {
-  return copy_space_pop_block(&space->full);
+  return copy_space_block_list_pop(&space->full);
 }
 
 static void
 copy_space_push_full_block(struct copy_space *space,
                            struct copy_space_block *block) {
-  copy_space_push_block(&space->full, block);
+  copy_space_block_list_push(&space->full, block);
 }
 
 static struct copy_space_block*
-copy_space_pop_partly_full_block(struct copy_space *space) {
-  return copy_space_pop_block(&space->partly_full);
+copy_space_pop_partly_full_block(struct copy_space *space,
+                                 const struct copy_space_lock *lock) {
+  return copy_space_block_stack_pop(&space->partly_full, lock);
 }
 
 static void
 copy_space_push_partly_full_block(struct copy_space *space,
-                                  struct copy_space_block *block) {
-  copy_space_push_block(&space->partly_full, block);
+                                  struct copy_space_block *block,
+                                  const struct copy_space_lock *lock) {
+  copy_space_block_stack_push(&space->partly_full, block, lock);
 }
 
 static void
 copy_space_page_out_block(struct copy_space *space,
-                          struct copy_space_block *block) {
-  copy_space_push_block(block->in_core
-                        ? &space->paged_out[0]
-                        : &space->paged_out[COPY_SPACE_PAGE_OUT_QUEUE_SIZE-1],
-                        block);
+                          struct copy_space_block *block,
+                          const struct copy_space_lock *lock) {
+  copy_space_block_stack_push
+    (block->in_core
+     ? &space->paged_out[0]
+     : &space->paged_out[COPY_SPACE_PAGE_OUT_QUEUE_SIZE-1],
+     block,
+     lock);
 }
 
 static struct copy_space_block*
-copy_space_page_in_block(struct copy_space *space) {
+copy_space_page_in_block(struct copy_space *space,
+                         const struct copy_space_lock *lock) {
   for (int age = 0; age < COPY_SPACE_PAGE_OUT_QUEUE_SIZE; age++) {
     struct copy_space_block *block =
-      copy_space_pop_block(&space->paged_out[age]);
+      copy_space_block_stack_pop(&space->paged_out[age], lock);
     if (block) return block;
   }
   return NULL;
@@ -217,28 +278,32 @@ copy_space_request_release_memory(struct copy_space *space, size_t bytes) {
 
 static int
 copy_space_page_out_blocks_until_memory_released(struct copy_space *space) {
   ssize_t pending = atomic_load(&space->bytes_to_page_out);
+  struct copy_space_lock lock = copy_space_lock(space);
   while (pending > 0) {
-    struct copy_space_block *block = copy_space_pop_empty_block(space);
-    if (!block) return 0;
-    copy_space_page_out_block(space, block);
+    struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
+    if (!block) break;
+    copy_space_page_out_block(space, block, &lock);
     pending = (atomic_fetch_sub(&space->bytes_to_page_out, COPY_SPACE_BLOCK_SIZE)
                - COPY_SPACE_BLOCK_SIZE);
   }
-  return 1;
+  copy_space_lock_release(&lock);
+  return pending <= 0;
 }
 
 static ssize_t
 copy_space_maybe_reacquire_memory(struct copy_space *space, size_t bytes) {
   ssize_t pending =
     atomic_fetch_sub(&space->bytes_to_page_out, bytes) - bytes;
+  struct copy_space_lock lock = copy_space_lock(space);
   while (pending + COPY_SPACE_BLOCK_SIZE <= 0) {
-    struct copy_space_block *block = copy_space_page_in_block(space);
+    struct copy_space_block *block = copy_space_page_in_block(space, &lock);
     if (!block) break;
-    copy_space_push_empty_block(space, block);
+    copy_space_push_empty_block(space, block, &lock);
     pending = (atomic_fetch_add(&space->bytes_to_page_out,
                                 COPY_SPACE_BLOCK_SIZE)
                + COPY_SPACE_BLOCK_SIZE);
   }
+  copy_space_lock_release(&lock);
   return pending;
 }
@@ -273,12 +338,13 @@ copy_space_allocator_acquire_block(struct copy_space_allocator *alloc,
 static int
 copy_space_allocator_acquire_empty_block(struct copy_space_allocator *alloc,
                                          struct copy_space *space) {
-  if (copy_space_allocator_acquire_block(alloc,
-                                         copy_space_pop_empty_block(space),
-                                         space->active_region)) {
-    alloc->block->in_core = 1;
-    if (alloc->block->all_zeroes[space->active_region])
-      alloc->block->all_zeroes[space->active_region] = 0;
+  struct copy_space_lock lock = copy_space_lock(space);
+  struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
+  copy_space_lock_release(&lock);
+  if (copy_space_allocator_acquire_block(alloc, block, space->active_region)) {
+    block->in_core = 1;
+    if (block->all_zeroes[space->active_region])
+      block->all_zeroes[space->active_region] = 0;
     else
       memset((char*)alloc->hp, 0, COPY_SPACE_REGION_SIZE);
     return 1;
@@ -289,10 +355,12 @@ copy_space_allocator_acquire_empty_block(struct copy_space_allocator *alloc,
 static int
 copy_space_allocator_acquire_partly_full_block(struct copy_space_allocator *alloc,
                                                struct copy_space *space) {
-  if (copy_space_allocator_acquire_block(alloc,
-                                         copy_space_pop_partly_full_block(space),
-                                         space->active_region)) {
-    alloc->hp += alloc->block->allocated;
+  struct copy_space_lock lock = copy_space_lock(space);
+  struct copy_space_block *block = copy_space_pop_partly_full_block(space,
+                                                                    &lock);
+  copy_space_lock_release(&lock);
+  if (copy_space_allocator_acquire_block(alloc, block, space->active_region)) {
+    alloc->hp += block->allocated;
     return 1;
   }
   return 0;
@@ -322,7 +390,9 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *allo
                               allocated - alloc->block->allocated,
                               memory_order_relaxed);
     alloc->block->allocated = allocated;
-    copy_space_push_partly_full_block(space, alloc->block);
+    struct copy_space_lock lock = copy_space_lock(space);
+    copy_space_push_partly_full_block(space, alloc->block, &lock);
+    copy_space_lock_release(&lock);
   } else {
     // In this case, hp was bumped all the way to the limit, in which
     // case allocated wraps to 0; the block is full.
@@ -382,12 +452,12 @@ copy_space_append_block_lists(struct copy_space_block *head,
 static void
 copy_space_flip(struct copy_space *space) {
   // Mutators stopped, can access nonatomically.
-  struct copy_space_block *flip = space->full;
-  flip = copy_space_append_block_lists(space->partly_full, flip);
-  flip = copy_space_append_block_lists(space->empty, flip);
-  space->empty = flip;
-  space->partly_full = NULL;
-  space->full = NULL;
+  struct copy_space_block* flip = space->full.head;
+  flip = copy_space_append_block_lists(space->partly_full.list.head, flip);
+  flip = copy_space_append_block_lists(space->empty.list.head, flip);
+  space->empty.list.head = flip;
+  space->partly_full.list.head = NULL;
+  space->full.head = NULL;
   space->allocated_bytes = 0;
   space->fragmentation = 0;
   space->active_region ^= 1;
@@ -621,45 +691,51 @@ copy_space_expand(struct copy_space *space, size_t bytes) {
   struct copy_space_slab *slabs = copy_space_allocate_slabs(nslabs);
   copy_space_add_slabs(space, slabs, nslabs);
 
+  struct copy_space_lock lock = copy_space_lock(space);
   for (size_t slab = 0; slab < nslabs; slab++) {
     for (size_t idx = 0; idx < COPY_SPACE_NONHEADER_BLOCKS_PER_SLAB; idx++) {
       struct copy_space_block *block = &slabs[slab].headers[idx];
       block->all_zeroes[0] = block->all_zeroes[1] = 1;
       block->in_core = 0;
-      copy_space_page_out_block(space, block);
+      copy_space_page_out_block(space, block, &lock);
       reserved -= COPY_SPACE_BLOCK_SIZE;
     }
   }
+  copy_space_lock_release(&lock);
   copy_space_reacquire_memory(space, 0);
 }
 
 static void
 copy_space_advance_page_out_queue(void *data) {
   struct copy_space *space = data;
+  struct copy_space_lock lock = copy_space_lock(space);
   for (int age = COPY_SPACE_PAGE_OUT_QUEUE_SIZE - 3; age >= 0; age--) {
     while (1) {
       struct copy_space_block *block =
-        copy_space_pop_block(&space->paged_out[age]);
+        copy_space_block_stack_pop(&space->paged_out[age], &lock);
       if (!block) break;
-      copy_space_push_block(&space->paged_out[age + 1], block);
+      copy_space_block_stack_push(&space->paged_out[age + 1], block, &lock);
     }
   }
+  copy_space_lock_release(&lock);
 }
 
 static void
 copy_space_page_out_blocks(void *data) {
   struct copy_space *space = data;
   int age = COPY_SPACE_PAGE_OUT_QUEUE_SIZE - 2;
+  struct copy_space_lock lock = copy_space_lock(space);
   while (1) {
     struct copy_space_block *block =
-      copy_space_pop_block(&space->paged_out[age]);
+      copy_space_block_stack_pop(&space->paged_out[age], &lock);
     if (!block) break;
     block->in_core = 0;
     block->all_zeroes[0] = block->all_zeroes[1] = 1;
     madvise(copy_space_block_payload(block), COPY_SPACE_BLOCK_SIZE,
             MADV_DONTNEED);
-    copy_space_push_block(&space->paged_out[age + 1], block);
+    copy_space_block_stack_push(&space->paged_out[age + 1], block, &lock);
   }
+  copy_space_lock_release(&lock);
 }
 
 static int
@@ -672,11 +748,12 @@ copy_space_init(struct copy_space *space, size_t size, int atomic,
   if (!slabs)
     return 0;
 
-  space->empty = NULL;
-  space->partly_full = NULL;
-  space->full = NULL;
+  pthread_mutex_init(&space->lock, NULL);
+  space->empty.list.head = NULL;
+  space->partly_full.list.head = NULL;
+  space->full.head = NULL;
   for (int age = 0; age < COPY_SPACE_PAGE_OUT_QUEUE_SIZE; age++)
-    space->paged_out[age] = NULL;
+    space->paged_out[age].list.head = NULL;
   space->allocated_bytes = 0;
   space->fragmentation = 0;
   space->bytes_to_page_out = 0;
@@ -686,19 +763,21 @@ copy_space_init(struct copy_space *space, size_t size, int atomic,
   space->fragmentation_at_last_gc = 0;
   space->extents = extents_allocate(10);
   copy_space_add_slabs(space, slabs, nslabs);
+  struct copy_space_lock lock = copy_space_lock(space);
   for (size_t slab = 0; slab < nslabs; slab++) {
     for (size_t idx = 0; idx < COPY_SPACE_NONHEADER_BLOCKS_PER_SLAB; idx++) {
       struct copy_space_block *block = &slabs[slab].headers[idx];
       block->all_zeroes[0] = block->all_zeroes[1] = 1;
      block->in_core = 0;
       if (reserved > size) {
-        copy_space_page_out_block(space, block);
+        copy_space_page_out_block(space, block, &lock);
         reserved -= COPY_SPACE_BLOCK_SIZE;
       } else {
-        copy_space_push_empty_block(space, block);
+        copy_space_push_empty_block(space, block, &lock);
       }
     }
   }
+  copy_space_lock_release(&lock);
   gc_background_thread_add_task(thread, GC_BACKGROUND_TASK_START,
                                 copy_space_advance_page_out_queue,
                                 space);
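
Editorial note on the locking pattern: copy_space_lock() returns a copy_space_lock value wrapping the held pthread_mutex_t, and the block-stack helpers take a const struct copy_space_lock * that they do not otherwise use; it appears to serve as a witness that the caller holds the space lock. A hypothetical caller following the same shape as the acquire paths in this diff (a sketch, not code from the patch):

/* Sketch only: take the space lock, thread the lock witness through the
   stack operation that requires it, then release. */
static struct copy_space_block*
copy_space_take_one_empty_block(struct copy_space *space) {
  struct copy_space_lock lock = copy_space_lock(space);
  struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
  copy_space_lock_release(&lock);
  return block;
}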