Mirror of https://git.savannah.gnu.org/git/guile.git

Factor out locking utils to separate header

Andy Wingo 2024-09-30 20:52:45 +02:00
parent 691c777e7b
commit 3955d2ad96
3 changed files with 126 additions and 106 deletions
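In brief: copy-space.h and nofl-space.h each carried a private copy of the same mutex wrapper (struct copy_space_lock and struct nofl_lock, respectively); this commit replaces both with a single struct gc_lock in the new header src/gc-lock.h and updates every acquire/release call site to match. A usage sketch follows the src/gc-lock.h listing below.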

src/copy-space.h

@@ -17,6 +17,7 @@
#include "gc-align.h"
#include "gc-attrs.h"
#include "gc-inline.h"
#include "gc-lock.h"
#include "spin.h"
// A copy space: a block-structured space that traces via evacuation.
@@ -114,10 +115,6 @@ struct copy_space_block_stack {
struct copy_space_block_list list;
};
-struct copy_space_lock {
-pthread_mutex_t *lock;
-};
struct copy_space {
pthread_mutex_t lock;
struct copy_space_block_stack empty;
@@ -145,22 +142,9 @@ struct copy_space_allocator {
struct copy_space_block *block;
};
-static struct copy_space_lock
-copy_space_lock_acquire(pthread_mutex_t *lock) {
-pthread_mutex_lock(lock);
-return (struct copy_space_lock){ lock };
-}
-static void
-copy_space_lock_release(struct copy_space_lock *lock) {
-GC_ASSERT(lock->lock);
-pthread_mutex_unlock(lock->lock);
-lock->lock = NULL;
-}
-static struct copy_space_lock
+static struct gc_lock
copy_space_lock(struct copy_space *space) {
-return copy_space_lock_acquire(&space->lock);
+return gc_lock_acquire(&space->lock);
}
static void
@@ -189,7 +173,7 @@ copy_space_block_list_pop(struct copy_space_block_list *list) {
static void
copy_space_block_stack_push(struct copy_space_block_stack *stack,
struct copy_space_block *block,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
struct copy_space_block *next = stack->list.head;
block->next = next;
stack->list.head = block;
@@ -197,7 +181,7 @@ copy_space_block_stack_push(struct copy_space_block_stack *stack,
static struct copy_space_block*
copy_space_block_stack_pop(struct copy_space_block_stack *stack,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
struct copy_space_block *head = stack->list.head;
if (head) {
stack->list.head = head->next;
@@ -208,7 +192,7 @@ copy_space_block_stack_pop(struct copy_space_block_stack *stack,
static struct copy_space_block*
copy_space_pop_empty_block(struct copy_space *space,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
struct copy_space_block *ret = copy_space_block_stack_pop(&space->empty,
lock);
if (ret)
@@ -219,7 +203,7 @@ copy_space_pop_empty_block(struct copy_space *space,
static void
copy_space_push_empty_block(struct copy_space *space,
struct copy_space_block *block,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
copy_space_block_stack_push(&space->empty, block, lock);
}
@@ -236,21 +220,21 @@ copy_space_push_full_block(struct copy_space *space,
static struct copy_space_block*
copy_space_pop_partly_full_block(struct copy_space *space,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
return copy_space_block_stack_pop(&space->partly_full, lock);
}
static void
copy_space_push_partly_full_block(struct copy_space *space,
struct copy_space_block *block,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
copy_space_block_stack_push(&space->partly_full, block, lock);
}
static void
copy_space_page_out_block(struct copy_space *space,
struct copy_space_block *block,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
copy_space_block_stack_push
(block->in_core
? &space->paged_out[0]
@@ -261,7 +245,7 @@ copy_space_page_out_block(struct copy_space *space,
static struct copy_space_block*
copy_space_page_in_block(struct copy_space *space,
-const struct copy_space_lock *lock) {
+const struct gc_lock *lock) {
for (int age = 0; age < COPY_SPACE_PAGE_OUT_QUEUE_SIZE; age++) {
struct copy_space_block *block =
copy_space_block_stack_pop(&space->paged_out[age], lock);
@@ -278,7 +262,7 @@ copy_space_request_release_memory(struct copy_space *space, size_t bytes) {
static int
copy_space_page_out_blocks_until_memory_released(struct copy_space *space) {
ssize_t pending = atomic_load(&space->bytes_to_page_out);
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
while (pending > 0) {
struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
if (!block) break;
@@ -286,7 +270,7 @@ copy_space_page_out_blocks_until_memory_released(struct copy_space *space) {
pending = (atomic_fetch_sub(&space->bytes_to_page_out, COPY_SPACE_BLOCK_SIZE)
- COPY_SPACE_BLOCK_SIZE);
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
return pending <= 0;
}
@@ -294,7 +278,7 @@ static ssize_t
copy_space_maybe_reacquire_memory(struct copy_space *space, size_t bytes) {
ssize_t pending =
atomic_fetch_sub(&space->bytes_to_page_out, bytes) - bytes;
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
while (pending + COPY_SPACE_BLOCK_SIZE <= 0) {
struct copy_space_block *block = copy_space_page_in_block(space, &lock);
if (!block) break;
@ -303,7 +287,7 @@ copy_space_maybe_reacquire_memory(struct copy_space *space, size_t bytes) {
COPY_SPACE_BLOCK_SIZE)
+ COPY_SPACE_BLOCK_SIZE);
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
return pending;
}
@@ -338,9 +322,9 @@ copy_space_allocator_acquire_block(struct copy_space_allocator *alloc,
static int
copy_space_allocator_acquire_empty_block(struct copy_space_allocator *alloc,
struct copy_space *space) {
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
struct copy_space_block *block = copy_space_pop_empty_block(space, &lock);
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
if (copy_space_allocator_acquire_block(alloc, block, space->active_region)) {
block->in_core = 1;
if (block->all_zeroes[space->active_region])
@@ -355,10 +339,10 @@ copy_space_allocator_acquire_empty_block(struct copy_space_allocator *alloc,
static int
copy_space_allocator_acquire_partly_full_block(struct copy_space_allocator *alloc,
struct copy_space *space) {
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
struct copy_space_block *block = copy_space_pop_partly_full_block(space,
&lock);
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
if (copy_space_allocator_acquire_block(alloc, block, space->active_region)) {
alloc->hp += block->allocated;
return 1;
@@ -390,9 +374,9 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *alloc,
allocated - alloc->block->allocated,
memory_order_relaxed);
alloc->block->allocated = allocated;
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
copy_space_push_partly_full_block(space, alloc->block, &lock);
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
} else {
// In this case, hp was bumped all the way to the limit, in which
// case allocated wraps to 0; the block is full.
@@ -691,7 +675,7 @@ copy_space_expand(struct copy_space *space, size_t bytes) {
struct copy_space_slab *slabs = copy_space_allocate_slabs(nslabs);
copy_space_add_slabs(space, slabs, nslabs);
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
for (size_t slab = 0; slab < nslabs; slab++) {
for (size_t idx = 0; idx < COPY_SPACE_NONHEADER_BLOCKS_PER_SLAB; idx++) {
struct copy_space_block *block = &slabs[slab].headers[idx];
@@ -701,14 +685,14 @@ copy_space_expand(struct copy_space *space, size_t bytes) {
reserved -= COPY_SPACE_BLOCK_SIZE;
}
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
copy_space_reacquire_memory(space, 0);
}
static void
copy_space_advance_page_out_queue(void *data) {
struct copy_space *space = data;
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
for (int age = COPY_SPACE_PAGE_OUT_QUEUE_SIZE - 3; age >= 0; age--) {
while (1) {
struct copy_space_block *block =
@@ -717,14 +701,14 @@ copy_space_advance_page_out_queue(void *data) {
copy_space_block_stack_push(&space->paged_out[age + 1], block, &lock);
}
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
}
static void
copy_space_page_out_blocks(void *data) {
struct copy_space *space = data;
int age = COPY_SPACE_PAGE_OUT_QUEUE_SIZE - 2;
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
while (1) {
struct copy_space_block *block =
copy_space_block_stack_pop(&space->paged_out[age], &lock);
@@ -735,7 +719,7 @@ copy_space_page_out_blocks(void *data) {
MADV_DONTNEED);
copy_space_block_stack_push(&space->paged_out[age + 1], block, &lock);
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
}
static int
@@ -763,7 +747,7 @@ copy_space_init(struct copy_space *space, size_t size, int atomic,
space->fragmentation_at_last_gc = 0;
space->extents = extents_allocate(10);
copy_space_add_slabs(space, slabs, nslabs);
-struct copy_space_lock lock = copy_space_lock(space);
+struct gc_lock lock = copy_space_lock(space);
for (size_t slab = 0; slab < nslabs; slab++) {
for (size_t idx = 0; idx < COPY_SPACE_NONHEADER_BLOCKS_PER_SLAB; idx++) {
struct copy_space_block *block = &slabs[slab].headers[idx];
@@ -777,7 +761,7 @@ copy_space_init(struct copy_space *space, size_t size, int atomic,
}
}
}
-copy_space_lock_release(&lock);
+gc_lock_release(&lock);
gc_background_thread_add_task(thread, GC_BACKGROUND_TASK_START,
copy_space_advance_page_out_queue,
space);

src/gc-lock.h (new file, 24 lines)

@@ -0,0 +1,24 @@
#ifndef GC_LOCK_H
#define GC_LOCK_H
#include <pthread.h>
#include "gc-assert.h"
struct gc_lock {
pthread_mutex_t *lock;
};
static struct gc_lock
gc_lock_acquire(pthread_mutex_t *lock) {
pthread_mutex_lock(lock);
return (struct gc_lock){ lock };
}
static void
gc_lock_release(struct gc_lock *lock) {
GC_ASSERT(lock->lock);
pthread_mutex_unlock(lock->lock);
lock->lock = NULL;
}
#endif // GC_LOCK_H
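Every call site in this commit follows the same shape: take the space's mutex with gc_lock_acquire, manipulate the guarded block lists, then gc_lock_release. Below is a minimal standalone sketch of that pattern; it inlines the header's two functions so it compiles on its own (with assert standing in for GC_ASSERT), and the demo_space type is a hypothetical stand-in for the real spaces.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct gc_lock { pthread_mutex_t *lock; };

static struct gc_lock
gc_lock_acquire(pthread_mutex_t *lock) {
  pthread_mutex_lock(lock);
  return (struct gc_lock){ lock };
}

static void
gc_lock_release(struct gc_lock *lock) {
  assert(lock->lock);
  pthread_mutex_unlock(lock->lock);
  lock->lock = NULL;  // Poison so a double release trips the assert.
}

// Hypothetical stand-in for a space whose free-block count the lock guards.
struct demo_space { pthread_mutex_t lock; int free_blocks; };

static void
demo_space_return_block(struct demo_space *space) {
  struct gc_lock lock = gc_lock_acquire(&space->lock);
  space->free_blocks++;  // Mutate shared state only while the lock is held.
  gc_lock_release(&lock);
}

int main(void) {
  struct demo_space space = { PTHREAD_MUTEX_INITIALIZER, 0 };
  demo_space_return_block(&space);
  printf("free blocks: %d\n", space.free_blocks);  // prints 1
  return 0;
}

Compile with -pthread. Passing the lock down by pointer (the const struct gc_lock * parameters throughout the diff) lets the push/pop helpers document, in their signatures, that the caller must already hold the mutex.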

src/nofl-space.h

@@ -18,6 +18,7 @@
#include "gc-align.h"
#include "gc-attrs.h"
#include "gc-inline.h"
#include "gc-lock.h"
#include "spin.h"
#include "swar.h"
@@ -146,10 +147,6 @@ struct nofl_block_stack {
struct nofl_block_list list;
};
-struct nofl_lock {
-pthread_mutex_t *lock;
-};
#define NOFL_PAGE_OUT_QUEUE_SIZE 4
struct nofl_space {
@@ -208,15 +205,33 @@ struct nofl_allocator {
//
// When an object becomes dead after a GC, it will still have a bit set
// -- maybe the young bit, or maybe a survivor bit. The sweeper has to
-// clear these bits before the next collection. But, for concurrent
-// marking, we will also be marking "live" objects, updating their mark
-// bits. So there are four object states concurrently observable:
-// young, dead, survivor, and marked. (If we didn't have concurrent
-// marking we would still need the "marked" state, because marking
-// mutator roots before stopping is also a form of concurrent marking.)
-// Even though these states are mutually exclusive, we use separate bits
-// for them because we have the space. After each collection, the dead,
-// survivor, and marked states rotate by one bit.
+// clear these bits before the next collection. But if we add
+// concurrent marking, we will also be marking "live" objects, updating
+// their mark bits. So there are four object states concurrently
+// observable: young, dead, survivor, and marked. (We don't currently
+// have concurrent marking, though.) Even though these states are
+// mutually exclusive, we use separate bits for them because we have the
+// space. After each collection, the dead, survivor, and marked states
+// rotate by one bit.
+//
+// An object can be pinned, preventing it from being evacuated during
+// collection. Pinning does not keep the object alive; if it is
+// otherwise unreachable, it will be collected. To pin an object, a
+// running mutator can set the pinned bit, using atomic
+// compare-and-swap.
+//
+// For generational collectors, the nofl space supports a field-logging
+// write barrier. The two logging bits correspond to the two words in a
+// granule. When a field is written to, the write barrier should check
+// the logged bit; if it is unset, it should try to atomically set the
+// bit, and if that works, then we record the field location as a
+// generational root, adding it to a sequential-store buffer.
+//
+// Finally, for heap-conservative collectors, nofl generally traces all
+// objects in the same way, treating them as an array of conservative
+// edges. But we need to know when we have an ephemeron. In that case,
+// we re-use the pinned bit, because it's of no use to us anyway in that
+// configuration, as all objects are pinned.
enum nofl_metadata_byte {
NOFL_METADATA_BYTE_NONE = 0,
NOFL_METADATA_BYTE_YOUNG = 1,
@@ -224,9 +239,10 @@ enum nofl_metadata_byte {
NOFL_METADATA_BYTE_MARK_1 = 4,
NOFL_METADATA_BYTE_MARK_2 = 8,
NOFL_METADATA_BYTE_END = 16,
-NOFL_METADATA_BYTE_EPHEMERON = 32,
-NOFL_METADATA_BYTE_PINNED = 64,
-NOFL_METADATA_BYTE_UNUSED_1 = 128
+NOFL_METADATA_BYTE_PINNED = 32,
+NOFL_METADATA_BYTE_LOGGED_0 = 64,
+NOFL_METADATA_BYTE_LOGGED_1 = 128,
+NOFL_METADATA_BYTE_EPHEMERON = NOFL_METADATA_BYTE_PINNED,
};
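This commit only reserves the two logged bits; the barrier itself is not implemented here. As an illustration of the protocol the comment above describes, a field-logging write barrier could look roughly like the sketch below, where write_barrier and record_field are hypothetical names and the sequential-store buffer is left as a stub.

#include <stdatomic.h>
#include <stdint.h>

enum { LOGGED_0 = 64, LOGGED_1 = 128 };  // NOFL_METADATA_BYTE_LOGGED_{0,1}

// Stub: a real collector would append to a sequential-store buffer.
static void record_field(void *field) { (void)field; }

// 'metadata' is the object's mark byte; 'word' selects which of the
// granule's two words was written (0 or 1).
static void
write_barrier(_Atomic uint8_t *metadata, void *field, int word) {
  uint8_t bit = word ? LOGGED_1 : LOGGED_0;
  uint8_t byte = atomic_load_explicit(metadata, memory_order_relaxed);
  while (!(byte & bit)) {
    // Race to set the logged bit; the winner records the field as a
    // generational root. On failure, 'byte' is reloaded and we recheck.
    if (atomic_compare_exchange_weak_explicit(metadata, &byte,
                                              (uint8_t)(byte | bit),
                                              memory_order_relaxed,
                                              memory_order_relaxed)) {
      record_field(field);
      return;
    }
  }
}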
static uint8_t
@@ -236,22 +252,9 @@ nofl_rotate_dead_survivor_marked(uint8_t mask) {
return ((mask << 1) | (mask >> 2)) & all;
}
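Concretely, with the mark bits at 2, 4, and 8 (MARK_0 = 2 is implied by the enum's sequence), all is 14, and ((mask << 1) | (mask >> 2)) & all maps 2 to 4, 4 to 8, and 8 back to 2: the pattern that meant "marked" in one collection means "survivor" in the next. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

static uint8_t rotate(uint8_t mask) {
  uint8_t all = 2 | 4 | 8;  // MARK_0 | MARK_1 | MARK_2
  return ((mask << 1) | (mask >> 2)) & all;
}

int main(void) {
  assert(rotate(2) == 4);  // MARK_0 -> MARK_1
  assert(rotate(4) == 8);  // MARK_1 -> MARK_2
  assert(rotate(8) == 2);  // MARK_2 wraps around to MARK_0
  return 0;
}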
-static struct nofl_lock
-nofl_lock_acquire(pthread_mutex_t *lock) {
-pthread_mutex_lock(lock);
-return (struct nofl_lock){ lock };
-}
-static void
-nofl_lock_release(struct nofl_lock *lock) {
-GC_ASSERT(lock->lock);
-pthread_mutex_unlock(lock->lock);
-lock->lock = NULL;
-}
-static struct nofl_lock
+static struct gc_lock
nofl_space_lock(struct nofl_space *space) {
-return nofl_lock_acquire(&space->lock);
+return gc_lock_acquire(&space->lock);
}
static struct nofl_slab*
@@ -440,7 +443,7 @@ nofl_block_list_pop(struct nofl_block_list *list) {
static void
nofl_block_stack_push(struct nofl_block_stack *stack,
struct nofl_block_ref block,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
struct nofl_block_list *list = &stack->list;
list->count++;
GC_ASSERT(nofl_block_is_null(nofl_block_next(block)));
@@ -451,7 +454,7 @@ nofl_block_stack_push(struct nofl_block_stack *stack,
static struct nofl_block_ref
nofl_block_stack_pop(struct nofl_block_stack *stack,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
struct nofl_block_list *list = &stack->list;
struct nofl_block_ref head = nofl_block_head(list);
if (!nofl_block_is_null(head)) {
@@ -470,7 +473,7 @@ nofl_block_count(struct nofl_block_list *list) {
static void
nofl_push_unavailable_block(struct nofl_space *space,
struct nofl_block_ref block,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
nofl_block_set_flag(block, NOFL_BLOCK_UNAVAILABLE);
nofl_block_stack_push(nofl_block_has_flag(block, NOFL_BLOCK_PAGED_OUT)
? &space->paged_out[NOFL_PAGE_OUT_QUEUE_SIZE-1]
@@ -480,7 +483,7 @@ nofl_push_unavailable_block(struct nofl_space *space,
static struct nofl_block_ref
nofl_pop_unavailable_block(struct nofl_space *space,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
for (int age = 0; age < NOFL_PAGE_OUT_QUEUE_SIZE; age++) {
struct nofl_block_ref block =
nofl_block_stack_pop(&space->paged_out[age], lock);
@@ -495,21 +498,21 @@ nofl_pop_unavailable_block(struct nofl_space *space,
static void
nofl_push_empty_block(struct nofl_space *space,
struct nofl_block_ref block,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
nofl_block_stack_push(&space->empty, block, lock);
}
static struct nofl_block_ref
nofl_pop_empty_block_with_lock(struct nofl_space *space,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
return nofl_block_stack_pop(&space->empty, lock);
}
static struct nofl_block_ref
nofl_pop_empty_block(struct nofl_space *space) {
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
struct nofl_block_ref ret = nofl_pop_empty_block_with_lock(space, &lock);
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
return ret;
}
@@ -635,19 +638,19 @@ nofl_allocator_release_partly_full_block(struct nofl_allocator *alloc,
size_t hole_size = alloc->sweep - alloc->alloc;
GC_ASSERT(hole_size);
block.summary->fragmentation_granules = hole_size / NOFL_GRANULE_SIZE;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
nofl_block_stack_push(&space->partly_full, block, &lock);
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
nofl_allocator_reset(alloc);
}
static size_t
nofl_allocator_acquire_partly_full_block(struct nofl_allocator *alloc,
struct nofl_space *space) {
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
struct nofl_block_ref block = nofl_block_stack_pop(&space->partly_full,
&lock);
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
if (nofl_block_is_null(block))
return 0;
GC_ASSERT_EQ(block.summary->holes_with_fragmentation, 0);
@@ -1039,11 +1042,11 @@ static void
nofl_space_prepare_evacuation(struct nofl_space *space) {
GC_ASSERT(!space->evacuating);
struct nofl_block_ref block;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
while (!nofl_block_is_null
(block = nofl_block_list_pop(&space->evacuation_targets)))
nofl_push_empty_block(space, block, &lock);
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
// Blocks are either to_sweep, empty, or unavailable.
GC_ASSERT_EQ(nofl_block_count(&space->partly_full.list), 0);
GC_ASSERT_EQ(nofl_block_count(&space->full), 0);
@@ -1164,7 +1167,7 @@ nofl_space_start_gc(struct nofl_space *space, enum gc_collection_kind gc_kind) {
static void
nofl_space_finish_evacuation(struct nofl_space *space,
-const struct nofl_lock *lock) {
+const struct gc_lock *lock) {
// When evacuation began, the evacuation reserve was moved to the
// empties list. Now that evacuation is finished, attempt to
// repopulate the reserve.
@@ -1308,7 +1311,7 @@ static void
nofl_space_finish_gc(struct nofl_space *space,
enum gc_collection_kind gc_kind) {
space->last_collection_was_minor = (gc_kind == GC_COLLECTION_MINOR);
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
if (space->evacuating)
nofl_space_finish_evacuation(space, &lock);
else {
@@ -1346,7 +1349,7 @@ nofl_space_finish_gc(struct nofl_space *space,
}
// FIXME: Promote concurrently instead of during the pause.
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
nofl_space_promote_blocks(space);
nofl_space_reset_statistics(space);
nofl_space_update_mark_patterns(space, 0);
@@ -1363,7 +1366,7 @@ static ssize_t
nofl_space_maybe_reacquire_memory(struct nofl_space *space, size_t bytes) {
ssize_t pending =
atomic_fetch_sub(&space->pending_unavailable_bytes, bytes) - bytes;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
while (pending + NOFL_BLOCK_SIZE <= 0) {
struct nofl_block_ref block = nofl_pop_unavailable_block(space, &lock);
if (nofl_block_is_null(block)) break;
@@ -1372,13 +1375,15 @@ nofl_space_maybe_reacquire_memory(struct nofl_space *space, size_t bytes) {
pending = atomic_fetch_add(&space->pending_unavailable_bytes, NOFL_BLOCK_SIZE)
+ NOFL_BLOCK_SIZE;
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
return pending;
}
static inline int
nofl_space_should_evacuate(struct nofl_space *space, uint8_t metadata_byte,
struct gc_ref obj) {
+if (gc_has_conservative_intraheap_edges())
+return 0;
if (!space->evacuating)
return 0;
if (metadata_byte & NOFL_METADATA_BYTE_PINNED)
@@ -1389,8 +1394,11 @@
static inline int
nofl_space_set_mark(struct nofl_space *space, uint8_t *metadata, uint8_t byte) {
+// Clear logged bits when we mark: after marking, there will be no
+// young objects.
uint8_t mask = NOFL_METADATA_BYTE_YOUNG | NOFL_METADATA_BYTE_MARK_0
-| NOFL_METADATA_BYTE_MARK_1 | NOFL_METADATA_BYTE_MARK_2;
+| NOFL_METADATA_BYTE_MARK_1 | NOFL_METADATA_BYTE_MARK_2
+| NOFL_METADATA_BYTE_LOGGED_0 | NOFL_METADATA_BYTE_LOGGED_1;
atomic_store_explicit(metadata,
(byte & ~mask) | space->marked_mask,
memory_order_relaxed);
@@ -1407,6 +1415,10 @@ nofl_space_set_nonempty_mark(struct nofl_space *space, uint8_t *metadata,
static inline void
nofl_space_pin_object(struct nofl_space *space, struct gc_ref ref) {
+// For the heap-conservative configuration, all objects are pinned,
+// and we re-use the pinned bit to identify ephemerons.
+if (gc_has_conservative_intraheap_edges())
+return;
uint8_t *metadata = nofl_metadata_byte_for_object(ref);
uint8_t byte = atomic_load_explicit(metadata, memory_order_relaxed);
if (byte & NOFL_METADATA_BYTE_PINNED)
@@ -1674,7 +1686,7 @@ nofl_space_add_slabs(struct nofl_space *space, struct nofl_slab *slabs,
static int
nofl_space_shrink(struct nofl_space *space, size_t bytes) {
ssize_t pending = nofl_space_request_release_memory(space, bytes);
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
// First try to shrink by unmapping previously-identified empty blocks.
while (pending > 0) {
@@ -1707,7 +1719,7 @@ nofl_space_shrink(struct nofl_space *space, size_t bytes) {
}
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
// It still may be the case we need to page out more blocks. Only evacuation
// can help us then!
@@ -1725,7 +1737,7 @@ nofl_space_expand(struct nofl_space *space, size_t bytes) {
struct nofl_slab *slabs = nofl_allocate_slabs(nslabs);
nofl_space_add_slabs(space, slabs, nslabs);
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
for (size_t slab = 0; slab < nslabs; slab++) {
for (size_t idx = 0; idx < NOFL_NONMETA_BLOCKS_PER_SLAB; idx++) {
uintptr_t addr = (uintptr_t)slabs[slab].blocks[idx].data;
@@ -1734,7 +1746,7 @@ nofl_space_expand(struct nofl_space *space, size_t bytes) {
nofl_push_unavailable_block(space, block, &lock);
}
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
nofl_space_maybe_reacquire_memory(space, 0);
}
@@ -1748,7 +1760,7 @@ nofl_space_advance_page_out_queue(void *data) {
// items, except that we don't page out yet, as it could be that some other
// background task will need to pull pages back in.
struct nofl_space *space = data;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
for (int age = NOFL_PAGE_OUT_QUEUE_SIZE - 3; age >= 0; age--) {
struct nofl_block_ref block =
nofl_block_stack_pop(&space->paged_out[age], &lock);
@@ -1756,7 +1768,7 @@ nofl_space_advance_page_out_queue(void *data) {
break;
nofl_block_stack_push(&space->paged_out[age+1], block, &lock);
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
}
static void
@@ -1764,7 +1776,7 @@ nofl_space_page_out_blocks(void *data) {
// This task is invoked by the background thread after other tasks. It
// actually pages out blocks that reached the end of the queue.
struct nofl_space *space = data;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
int age = NOFL_PAGE_OUT_QUEUE_SIZE - 2;
while (1) {
struct nofl_block_ref block =
@@ -1775,7 +1787,7 @@ nofl_space_page_out_blocks(void *data) {
madvise((void*)block.addr, NOFL_BLOCK_SIZE, MADV_DONTNEED);
nofl_block_stack_push(&space->paged_out[age + 1], block, &lock);
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
}
static int
@@ -1797,7 +1809,7 @@ nofl_space_init(struct nofl_space *space, size_t size, int atomic,
space->evacuation_minimum_reserve = 0.02;
space->evacuation_reserve = space->evacuation_minimum_reserve;
space->promotion_threshold = promotion_threshold;
-struct nofl_lock lock = nofl_space_lock(space);
+struct gc_lock lock = nofl_space_lock(space);
for (size_t slab = 0; slab < nslabs; slab++) {
for (size_t idx = 0; idx < NOFL_NONMETA_BLOCKS_PER_SLAB; idx++) {
uintptr_t addr = (uintptr_t)slabs[slab].blocks[idx].data;
@@ -1812,7 +1824,7 @@ nofl_space_init(struct nofl_space *space, size_t size, int atomic,
}
}
}
-nofl_lock_release(&lock);
+gc_lock_release(&lock);
gc_background_thread_add_task(thread, GC_BACKGROUND_TASK_START,
nofl_space_advance_page_out_queue,
space);