mirror of https://git.savannah.gnu.org/git/guile.git
Implement adaptive heap sizing for semi
This commit is contained in:

parent 1bf250f62a
commit b7306950bc

6 changed files with 149 additions and 135 deletions
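The change is largely a re-plumbing of the heap-sizer callback API: where the sizers previously took an opaque void *callback_data, they now take the struct gc_heap* directly, which is what lets a collector without a background thread (semi) share the same sizing machinery. A before/after sketch of the callback shape, condensed from the hunks below:

/* Before: callbacks carried an opaque context pointer. */
uint64_t (*get_allocation_counter)(void *callback_data);
void (*set_heap_size)(size_t size, void *callback_data);

/* After: callbacks take the heap itself; no separate context is needed. */
uint64_t (*get_allocation_counter)(struct gc_heap *heap);
void (*set_heap_size)(struct gc_heap *heap, size_t size);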

@@ -9,8 +9,9 @@
 #include "assert.h"
 #include "background-thread.h"
 #include "debug.h"
-#include "heap-sizer.h"
+#include "gc-config.h"
 #include "gc-platform.h"
+#include "heap-sizer.h"
 
 // This is the MemBalancer algorithm from "Optimal Heap Limits for Reducing
 // Browser Memory Use" by Marisa Kirisame, Pranav Shenoy, and Pavel Panchekha
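The sizing computation itself (gc_adaptive_heap_sizer_calculate_size) falls outside the hunks shown. As a rough, illustrative sketch of the paper's square-root rule — all names and the exact placement of the expansiveness knob are assumptions, not taken from this file:

#include <math.h>
#include <stdint.h>

/* MemBalancer-style sizing sketch: headroom beyond live bytes grows with
   the square root of (allocation rate x pause time); a larger expansiveness
   trades memory for less time spent in GC.  Units: bytes/ns and ns. */
static uint64_t
membalancer_size_sketch(uint64_t live_bytes, double alloc_rate_bytes_per_ns,
                        double pause_ns, double expansiveness) {
  double headroom = sqrt(alloc_rate_bytes_per_ns * pause_ns * expansiveness);
  return live_bytes + (uint64_t) headroom;
}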
@@ -25,9 +26,9 @@
 // high on program startup.
 
 struct gc_adaptive_heap_sizer {
-  uint64_t (*get_allocation_counter)(void *callback_data);
-  void (*set_heap_size)(size_t size, void *callback_data);
-  void *callback_data;
+  uint64_t (*get_allocation_counter)(struct gc_heap *heap);
+  void (*set_heap_size)(struct gc_heap *heap, size_t size);
+  struct gc_heap *heap;
   uint64_t smoothed_pause_time;
   uint64_t smoothed_live_bytes;
   uint64_t live_bytes;
@@ -38,12 +39,28 @@ struct gc_adaptive_heap_sizer {
   double maximum_multiplier;
   double minimum_free_space;
   double expansiveness;
+#if GC_PARALLEL
   pthread_mutex_t lock;
+#endif
   int background_task_id;
   uint64_t last_bytes_allocated;
   uint64_t last_heartbeat;
 };
 
+static void
+gc_adaptive_heap_sizer_lock(struct gc_adaptive_heap_sizer *sizer) {
+#if GC_PARALLEL
+  pthread_mutex_lock(&sizer->lock);
+#endif
+}
+
+static void
+gc_adaptive_heap_sizer_unlock(struct gc_adaptive_heap_sizer *sizer) {
+#if GC_PARALLEL
+  pthread_mutex_unlock(&sizer->lock);
+#endif
+}
+
 // With lock
 static uint64_t
 gc_adaptive_heap_sizer_calculate_size(struct gc_adaptive_heap_sizer *sizer) {
@@ -65,34 +82,33 @@ gc_adaptive_heap_sizer_calculate_size(struct gc_adaptive_heap_sizer *sizer) {
 static uint64_t
 gc_adaptive_heap_sizer_set_expansiveness(struct gc_adaptive_heap_sizer *sizer,
                                          double expansiveness) {
-  pthread_mutex_lock(&sizer->lock);
+  gc_adaptive_heap_sizer_lock(sizer);
   sizer->expansiveness = expansiveness;
   uint64_t heap_size = gc_adaptive_heap_sizer_calculate_size(sizer);
-  pthread_mutex_unlock(&sizer->lock);
+  gc_adaptive_heap_sizer_unlock(sizer);
   return heap_size;
 }
 
 static void
 gc_adaptive_heap_sizer_on_gc(struct gc_adaptive_heap_sizer *sizer,
                              size_t live_bytes, uint64_t pause_ns,
-                             void (*set_heap_size)(size_t, void*),
-                             void *data) {
-  pthread_mutex_lock(&sizer->lock);
+                             void (*set_heap_size)(struct gc_heap*, size_t)) {
+  gc_adaptive_heap_sizer_lock(sizer);
   sizer->live_bytes = live_bytes;
   sizer->smoothed_live_bytes *= 1.0 - sizer->collection_smoothing_factor;
   sizer->smoothed_live_bytes += sizer->collection_smoothing_factor * live_bytes;
   sizer->smoothed_pause_time *= 1.0 - sizer->collection_smoothing_factor;
   sizer->smoothed_pause_time += sizer->collection_smoothing_factor * pause_ns;
-  set_heap_size(gc_adaptive_heap_sizer_calculate_size(sizer), data);
-  pthread_mutex_unlock(&sizer->lock);
+  set_heap_size(sizer->heap, gc_adaptive_heap_sizer_calculate_size(sizer));
+  gc_adaptive_heap_sizer_unlock(sizer);
 }
 
 static void
 gc_adaptive_heap_sizer_background_task(void *data) {
   struct gc_adaptive_heap_sizer *sizer = data;
-  pthread_mutex_lock(&sizer->lock);
+  gc_adaptive_heap_sizer_lock(sizer);
   uint64_t bytes_allocated =
-    sizer->get_allocation_counter(sizer->callback_data);
+    sizer->get_allocation_counter(sizer->heap);
   uint64_t heartbeat = gc_platform_monotonic_nanoseconds();
   double rate = (double) (bytes_allocated - sizer->last_bytes_allocated) /
     (double) (heartbeat - sizer->last_heartbeat);
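The smoothed_live_bytes and smoothed_pause_time updates above are both instances of an exponentially weighted moving average. Factored out as an illustrative helper (not present in the source):

/* EWMA: with factor = 0.5 (hypothetical), a smoothed value of 100 and a
   new sample of 200 yield 150; older samples decay geometrically. */
static double ewma_update(double smoothed, double sample, double factor) {
  return smoothed * (1.0 - factor) + factor * sample;
}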
@@ -102,16 +118,15 @@ gc_adaptive_heap_sizer_background_task(void *data) {
   sizer->smoothed_allocation_rate += rate * sizer->allocation_smoothing_factor;
   sizer->last_heartbeat = heartbeat;
   sizer->last_bytes_allocated = bytes_allocated;
-  sizer->set_heap_size(gc_adaptive_heap_sizer_calculate_size(sizer),
-                       sizer->callback_data);
-  pthread_mutex_unlock(&sizer->lock);
+  sizer->set_heap_size(sizer->heap,
+                       gc_adaptive_heap_sizer_calculate_size(sizer));
+  gc_adaptive_heap_sizer_unlock(sizer);
 }
 
 static struct gc_adaptive_heap_sizer*
-gc_make_adaptive_heap_sizer(double expansiveness,
-                            uint64_t (*get_allocation_counter)(void *),
-                            void (*set_heap_size)(size_t , void *),
-                            void *callback_data,
+gc_make_adaptive_heap_sizer(struct gc_heap *heap, double expansiveness,
+                            uint64_t (*get_allocation_counter)(struct gc_heap*),
+                            void (*set_heap_size)(struct gc_heap*, size_t),
                             struct gc_background_thread *thread) {
   struct gc_adaptive_heap_sizer *sizer;
   sizer = malloc(sizeof(*sizer));
@@ -120,7 +135,7 @@ gc_make_adaptive_heap_sizer(double expansiveness,
   memset(sizer, 0, sizeof(*sizer));
   sizer->get_allocation_counter = get_allocation_counter;
   sizer->set_heap_size = set_heap_size;
-  sizer->callback_data = callback_data;
+  sizer->heap = heap;
   // Baseline estimate of GC speed: 10 MB/ms, or 10 bytes/ns. However since we
   // observe this speed by separately noisy measurements, we have to provide
   // defaults for numerator and denominator; estimate 2ms for initial GC pauses
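Working out the defaults the comment describes (the variable names here are illustrative): a baseline speed of 10 bytes/ns observed as a 2 ms initial pause implies an initial live-bytes estimate of 10 x 2,000,000 ns = 20 MB, so the speed ratio starts at the baseline before any real measurement arrives.

uint64_t initial_pause_ns = 2 * 1000 * 1000;         /* 2 ms */
uint64_t initial_live_bytes = 10 * initial_pause_ns; /* 20 MB */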
@@ -136,14 +151,17 @@ gc_make_adaptive_heap_sizer(double expansiveness,
   sizer->maximum_multiplier = 5;
   sizer->minimum_free_space = 4 * 1024 * 1024;
   sizer->expansiveness = expansiveness;
-  pthread_mutex_init(&thread->lock, NULL);
-  sizer->last_bytes_allocated = get_allocation_counter(callback_data);
+  sizer->last_bytes_allocated = get_allocation_counter(heap);
   sizer->last_heartbeat = gc_platform_monotonic_nanoseconds();
-  sizer->background_task_id = thread
-    ? gc_background_thread_add_task(thread, GC_BACKGROUND_TASK_MIDDLE,
-                                    gc_adaptive_heap_sizer_background_task,
-                                    sizer)
-    : -1;
+#if GC_PARALLEL
+  pthread_mutex_init(&thread->lock, NULL);
+  sizer->background_task_id =
+    gc_background_thread_add_task(thread, GC_BACKGROUND_TASK_MIDDLE,
+                                  gc_adaptive_heap_sizer_background_task,
+                                  sizer);
+#else
+  sizer->background_task_id = -1;
+#endif
   return sizer;
 }
 
@@ -13,6 +13,7 @@
 // the heap.
 
 struct gc_growable_heap_sizer {
+  struct gc_heap *heap;
   double multiplier;
   pthread_mutex_t lock;
 };
@@ -29,22 +30,22 @@ static void
 gc_growable_heap_sizer_on_gc(struct gc_growable_heap_sizer *sizer,
                              size_t heap_size, size_t live_bytes,
                              uint64_t pause_ns,
-                             void (*set_heap_size)(size_t, void*),
-                             void *data) {
+                             void (*set_heap_size)(struct gc_heap*, size_t)) {
   pthread_mutex_lock(&sizer->lock);
   size_t target_size = live_bytes * sizer->multiplier;
   if (target_size > heap_size)
-    set_heap_size(target_size, data);
+    set_heap_size(sizer->heap, target_size);
   pthread_mutex_unlock(&sizer->lock);
 }
 
 static struct gc_growable_heap_sizer*
-gc_make_growable_heap_sizer(double multiplier) {
+gc_make_growable_heap_sizer(struct gc_heap *heap, double multiplier) {
   struct gc_growable_heap_sizer *sizer;
   sizer = malloc(sizeof(*sizer));
   if (!sizer)
     GC_CRASH();
   memset(sizer, 0, sizeof(*sizer));
+  sizer->heap = heap;
   sizer->multiplier = multiplier;
   pthread_mutex_init(&sizer->lock, NULL);
   return sizer;
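The growable policy above is grow-only: each GC computes live_bytes * multiplier and resizes only when that target exceeds the current heap size. A worked example with hypothetical numbers:

/* With multiplier = 1.75 and 100 MB live, target_size = 175 MB, so a
   150 MB heap grows to 175 MB.  If live data later drops to 80 MB, the
   140 MB target is below the current size and the heap stays at 175 MB:
   this policy never shrinks the heap. */
size_t live_bytes = 100 * 1024 * 1024;
double multiplier = 1.75;
size_t target_size = live_bytes * multiplier; /* 175 MB */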
@@ -18,9 +18,8 @@ struct gc_heap_sizer {
 static struct gc_heap_sizer
 gc_make_heap_sizer(struct gc_heap *heap,
                    const struct gc_common_options *options,
-                   uint64_t (*get_allocation_counter_from_thread)(void*),
-                   void (*set_heap_size_from_thread)(size_t, void*),
-                   void *data,
+                   uint64_t (*get_allocation_counter_from_thread)(struct gc_heap*),
+                   void (*set_heap_size_from_thread)(struct gc_heap*, size_t),
                    struct gc_background_thread *thread) {
   struct gc_heap_sizer ret = { options->heap_size_policy, };
   switch (options->heap_size_policy) {
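The struct gc_heap_sizer definition referenced in the hunk header is not itself shown. From the usage visible here (ret.growable, ret.adaptive, sizer.policy, initialization from options->heap_size_policy), its shape is presumably along these lines — a reconstruction, and the two pointers may well be a union in the real source:

struct gc_heap_sizer {
  enum gc_heap_size_policy policy;
  struct gc_growable_heap_sizer *growable;
  struct gc_adaptive_heap_sizer *adaptive;
};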
@@ -28,15 +27,16 @@ gc_make_heap_sizer(struct gc_heap *heap,
     break;
 
   case GC_HEAP_SIZE_GROWABLE:
-    ret.growable = gc_make_growable_heap_sizer(options->heap_size_multiplier);
+    ret.growable =
+      gc_make_growable_heap_sizer(heap, options->heap_size_multiplier);
     break;
 
   case GC_HEAP_SIZE_ADAPTIVE:
     ret.adaptive =
-      gc_make_adaptive_heap_sizer (options->heap_expansiveness,
+      gc_make_adaptive_heap_sizer (heap, options->heap_expansiveness,
                                    get_allocation_counter_from_thread,
                                    set_heap_size_from_thread,
-                                   heap, thread);
+                                   thread);
     break;
 
   default:
@@ -48,19 +48,21 @@ gc_make_heap_sizer(struct gc_heap *heap,
 static void
 gc_heap_sizer_on_gc(struct gc_heap_sizer sizer, size_t heap_size,
                     size_t live_bytes, size_t pause_ns,
-                    void (*set_heap_size)(size_t, void*), void *data) {
+                    void (*set_heap_size)(struct gc_heap*, size_t)) {
   switch (sizer.policy) {
   case GC_HEAP_SIZE_FIXED:
     break;
 
   case GC_HEAP_SIZE_GROWABLE:
     gc_growable_heap_sizer_on_gc(sizer.growable, heap_size, live_bytes,
-                                 pause_ns, set_heap_size, data);
+                                 pause_ns, set_heap_size);
     break;
 
   case GC_HEAP_SIZE_ADAPTIVE:
+    if (sizer.adaptive->background_task_id < 0)
+      gc_adaptive_heap_sizer_background_task(sizer.adaptive);
     gc_adaptive_heap_sizer_on_gc(sizer.adaptive, live_bytes, pause_ns,
-                                 set_heap_size, data);
+                                 set_heap_size);
     break;
 
   default:
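Note the new adaptive case: when gc_make_adaptive_heap_sizer ran without a background task, background_task_id is -1, so the heartbeat task is invoked synchronously during the GC pause before the on-GC update. Schematically (ordering as in the hunk above; this is what lets the single-threaded semi collector below reuse the adaptive sizer):

/* background_task_id < 0, so at each collection: */
gc_adaptive_heap_sizer_background_task(sizer.adaptive);  /* sample alloc rate */
gc_adaptive_heap_sizer_on_gc(sizer.adaptive, live_bytes, pause_ns,
                             set_heap_size);             /* then resize */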

src/mmc.c (49 changed lines)

@@ -462,8 +462,7 @@ maybe_pause_mutator_for_collection(struct gc_mutator *mut) {
 }
 
 static void
-resize_heap(size_t new_size, void *data) {
-  struct gc_heap *heap = data;
+resize_heap(struct gc_heap *heap, size_t new_size) {
   if (new_size == heap->size)
     return;
   DEBUG("------ resizing heap\n");
@@ -801,7 +800,7 @@ collect(struct gc_mutator *mut, enum gc_collection_kind requested_kind) {
     heap_estimate_live_data_after_gc(heap, live_bytes, yield);
   DEBUG("--- total live bytes estimate: %zu\n", live_bytes_estimate);
   gc_heap_sizer_on_gc(heap->sizer, heap->size, live_bytes_estimate, pause_ns,
-                      resize_heap, heap);
+                      resize_heap);
   heap->size_at_last_gc = heap->size;
   HEAP_EVENT(heap, restarting_mutators);
   allow_mutators_to_continue(heap);
@@ -987,6 +986,22 @@ gc_options_parse_and_set(struct gc_options *options, int option,
   return gc_common_options_parse_and_set(&options->common, option, value);
 }
 
+static uint64_t allocation_counter_from_thread(struct gc_heap *heap) {
+  uint64_t ret = heap->total_allocated_bytes_at_last_gc;
+  if (pthread_mutex_trylock(&heap->lock)) return ret;
+  nofl_space_add_to_allocation_counter(heap_nofl_space(heap), &ret);
+  large_object_space_add_to_allocation_counter(heap_large_object_space(heap),
+                                               &ret);
+  pthread_mutex_unlock(&heap->lock);
+  return ret;
+}
+
+static void set_heap_size_from_thread(struct gc_heap *heap, size_t size) {
+  if (pthread_mutex_trylock(&heap->lock)) return;
+  resize_heap(heap, size);
+  pthread_mutex_unlock(&heap->lock);
+}
+
 static int
 heap_init(struct gc_heap *heap, const struct gc_options *options) {
   // *heap is already initialized to 0.
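Both helpers use pthread_mutex_trylock rather than a blocking lock: they are called from the background thread's heartbeat, and blocking on heap->lock while a collection holds it would stall the heartbeat, so the getter falls back to the slightly stale last-GC counter and the setter simply skips the resize. The pattern, abstracted (illustrative, not from the diff):

#include <pthread.h>
#include <stdint.h>

/* Opportunistic read: prefer a stale value over waiting on the lock. */
static uint64_t read_or_fallback(pthread_mutex_t *lock,
                                 uint64_t (*read_exact)(void *state),
                                 void *state, uint64_t stale) {
  if (pthread_mutex_trylock(lock))  /* nonzero: lock busy, keep stale value */
    return stale;
  uint64_t fresh = read_exact(state);
  pthread_mutex_unlock(lock);
  return fresh;
}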
@@ -1016,28 +1031,14 @@ heap_init(struct gc_heap *heap, const struct gc_options *options) {
     GC_CRASH();
 
   heap->background_thread = gc_make_background_thread();
+  heap->sizer = gc_make_heap_sizer(heap, &options->common,
+                                   allocation_counter_from_thread,
+                                   set_heap_size_from_thread,
+                                   heap->background_thread);
 
   return 1;
 }
 
-static uint64_t allocation_counter_from_thread(void *data) {
-  struct gc_heap *heap = data;
-  uint64_t ret = heap->total_allocated_bytes_at_last_gc;
-  if (pthread_mutex_trylock(&heap->lock)) return ret;
-  nofl_space_add_to_allocation_counter(heap_nofl_space(heap), &ret);
-  large_object_space_add_to_allocation_counter(heap_large_object_space(heap),
-                                               &ret);
-  pthread_mutex_unlock(&heap->lock);
-  return ret;
-}
-
-static void set_heap_size_from_thread(size_t size, void *data) {
-  struct gc_heap *heap = data;
-  if (pthread_mutex_trylock(&heap->lock)) return;
-  resize_heap(size, heap);
-  pthread_mutex_unlock(&heap->lock);
-}
-
 int
 gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
         struct gc_heap **heap, struct gc_mutator **mut,
@@ -1081,12 +1082,6 @@ gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   if (!large_object_space_init(heap_large_object_space(*heap), *heap))
     GC_CRASH();
 
-  (*heap)->sizer = gc_make_heap_sizer(*heap, &options->common,
-                                      allocation_counter_from_thread,
-                                      set_heap_size_from_thread,
-                                      (*heap),
-                                      (*heap)->background_thread);
-
   *mut = calloc(1, sizeof(struct gc_mutator));
   if (!*mut) GC_CRASH();
   gc_stack_init(&(*mut)->stack, stack_base);

src/pcc.c (50 changed lines)

@@ -322,8 +322,7 @@ static inline void maybe_pause_mutator_for_collection(struct gc_mutator *mut) {
     pause_mutator_for_collection_without_lock(mut);
 }
 
-static void resize_heap(size_t new_size, void *data) {
-  struct gc_heap *heap = data;
+static void resize_heap(struct gc_heap *heap, size_t new_size) {
   if (new_size == heap->size)
     return;
   DEBUG("------ resizing heap\n");
@@ -431,7 +430,7 @@ static void collect(struct gc_mutator *mut) {
   uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
   HEAP_EVENT(heap, live_data_size, live_size);
   gc_heap_sizer_on_gc(heap->sizer, heap->size, live_size, pause_ns,
-                      resize_heap, heap);
+                      resize_heap);
   if (!copy_space_page_out_blocks_until_memory_released(copy_space)
       && heap->sizer.policy == GC_HEAP_SIZE_FIXED) {
     fprintf(stderr, "ran out of space, heap size %zu (%zu slabs)\n",
@@ -585,6 +584,22 @@ int gc_options_parse_and_set(struct gc_options *options, int option,
   return gc_common_options_parse_and_set(&options->common, option, value);
 }
 
+static uint64_t allocation_counter_from_thread(struct gc_heap *heap) {
+  uint64_t ret = heap->total_allocated_bytes_at_last_gc;
+  if (pthread_mutex_trylock(&heap->lock)) return ret;
+  copy_space_add_to_allocation_counter(heap_copy_space(heap), &ret);
+  large_object_space_add_to_allocation_counter(heap_large_object_space(heap),
+                                               &ret);
+  pthread_mutex_unlock(&heap->lock);
+  return ret;
+}
+
+static void set_heap_size_from_thread(struct gc_heap *heap, size_t size) {
+  if (pthread_mutex_trylock(&heap->lock)) return;
+  resize_heap(heap, size);
+  pthread_mutex_unlock(&heap->lock);
+}
+
 static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
   // *heap is already initialized to 0.
 
@@ -607,28 +622,14 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
     GC_CRASH();
 
+  heap->background_thread = gc_make_background_thread();
+  heap->sizer = gc_make_heap_sizer(heap, &options->common,
+                                   allocation_counter_from_thread,
+                                   set_heap_size_from_thread,
+                                   heap->background_thread);
+
   return 1;
 }
 
-static uint64_t allocation_counter_from_thread(void *data) {
-  struct gc_heap *heap = data;
-  uint64_t ret = heap->total_allocated_bytes_at_last_gc;
-  if (pthread_mutex_trylock(&heap->lock)) return ret;
-  copy_space_add_to_allocation_counter(heap_copy_space(heap), &ret);
-  large_object_space_add_to_allocation_counter(heap_large_object_space(heap),
-                                               &ret);
-  pthread_mutex_unlock(&heap->lock);
-  return ret;
-}
-
-static void set_heap_size_from_thread(size_t size, void *data) {
-  struct gc_heap *heap = data;
-  if (pthread_mutex_trylock(&heap->lock)) return;
-  resize_heap(size, heap);
-  pthread_mutex_unlock(&heap->lock);
-}
-
 int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
             struct gc_heap **heap, struct gc_mutator **mut,
             struct gc_event_listener event_listener,
@@ -663,13 +664,6 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   if (!large_object_space_init(heap_large_object_space(*heap), *heap))
     GC_CRASH();
 
-  (*heap)->background_thread = gc_make_background_thread();
-  (*heap)->sizer = gc_make_heap_sizer(*heap, &options->common,
-                                      allocation_counter_from_thread,
-                                      set_heap_size_from_thread,
-                                      (*heap),
-                                      (*heap)->background_thread);
-
   *mut = calloc(1, sizeof(struct gc_mutator));
   if (!*mut) GC_CRASH();
   add_mutator(*heap, *mut);

src/semi.c (82 changed lines)

@@ -10,6 +10,8 @@
 #define GC_IMPL 1
 #include "gc-internal.h"
 
+#include "gc-platform.h"
+#include "heap-sizer.h"
 #include "semi-attrs.h"
 #include "large-object-space.h"
 
@@ -32,6 +34,7 @@ struct semi_space {
   struct region to_space;
   size_t page_size;
   size_t stolen_pages;
+  size_t live_bytes_at_last_gc;
 };
 struct gc_heap {
   struct semi_space semi_space;
@@ -42,10 +45,12 @@ struct gc_heap {
   double pending_ephemerons_size_factor;
   double pending_ephemerons_size_slop;
   size_t size;
+  size_t total_allocated_bytes_at_last_gc;
   long count;
   int check_pending_ephemerons;
   const struct gc_options *options;
   struct gc_heap_roots *roots;
+  struct gc_heap_sizer sizer;
   struct gc_event_listener event_listener;
   void *event_listener_data;
 };
@@ -134,10 +139,18 @@ static int semi_space_steal_pages(struct semi_space *space, size_t npages) {
 
 static void semi_space_finish_gc(struct semi_space *space,
                                  size_t large_object_pages) {
+  space->live_bytes_at_last_gc = space->hp - space->to_space.base;
   space->stolen_pages = large_object_pages;
   space->limit = 0; // set in adjust_heap_size_and_limits
 }
 
+static void
+semi_space_add_to_allocation_counter(struct semi_space *space,
+                                     uint64_t *counter) {
+  size_t base = space->to_space.base + space->live_bytes_at_last_gc;
+  *counter += space->hp - base;
+}
+
 static void flip(struct semi_space *space) {
   struct region tmp;
   GC_ASSERT(space->hp <= space->limit);
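In semi, allocation since the last GC is just the bump-pointer advance past the survivors. A worked example with hypothetical numbers: if to_space.base = 0x10000 and live_bytes_at_last_gc = 0x2000 (set by semi_space_finish_gc just above), then base = 0x12000, and once hp has advanced to 0x18000 the counter gains 0x18000 - 0x12000 = 0x6000 bytes of new allocation.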
@@ -258,10 +271,9 @@ static int grow_region_if_needed(struct region *region, size_t new_size) {
   if (new_size <= region->mapped_size)
     return 1;
 
-  new_size = max_size(new_size, region->mapped_size * 2);
-
   void *mem = mmap(NULL, new_size, PROT_READ|PROT_WRITE,
                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  DEBUG("new size %zx\n", new_size);
   if (mem == MAP_FAILED) {
     perror("mmap failed");
     return 0;
@@ -286,38 +298,9 @@ static void truncate_region(struct region *region, size_t new_size) {
   }
 }
 
-static size_t compute_new_heap_size(struct gc_heap *heap, size_t for_alloc) {
+static void resize_heap(struct gc_heap *heap, size_t new_heap_size) {
   struct semi_space *semi = heap_semi_space(heap);
-  struct large_object_space *large = heap_large_object_space(heap);
-  size_t live_bytes = semi->hp - semi->to_space.base;
-  live_bytes += large->live_pages_at_last_collection * semi->page_size;
-  live_bytes += for_alloc;
-
-  HEAP_EVENT(heap, live_data_size, live_bytes);
-
-  size_t new_heap_size = heap->size;
-  switch (heap->options->common.heap_size_policy) {
-  case GC_HEAP_SIZE_FIXED:
-    break;
-
-  case GC_HEAP_SIZE_GROWABLE: {
-    new_heap_size =
-      max_size(heap->size,
-               live_bytes * heap->options->common.heap_size_multiplier);
-    break;
-  }
-
-  case GC_HEAP_SIZE_ADAPTIVE:
-  default:
-    GC_CRASH();
-  }
-  return align_up(new_heap_size, semi->page_size * 2);
-}
-
-static void adjust_heap_size_and_limits(struct gc_heap *heap,
-                                        size_t for_alloc) {
-  struct semi_space *semi = heap_semi_space(heap);
-  size_t new_heap_size = compute_new_heap_size(heap, for_alloc);
+  new_heap_size = align_up(new_heap_size, semi->page_size * 2);
   size_t new_region_size = new_heap_size / 2;
 
   // Note that there is an asymmetry in how heap size is adjusted: we
@@ -386,6 +369,7 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct gc_heap *heap = mutator_heap(mut);
   int is_minor = 0;
   int is_compacting = 1;
+  uint64_t start_ns = gc_platform_monotonic_nanoseconds();
 
   HEAP_EVENT(heap, requesting_stop);
   HEAP_EVENT(heap, waiting_for_stop);
@@ -395,6 +379,9 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct semi_space *semi = heap_semi_space(heap);
   struct large_object_space *large = heap_large_object_space(heap);
   // fprintf(stderr, "start collect #%ld:\n", space->count);
+  uint64_t *counter_loc = &heap->total_allocated_bytes_at_last_gc;
+  semi_space_add_to_allocation_counter(semi, counter_loc);
+  large_object_space_add_to_allocation_counter(large, counter_loc);
   large_object_space_start_gc(large, 0);
   gc_extern_space_start_gc(heap->extern_space, 0);
   flip(semi);
@@ -420,7 +407,15 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   gc_extern_space_finish_gc(heap->extern_space, 0);
   semi_space_finish_gc(semi, large->live_pages_at_last_collection);
   gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
-  adjust_heap_size_and_limits(heap, for_alloc);
+  size_t live_size = semi->live_bytes_at_last_gc;
+  live_size += large_object_space_size_at_last_collection(large);
+  live_size += for_alloc;
+  uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
+  HEAP_EVENT(heap, live_data_size, live_size);
+  DEBUG("gc %zu: live size %zu, heap size %zu\n", heap->count, live_size,
+        heap->size);
+  gc_heap_sizer_on_gc(heap->sizer, heap->size, live_size, pause_ns,
+                      resize_heap);
 
   HEAP_EVENT(heap, restarting_mutators);
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
@@ -595,7 +590,7 @@ static int heap_init(struct gc_heap *heap, const struct gc_options *options) {
   if (!heap->finalizer_state)
     GC_CRASH();
 
-    return heap_prepare_pending_ephemerons(heap);
+  return heap_prepare_pending_ephemerons(heap);
 }
 
 int gc_option_from_string(const char *str) {
@@ -622,6 +617,14 @@ int gc_options_parse_and_set(struct gc_options *options, int option,
   return gc_common_options_parse_and_set(&options->common, option, value);
 }
 
+static uint64_t get_allocation_counter(struct gc_heap *heap) {
+  return heap->total_allocated_bytes_at_last_gc;
+}
+
+static void ignore_async_heap_size_adjustment(struct gc_heap *heap,
+                                              size_t size) {
+}
+
 int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
             struct gc_heap **heap, struct gc_mutator **mut,
             struct gc_event_listener event_listener,
@@ -633,10 +636,6 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
 
   if (!options) options = gc_allocate_options();
 
-  if (options->common.heap_size_policy == GC_HEAP_SIZE_ADAPTIVE) {
-    fprintf(stderr, "adaptive heap size is currently unimplemented\n");
-    return 0;
-  }
   if (options->common.parallelism != 1)
     fprintf(stderr, "warning: parallelism unimplemented in semispace copying collector\n");
 
@@ -656,6 +655,11 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   if (!large_object_space_init(heap_large_object_space(*heap), *heap))
     return 0;
 
+  (*heap)->sizer = gc_make_heap_sizer(*heap, &options->common,
+                                      get_allocation_counter,
+                                      ignore_async_heap_size_adjustment,
+                                      NULL);
+
   // Ignore stack base, as we are precise.
   (*mut)->roots = NULL;
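Because semi passes a NULL background thread and a no-op ignore_async_heap_size_adjustment callback, all resizing happens synchronously inside collect. Assembled from the hunks above (illustrative ordering, not a verbatim excerpt):

uint64_t start_ns = gc_platform_monotonic_nanoseconds();
/* ... flip, trace, sweep ephemerons ... */
size_t live_size = semi->live_bytes_at_last_gc
  + large_object_space_size_at_last_collection(large) + for_alloc;
uint64_t pause_ns = gc_platform_monotonic_nanoseconds() - start_ns;
gc_heap_sizer_on_gc(heap->sizer, heap->size, live_size, pause_ns,
                    resize_heap);  /* runs the adaptive heartbeat inline,
                                      then may call resize_heap */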