Rework stats collection to use listener interface
commit 5130380ae5 (parent c7499740c9)

10 changed files with 368 additions and 43 deletions
@@ -5,6 +5,7 @@
 #include "gc-assert.h"
 #include "gc-attrs.h"
 #include "gc-edge.h"
+#include "gc-event-listener.h"
 #include "gc-inline.h"
 #include "gc-options.h"
 #include "gc-ref.h"
@@ -24,7 +25,9 @@ GC_API_ void* gc_call_with_stack_addr(void* (*f)(struct gc_stack_addr *,
 
 GC_API_ int gc_init(const struct gc_options *options,
                     struct gc_stack_addr *base, struct gc_heap **heap,
-                    struct gc_mutator **mutator);
+                    struct gc_mutator **mutator,
+                    struct gc_event_listener event_listener,
+                    void *event_listener_data);
 
 struct gc_mutator_roots;
 GC_API_ void gc_mutator_set_roots(struct gc_mutator *mut,
@@ -43,7 +46,6 @@ GC_API_ struct gc_mutator* gc_init_for_thread(struct gc_stack_addr *base,
 GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
 GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
                                  void *data) GC_NEVER_INLINE;
-GC_API_ void gc_print_stats(struct gc_heap *heap);
 
 GC_API_ void gc_collect(struct gc_mutator *mut);
 
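
In short, gc_print_stats is gone and gc_init now takes an event-listener table plus an opaque data pointer. A minimal sketch of the reworked call, mirroring the benchmark updates later in this commit (options setup elided, names illustrative):

  struct gc_heap *heap;
  struct gc_mutator *mut;
  struct gc_basic_stats stats;  // listener state, defined in api/gc-basic-stats.h below
  if (!gc_init(options, NULL, &heap, &mut, GC_BASIC_STATS, &stats))
    return 1;  // initialization failed, as the benchmarks do
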

api/gc-basic-stats.h | 130 (new file)
@@ -0,0 +1,130 @@
+#ifndef GC_BASIC_STATS_H
+#define GC_BASIC_STATS_H
+
+#include "gc-event-listener.h"
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+struct gc_basic_stats {
+  uint64_t major_collection_count;
+  uint64_t minor_collection_count;
+  uint64_t last_time_usec;
+  uint64_t elapsed_mutator_usec;
+  uint64_t elapsed_collector_usec;
+  size_t heap_size;
+  size_t max_heap_size;
+  size_t max_live_data_size;
+};
+
+static inline uint64_t gc_basic_stats_now(void) {
+  struct timeval tv;
+  if (gettimeofday(&tv, NULL) != 0) GC_CRASH();
+  uint64_t ret = tv.tv_sec;
+  ret *= 1000 * 1000;
+  ret += tv.tv_usec;
+  return ret;
+}
+
+static inline void gc_basic_stats_init(void *data, size_t heap_size) {
+  struct gc_basic_stats *stats = data;
+  memset(stats, 0, sizeof(*stats));
+  stats->last_time_usec = gc_basic_stats_now();
+  stats->heap_size = stats->max_heap_size = heap_size;
+}
+
+static inline void gc_basic_stats_prepare_gc(void *data,
+                                             int is_minor,
+                                             int is_compacting) {
+  struct gc_basic_stats *stats = data;
+  if (is_minor)
+    stats->minor_collection_count++;
+  else
+    stats->major_collection_count++;
+  uint64_t now = gc_basic_stats_now();
+  stats->elapsed_mutator_usec += now - stats->last_time_usec;
+  stats->last_time_usec = now;
+}
+
+static inline void gc_basic_stats_requesting_stop(void *data) {}
+static inline void gc_basic_stats_waiting_for_stop(void *data) {}
+static inline void gc_basic_stats_mutators_stopped(void *data) {}
+static inline void gc_basic_stats_roots_traced(void *data) {}
+static inline void gc_basic_stats_heap_traced(void *data) {}
+static inline void gc_basic_stats_ephemerons_traced(void *data) {}
+
+static inline void gc_basic_stats_restarting_mutators(void *data) {
+  struct gc_basic_stats *stats = data;
+  uint64_t now = gc_basic_stats_now();
+  stats->elapsed_collector_usec += now - stats->last_time_usec;
+  stats->last_time_usec = now;
+}
+
+static inline void* gc_basic_stats_mutator_added(void *data) {
+  return NULL;
+}
+static inline void gc_basic_stats_mutator_cause_gc(void *mutator_data) {}
+static inline void gc_basic_stats_mutator_stopping(void *mutator_data) {}
+static inline void gc_basic_stats_mutator_stopped(void *mutator_data) {}
+static inline void gc_basic_stats_mutator_restarted(void *mutator_data) {}
+static inline void gc_basic_stats_mutator_removed(void *mutator_data) {}
+
+static inline void gc_basic_stats_heap_resized(void *data, size_t size) {
+  struct gc_basic_stats *stats = data;
+  stats->heap_size = size;
+  if (size > stats->max_heap_size)
+    stats->max_heap_size = size;
+}
+
+static inline void gc_basic_stats_live_data_size(void *data, size_t size) {
+  struct gc_basic_stats *stats = data;
+  if (size > stats->max_live_data_size)
+    stats->max_live_data_size = size;
+}
+
+#define GC_BASIC_STATS \
+  ((struct gc_event_listener) { \
+    gc_basic_stats_init, \
+    gc_basic_stats_prepare_gc, \
+    gc_basic_stats_requesting_stop, \
+    gc_basic_stats_waiting_for_stop, \
+    gc_basic_stats_mutators_stopped, \
+    gc_basic_stats_roots_traced, \
+    gc_basic_stats_heap_traced, \
+    gc_basic_stats_ephemerons_traced, \
+    gc_basic_stats_restarting_mutators, \
+    gc_basic_stats_mutator_added, \
+    gc_basic_stats_mutator_cause_gc, \
+    gc_basic_stats_mutator_stopping, \
+    gc_basic_stats_mutator_stopped, \
+    gc_basic_stats_mutator_restarted, \
+    gc_basic_stats_mutator_removed, \
+    gc_basic_stats_heap_resized, \
+    gc_basic_stats_live_data_size, \
+  })
+
+static inline void gc_basic_stats_finish(struct gc_basic_stats *stats) {
+  uint64_t now = gc_basic_stats_now();
+  stats->elapsed_mutator_usec += now - stats->last_time_usec;
+  stats->last_time_usec = now;
+}
+
+static inline void gc_basic_stats_print(struct gc_basic_stats *stats, FILE *f) {
+  fprintf(f, "Completed %" PRIu64 " major collections (%" PRIu64 " minor).\n",
+          stats->major_collection_count, stats->minor_collection_count);
+  uint64_t stopped = stats->elapsed_collector_usec;
+  uint64_t elapsed = stats->elapsed_mutator_usec + stopped;
+  uint64_t ms = 1000; // per usec
+  fprintf(f, "%" PRIu64 ".%.3" PRIu64 " ms total time "
+          "(%" PRIu64 ".%.3" PRIu64 " stopped).\n",
+          elapsed / ms, elapsed % ms, stopped / ms, stopped % ms);
+  double MB = 1e6;
+  fprintf(f, "Heap size is %.3f MB (max %.3f MB); peak live data %.3f MB.\n",
+          stats->heap_size / MB, stats->max_heap_size / MB,
+          stats->max_live_data_size / MB);
+}
+
+#endif // GC_BASIC_STATS_H
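
The intended lifecycle, as the benchmark updates below show, is to register the stats struct at gc_init time, run the program, then flush and print at exit. A condensed sketch (error handling and options setup elided):

  struct gc_basic_stats stats;
  if (!gc_init(options, NULL, &heap, &mut, GC_BASIC_STATS, &stats))
    return 1;
  // ... allocate and collect ...
  gc_basic_stats_finish(&stats);         // accrue mutator time since the last collection
  gc_basic_stats_print(&stats, stdout);  // e.g. "Completed 3 major collections (0 minor)."
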

api/gc-event-listener.h | 26 (new file)
@@ -0,0 +1,26 @@
+#ifndef GC_EVENT_LISTENER_H
+#define GC_EVENT_LISTENER_H
+
+struct gc_event_listener {
+  void (*init)(void *data, size_t heap_size);
+  void (*prepare_gc)(void *data, int is_minor, int is_compacting);
+  void (*requesting_stop)(void *data);
+  void (*waiting_for_stop)(void *data);
+  void (*mutators_stopped)(void *data);
+  void (*roots_traced)(void *data);
+  void (*heap_traced)(void *data);
+  void (*ephemerons_traced)(void *data);
+  void (*restarting_mutators)(void *data);
+
+  void* (*mutator_added)(void *data);
+  void (*mutator_cause_gc)(void *mutator_data);
+  void (*mutator_stopping)(void *mutator_data);
+  void (*mutator_stopped)(void *mutator_data);
+  void (*mutator_restarted)(void *mutator_data);
+  void (*mutator_removed)(void *mutator_data);
+
+  void (*heap_resized)(void *data, size_t size);
+  void (*live_data_size)(void *data, size_t size);
+};
+
+#endif // GC_EVENT_LISTENER_H
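
Note the data-threading convention used by the collectors below: heap-wide callbacks receive the event_listener_data pointer registered at gc_init, while the mutator callbacks receive whatever pointer mutator_added returned for that mutator. A hypothetical helper, purely for illustration and not part of the API, makes the two flavors explicit:

  // Illustration only: shows which data pointer each callback kind receives.
  static void notify_example(struct gc_event_listener listener,
                             void *heap_data, size_t heap_size) {
    listener.init(heap_data, heap_size);                 // heap-wide event
    void *mut_data = listener.mutator_added(heap_data);  // one cookie per mutator
    listener.mutator_stopped(mut_data);                  // per-mutator events
    listener.mutator_restarted(mut_data);
  }
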

api/gc-null-event-listener.h | 49 (new file)
@@ -0,0 +1,49 @@
+#ifndef GC_NULL_EVENT_LISTENER_H
+#define GC_NULL_EVENT_LISTENER_H
+
+#include "gc-event-listener.h"
+
+static inline void gc_null_event_listener_init(void *data, size_t size) {}
+static inline void gc_null_event_listener_prepare_gc(void *data,
+                                                     int is_minor,
+                                                     int is_compacting) {}
+static inline void gc_null_event_listener_requesting_stop(void *data) {}
+static inline void gc_null_event_listener_waiting_for_stop(void *data) {}
+static inline void gc_null_event_listener_mutators_stopped(void *data) {}
+static inline void gc_null_event_listener_roots_traced(void *data) {}
+static inline void gc_null_event_listener_heap_traced(void *data) {}
+static inline void gc_null_event_listener_ephemerons_traced(void *data) {}
+static inline void gc_null_event_listener_restarting_mutators(void *data) {}
+
+static inline void* gc_null_event_listener_mutator_added(void *data) { return NULL; }
+static inline void gc_null_event_listener_mutator_cause_gc(void *mutator_data) {}
+static inline void gc_null_event_listener_mutator_stopping(void *mutator_data) {}
+static inline void gc_null_event_listener_mutator_stopped(void *mutator_data) {}
+static inline void gc_null_event_listener_mutator_restarted(void *mutator_data) {}
+static inline void gc_null_event_listener_mutator_removed(void *mutator_data) {}
+
+static inline void gc_null_event_listener_heap_resized(void *data, size_t size) {}
+static inline void gc_null_event_listener_live_data_size(void *data, size_t size) {}
+
+#define GC_NULL_EVENT_LISTENER \
+  ((struct gc_event_listener) { \
+    gc_null_event_listener_init, \
+    gc_null_event_listener_prepare_gc, \
+    gc_null_event_listener_requesting_stop, \
+    gc_null_event_listener_waiting_for_stop, \
+    gc_null_event_listener_mutators_stopped, \
+    gc_null_event_listener_roots_traced, \
+    gc_null_event_listener_heap_traced, \
+    gc_null_event_listener_ephemerons_traced, \
+    gc_null_event_listener_restarting_mutators, \
+    gc_null_event_listener_mutator_added, \
+    gc_null_event_listener_mutator_cause_gc, \
+    gc_null_event_listener_mutator_stopping, \
+    gc_null_event_listener_mutator_stopped, \
+    gc_null_event_listener_mutator_restarted, \
+    gc_null_event_listener_mutator_removed, \
+    gc_null_event_listener_heap_resized, \
+    gc_null_event_listener_live_data_size, \
+  })
+
+#endif // GC_NULL_EVENT_LISTENER_H
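
The null listener exists so that collectors can invoke the hooks unconditionally; an embedder that wants no instrumentation would presumably pass it with a null data pointer, along the lines of:

  // Assumed usage; the null callbacks ignore their data argument.
  gc_init(options, NULL, &heap, &mut, GC_NULL_EVENT_LISTENER, NULL);
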

@@ -7,6 +7,7 @@
 
 #include "assert.h"
 #include "gc-api.h"
+#include "gc-basic-stats.h"
 #include "gc-ephemeron.h"
 #include "simple-roots-api.h"
 #include "ephemerons-types.h"
@@ -231,7 +232,8 @@ int main(int argc, char *argv[]) {
 
   struct gc_heap *heap;
   struct gc_mutator *mut;
-  if (!gc_init(options, NULL, &heap, &mut)) {
+  struct gc_basic_stats stats;
+  if (!gc_init(options, NULL, &heap, &mut, GC_BASIC_STATS, &stats)) {
     fprintf(stderr, "Failed to initialize GC with heap size %zu bytes\n",
             (size_t)heap_size);
     return 1;
@@ -239,8 +241,6 @@ int main(int argc, char *argv[]) {
   struct thread main_thread = { mut, };
   gc_mutator_set_roots(mut, &main_thread.roots);
 
-  unsigned long test_start = current_time();
-
   pthread_t threads[MAX_THREAD_COUNT];
   // Run one of the threads in the main thread.
   for (size_t i = 1; i < nthreads; i++) {
@@ -262,9 +262,9 @@ int main(int argc, char *argv[]) {
     }
   }
 
-  print_elapsed("test", test_start);
-
-  gc_print_stats(heap);
+  gc_basic_stats_finish(&stats);
+  fputs("\n", stdout);
+  gc_basic_stats_print(&stats, stdout);
 
   return 0;
 }

@@ -46,6 +46,7 @@
 
 #include "assert.h"
 #include "gc-api.h"
+#include "gc-basic-stats.h"
 #include "mt-gcbench-types.h"
 #include "simple-roots-api.h"
 #include "simple-allocator.h"
@@ -362,7 +363,8 @@ int main(int argc, char *argv[]) {
 
   struct gc_heap *heap;
   struct gc_mutator *mut;
-  if (!gc_init(options, NULL, &heap, &mut)) {
+  struct gc_basic_stats stats;
+  if (!gc_init(options, NULL, &heap, &mut, GC_BASIC_STATS, &stats)) {
     fprintf(stderr, "Failed to initialize GC with heap size %zu bytes\n",
             heap_size);
     return 1;
@@ -373,8 +375,6 @@ int main(int argc, char *argv[]) {
   printf("Garbage Collector Test\n");
   printf(" Live storage will peak at %zd bytes.\n\n", heap_max_live);
 
-  unsigned long start = current_time();
-
   pthread_t threads[MAX_THREAD_COUNT];
   // Run one of the threads in the main thread.
   for (size_t i = 1; i < nthreads; i++) {
@@ -396,6 +396,7 @@ int main(int argc, char *argv[]) {
     }
   }
 
-  printf("Completed in %.3f msec\n", elapsed_millis(start));
-  gc_print_stats(heap);
+  gc_basic_stats_finish(&stats);
+  fputs("\n", stdout);
+  gc_basic_stats_print(&stats, stdout);
 }

@@ -5,6 +5,7 @@
 
 #include "assert.h"
 #include "gc-api.h"
+#include "gc-basic-stats.h"
 #include "simple-roots-api.h"
 #include "quads-types.h"
 #include "simple-allocator.h"
@@ -118,7 +119,6 @@ int main(int argc, char *argv[]) {
   size_t tree_bytes = nquads * sizeof(Quad);
   size_t heap_size = tree_bytes * multiplier;
 
-  unsigned long gc_start = current_time();
   printf("Allocating heap of %.3fGB (%.2f multiplier of live data).\n",
          heap_size / 1e9, multiplier);
 
@@ -134,7 +134,8 @@ int main(int argc, char *argv[]) {
 
   struct gc_heap *heap;
   struct gc_mutator *mut;
-  if (!gc_init(options, NULL, &heap, &mut)) {
+  struct gc_basic_stats stats;
+  if (!gc_init(options, NULL, &heap, &mut, GC_BASIC_STATS, &stats)) {
     fprintf(stderr, "Failed to initialize GC with heap size %zu bytes\n",
             heap_size);
     return 1;
@@ -169,9 +170,10 @@ int main(int argc, char *argv[]) {
     validate_tree(HANDLE_REF(quad), depth);
   }
   print_elapsed("allocation loop", garbage_start);
-  print_elapsed("quads test", gc_start);
 
-  gc_print_stats(heap);
+  gc_basic_stats_finish(&stats);
+  fputs("\n", stdout);
+  gc_basic_stats_print(&stats, stdout);
 
   POP_HANDLE(&t);
   return 0;

src/bdw.c | 75
@@ -54,6 +54,8 @@ struct gc_heap {
   pthread_mutex_t lock;
   struct gc_heap_roots *roots;
   struct gc_mutator *mutators;
+  struct gc_event_listener event_listener;
+  void *event_listener_data;
 };
 
 struct gc_mutator {
@@ -62,8 +64,15 @@ struct gc_mutator {
   struct gc_mutator_roots *roots;
   struct gc_mutator *next; // with heap lock
   struct gc_mutator **prev; // with heap lock
+  void *event_listener_data;
 };
 
+struct gc_heap *__the_bdw_gc_heap;
+#define HEAP_EVENT(event, ...) \
+  __the_bdw_gc_heap->event_listener.event(__the_bdw_gc_heap->event_listener_data, ##__VA_ARGS__)
+#define MUTATOR_EVENT(mut, event, ...) \
+  __the_bdw_gc_heap->event_listener.event(mut->event_listener_data, ##__VA_ARGS__)
+
 static inline size_t gc_inline_bytes_to_freelist_index(size_t bytes) {
   return (bytes - 1U) / GC_INLINE_GRANULE_BYTES;
 }
@@ -272,6 +281,7 @@ static inline struct gc_mutator *add_mutator(struct gc_heap *heap) {
   struct gc_mutator *ret =
     GC_generic_malloc(sizeof(struct gc_mutator), mutator_gc_kind);
   ret->heap = heap;
+  ret->event_listener_data = HEAP_EVENT(mutator_added);
 
   pthread_mutex_lock(&heap->lock);
   ret->next = heap->mutators;
@@ -317,10 +327,56 @@ gc_heap_pending_ephemerons(struct gc_heap *heap) {
   return NULL;
 }
 
-struct gc_heap *__the_bdw_gc_heap;
+static void on_collection_event(GC_EventType event) {
+  switch (event) {
+  case GC_EVENT_START: {
+    int is_minor = 0;
+    int is_compacting = 0;
+    HEAP_EVENT(prepare_gc, is_minor, is_compacting);
+    HEAP_EVENT(requesting_stop);
+    HEAP_EVENT(waiting_for_stop);
+    break;
+  }
+  case GC_EVENT_MARK_START:
+    HEAP_EVENT(mutators_stopped);
+    break;
+  case GC_EVENT_MARK_END:
+    HEAP_EVENT(roots_traced);
+    HEAP_EVENT(heap_traced);
+    break;
+  case GC_EVENT_RECLAIM_START:
+    break;
+  case GC_EVENT_RECLAIM_END:
+    // Sloppily attribute finalizers and eager reclamation to
+    // ephemerons.
+    HEAP_EVENT(ephemerons_traced);
+    HEAP_EVENT(live_data_size, GC_get_heap_size() - GC_get_free_bytes());
+    break;
+  case GC_EVENT_END:
+    HEAP_EVENT(restarting_mutators);
+    break;
+  case GC_EVENT_PRE_START_WORLD:
+  case GC_EVENT_POST_STOP_WORLD:
+    // Can't rely on these, as they are only fired when threads are
+    // enabled.
+    break;
+  case GC_EVENT_THREAD_SUSPENDED:
+  case GC_EVENT_THREAD_UNSUSPENDED:
+    // No nice way to map back to the mutator.
+    break;
+  default:
+    break;
+  }
+}
+
+static void on_heap_resize(GC_word size) {
+  HEAP_EVENT(heap_resized, size);
+}
 
 int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
-            struct gc_heap **heap, struct gc_mutator **mutator) {
+            struct gc_heap **heap, struct gc_mutator **mutator,
+            struct gc_event_listener event_listener,
+            void *event_listener_data) {
   // Root the heap, which will also cause all mutators to be marked.
   GC_ASSERT_EQ(gc_allocator_small_granule_size(), GC_INLINE_GRANULE_BYTES);
   GC_ASSERT_EQ(gc_allocator_large_threshold(),
@@ -389,9 +445,16 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
 
   *heap = GC_generic_malloc(sizeof(struct gc_heap), heap_gc_kind);
   pthread_mutex_init(&(*heap)->lock, NULL);
-  *mutator = add_mutator(*heap);
 
+  (*heap)->event_listener = event_listener;
+  (*heap)->event_listener_data = event_listener_data;
+
   __the_bdw_gc_heap = *heap;
+  HEAP_EVENT(init, GC_get_heap_size());
+  GC_set_on_collection_event(on_collection_event);
+  GC_set_on_heap_resize(on_heap_resize);
+
+  *mutator = add_mutator(*heap);
 
   // Sanity check.
   if (!GC_is_visible (&__the_bdw_gc_heap))
@@ -408,6 +471,7 @@ struct gc_mutator* gc_init_for_thread(struct gc_stack_addr *stack_base,
 }
 void gc_finish_for_thread(struct gc_mutator *mut) {
   pthread_mutex_lock(&mut->heap->lock);
+  MUTATOR_EVENT(mut, mutator_removed);
   *mut->prev = mut->next;
   if (mut->next)
     mut->next->prev = mut->prev;
@@ -432,8 +496,3 @@ void gc_heap_set_roots(struct gc_heap *heap, struct gc_heap_roots *roots) {
 void gc_heap_set_extern_space(struct gc_heap *heap,
                               struct gc_extern_space *space) {
 }
-
-void gc_print_stats(struct gc_heap *heap) {
-  printf("Completed %ld collections\n", (long)GC_get_gc_no());
-  printf("Heap size is %ld\n", (long)GC_get_heap_size());
-}
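
For orientation, the HEAP_EVENT and MUTATOR_EVENT macros above are plain indirect calls through the listener table stashed in the single global BDW heap; for example, HEAP_EVENT(prepare_gc, is_minor, is_compacting) expands to roughly:

  __the_bdw_gc_heap->event_listener.prepare_gc(
      __the_bdw_gc_heap->event_listener_data, is_minor, is_compacting);
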

src/semi.c | 41
@@ -45,13 +45,21 @@ struct gc_heap {
   int check_pending_ephemerons;
   const struct gc_options *options;
   struct gc_heap_roots *roots;
+  struct gc_event_listener event_listener;
+  void *event_listener_data;
 };
 // One mutator per space, can just store the heap in the mutator.
 struct gc_mutator {
   struct gc_heap heap;
   struct gc_mutator_roots *roots;
+  void *event_listener_data;
 };
 
+#define HEAP_EVENT(heap, event, ...) \
+  (heap)->event_listener.event((heap)->event_listener_data, ##__VA_ARGS__)
+#define MUTATOR_EVENT(mut, event, ...) \
+  (mut)->heap->event_listener.event((mut)->event_listener_data, ##__VA_ARGS__)
+
 static inline void clear_memory(uintptr_t addr, size_t size) {
   memset((char*)addr, 0, size);
 }
@@ -284,6 +292,8 @@ static size_t compute_new_heap_size(struct gc_heap *heap, size_t for_alloc) {
   live_bytes += large->live_pages_at_last_collection * semi->page_size;
   live_bytes += for_alloc;
 
+  HEAP_EVENT(heap, live_data_size, live_bytes);
+
   size_t new_heap_size = heap->size;
   switch (heap->options->common.heap_size_policy) {
     case GC_HEAP_SIZE_FIXED:
@@ -324,7 +334,10 @@ static void adjust_heap_size_and_limits(struct gc_heap *heap,
   new_region_size = min_size(new_region_size,
                              min_size(semi->to_space.mapped_size,
                                       semi->from_space.mapped_size));
+  size_t old_heap_size = heap->size;
   heap->size = new_region_size * 2;
+  if (heap->size != old_heap_size)
+    HEAP_EVENT(heap, heap_resized, heap->size);
   size_t stolen = align_up(semi->stolen_pages, 2) * semi->page_size;
   GC_ASSERT(new_region_size > stolen/2);
   size_t new_active_region_size = new_region_size - stolen/2;
@@ -339,6 +352,14 @@ static void adjust_heap_size_and_limits(struct gc_heap *heap,
 
 static void collect(struct gc_mutator *mut, size_t for_alloc) {
   struct gc_heap *heap = mutator_heap(mut);
+  int is_minor = 0;
+  int is_compacting = 1;
+  HEAP_EVENT(heap, prepare_gc, is_minor, is_compacting);
+
+  HEAP_EVENT(heap, requesting_stop);
+  HEAP_EVENT(heap, waiting_for_stop);
+  HEAP_EVENT(heap, mutators_stopped);
+
   struct semi_space *semi = heap_semi_space(heap);
   struct large_object_space *large = heap_large_object_space(heap);
   // fprintf(stderr, "start collect #%ld:\n", space->count);
@@ -352,20 +373,24 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   gc_trace_heap_roots(heap->roots, trace, heap, NULL);
   if (mut->roots)
     gc_trace_mutator_roots(mut->roots, trace, heap, NULL);
+  HEAP_EVENT(heap, roots_traced);
   // fprintf(stderr, "pushed %zd bytes in roots\n", space->hp - grey);
   while(grey < semi->hp)
     grey = scan(heap, gc_ref(grey));
+  HEAP_EVENT(heap, heap_traced);
   gc_scan_pending_ephemerons(heap->pending_ephemerons, heap, 0, 1);
   heap->check_pending_ephemerons = 1;
   while (gc_pop_resolved_ephemerons(heap, trace, NULL))
     while(grey < semi->hp)
       grey = scan(heap, gc_ref(grey));
+  HEAP_EVENT(heap, ephemerons_traced);
   large_object_space_finish_gc(large, 0);
   gc_extern_space_finish_gc(heap->extern_space, 0);
   semi_space_finish_gc(semi, large->live_pages_at_last_collection);
   gc_sweep_pending_ephemerons(heap->pending_ephemerons, 0, 1);
   adjust_heap_size_and_limits(heap, for_alloc);
 
+  HEAP_EVENT(heap, restarting_mutators);
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));
 }
@@ -539,7 +564,9 @@ int gc_options_parse_and_set(struct gc_options *options, int option,
 }
 
 int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
-            struct gc_heap **heap, struct gc_mutator **mut) {
+            struct gc_heap **heap, struct gc_mutator **mut,
+            struct gc_event_listener event_listener,
+            void *event_listener_data) {
   GC_ASSERT_EQ(gc_allocator_allocation_pointer_offset(),
                offsetof(struct semi_space, hp));
   GC_ASSERT_EQ(gc_allocator_allocation_limit_offset(),
@@ -563,6 +590,10 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   if (!heap_init(*heap, options))
     return 0;
 
+  (*heap)->event_listener = event_listener;
+  (*heap)->event_listener_data = event_listener_data;
+  HEAP_EVENT(*heap, init, (*heap)->size);
+
   if (!semi_space_init(heap_semi_space(*heap), *heap))
     return 0;
   if (!large_object_space_init(heap_large_object_space(*heap), *heap))
@@ -571,6 +602,9 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   // Ignore stack base, as we are precise.
   (*mut)->roots = NULL;
 
+  (*mut)->event_listener_data =
+    event_listener.mutator_added(event_listener_data);
+
   return 1;
 }
 
@@ -600,8 +634,3 @@ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
   // Can't be threads, then there won't be collection.
   return f(data);
 }
-
-void gc_print_stats(struct gc_heap *heap) {
-  printf("Completed %ld collections\n", heap->count);
-  printf("Heap size is %zd\n", heap->size);
-}

@@ -328,8 +328,15 @@ struct gc_heap {
   double minimum_major_gc_yield_threshold;
   double pending_ephemerons_size_factor;
   double pending_ephemerons_size_slop;
+  struct gc_event_listener event_listener;
+  void *event_listener_data;
 };
 
+#define HEAP_EVENT(heap, event, ...) \
+  (heap)->event_listener.event((heap)->event_listener_data, ##__VA_ARGS__)
+#define MUTATOR_EVENT(mut, event, ...) \
+  (mut)->heap->event_listener.event((mut)->event_listener_data, ##__VA_ARGS__)
+
 struct gc_mutator_mark_buf {
   size_t size;
   size_t capacity;
@@ -345,6 +352,7 @@ struct gc_mutator {
   struct gc_stack stack;
   struct gc_mutator_roots *roots;
   struct gc_mutator_mark_buf mark_buf;
+  void *event_listener_data;
   // Three uses for this in-object linked-list pointer:
   // - inactive (blocked in syscall) mutators
   // - grey objects when stopping active mutators for mark-in-place
@@ -855,6 +863,8 @@ static inline void heap_unlock(struct gc_heap *heap) {
 
 static void add_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
   mut->heap = heap;
+  mut->event_listener_data =
+    heap->event_listener.mutator_added(heap->event_listener_data);
   heap_lock(heap);
   // We have no roots. If there is a GC currently in progress, we have
   // nothing to add. Just wait until it's done.
@@ -868,6 +878,7 @@ static void add_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
 }
 
 static void remove_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
+  MUTATOR_EVENT(mut, mutator_removed);
   mut->heap = NULL;
   heap_lock(heap);
   heap->active_mutator_count--;
@@ -1416,10 +1427,13 @@ static void trace_generational_roots(struct gc_heap *heap) {
   }
 }
 
-static void pause_mutator_for_collection(struct gc_heap *heap) GC_NEVER_INLINE;
-static void pause_mutator_for_collection(struct gc_heap *heap) {
+static void pause_mutator_for_collection(struct gc_heap *heap,
+                                         struct gc_mutator *mut) GC_NEVER_INLINE;
+static void pause_mutator_for_collection(struct gc_heap *heap,
+                                         struct gc_mutator *mut) {
   GC_ASSERT(mutators_are_stopping(heap));
   GC_ASSERT(heap->active_mutator_count);
+  MUTATOR_EVENT(mut, mutator_stopped);
   heap->active_mutator_count--;
   if (heap->active_mutator_count == 0)
     pthread_cond_signal(&heap->collector_cond);
@@ -1436,6 +1450,7 @@ static void pause_mutator_for_collection(struct gc_heap *heap) {
     pthread_cond_wait(&heap->mutator_cond, &heap->lock);
   while (mutators_are_stopping(heap) && heap->count == epoch);
 
+  MUTATOR_EVENT(mut, mutator_restarted);
   heap->active_mutator_count++;
 }
 
@@ -1443,6 +1458,7 @@ static void pause_mutator_for_collection_with_lock(struct gc_mutator *mut) GC_NE
 static void pause_mutator_for_collection_with_lock(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   GC_ASSERT(mutators_are_stopping(heap));
+  MUTATOR_EVENT(mut, mutator_stopping);
   finish_sweeping_in_block(mut);
   gc_stack_capture_hot(&mut->stack);
   if (mutator_should_mark_while_stopping(mut))
@@ -1450,20 +1466,21 @@ static void pause_mutator_for_collection_with_lock(struct gc_mutator *mut) {
     trace_mutator_roots_with_lock(mut);
   else
     enqueue_mutator_for_tracing(mut);
-  pause_mutator_for_collection(heap);
+  pause_mutator_for_collection(heap, mut);
 }
 
 static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) GC_NEVER_INLINE;
 static void pause_mutator_for_collection_without_lock(struct gc_mutator *mut) {
   struct gc_heap *heap = mutator_heap(mut);
   GC_ASSERT(mutators_are_stopping(heap));
+  MUTATOR_EVENT(mut, mutator_stopping);
   finish_sweeping(mut);
   gc_stack_capture_hot(&mut->stack);
   if (mutator_should_mark_while_stopping(mut))
     trace_stopping_mutator_roots(mut);
   enqueue_mutator_for_tracing(mut);
   heap_lock(heap);
-  pause_mutator_for_collection(heap);
+  pause_mutator_for_collection(heap, mut);
   heap_unlock(heap);
   release_stopping_mutator_roots(mut);
 }
@@ -1816,28 +1833,38 @@ static void collect(struct gc_mutator *mut) {
     DEBUG("grew heap instead of collecting #%ld:\n", heap->count);
     return;
   }
+  MUTATOR_EVENT(mut, mutator_cause_gc);
   DEBUG("start collect #%ld:\n", heap->count);
   enum gc_kind gc_kind = determine_collection_kind(heap);
+  HEAP_EVENT(heap, prepare_gc, gc_kind & GC_KIND_FLAG_MINOR,
+             gc_kind & GC_KIND_FLAG_EVACUATING);
   update_mark_patterns(space, !(gc_kind & GC_KIND_FLAG_MINOR));
   large_object_space_start_gc(lospace, gc_kind & GC_KIND_FLAG_MINOR);
   gc_extern_space_start_gc(exspace, gc_kind & GC_KIND_FLAG_MINOR);
   resolve_ephemerons_lazily(heap);
   tracer_prepare(heap);
+  HEAP_EVENT(heap, requesting_stop);
   request_mutators_to_stop(heap);
   trace_mutator_roots_with_lock_before_stop(mut);
   finish_sweeping(mut);
+  HEAP_EVENT(heap, waiting_for_stop);
   wait_for_mutators_to_stop(heap);
+  HEAP_EVENT(heap, mutators_stopped);
   double yield = heap_last_gc_yield(heap);
   double fragmentation = heap_fragmentation(heap);
+  HEAP_EVENT(heap, live_data_size, heap->size * (1 - yield));
   fprintf(stderr, "last gc yield: %f; fragmentation: %f\n", yield, fragmentation);
   detect_out_of_memory(heap);
   trace_pinned_roots_after_stop(heap);
   prepare_for_evacuation(heap);
   trace_roots_after_stop(heap);
+  HEAP_EVENT(heap, roots_traced);
   tracer_trace(heap);
+  HEAP_EVENT(heap, heap_traced);
   resolve_ephemerons_eagerly(heap);
   while (enqueue_resolved_ephemerons(heap))
     tracer_trace(heap);
+  HEAP_EVENT(heap, ephemerons_traced);
   sweep_ephemerons(heap);
   tracer_release(heap);
   mark_space_finish_gc(space, gc_kind);
@@ -1848,6 +1875,7 @@ static void collect(struct gc_mutator *mut) {
   if (heap->last_collection_was_minor)
     heap->minor_count++;
   heap_reset_large_object_pages(heap, lospace->live_pages_at_last_collection);
+  HEAP_EVENT(heap, restarting_mutators);
   allow_mutators_to_continue(heap);
   DEBUG("collect done\n");
 }
@@ -2345,7 +2373,9 @@ static int mark_space_init(struct mark_space *space, struct gc_heap *heap) {
 }
 
 int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
-            struct gc_heap **heap, struct gc_mutator **mut) {
+            struct gc_heap **heap, struct gc_mutator **mut,
+            struct gc_event_listener event_listener,
+            void *event_listener_data) {
   GC_ASSERT_EQ(gc_allocator_small_granule_size(), GRANULE_SIZE);
   GC_ASSERT_EQ(gc_allocator_large_threshold(), LARGE_OBJECT_THRESHOLD);
   GC_ASSERT_EQ(gc_allocator_allocation_pointer_offset(),
@@ -2372,6 +2402,10 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr *stack_base,
   if (!heap_init(*heap, options))
     GC_CRASH();
 
+  (*heap)->event_listener = event_listener;
+  (*heap)->event_listener_data = event_listener_data;
+  HEAP_EVENT(*heap, init, (*heap)->size);
+
   struct mark_space *space = heap_mark_space(*heap);
   if (!mark_space_init(space, *heap)) {
     free(*heap);
@@ -2439,10 +2473,3 @@ void* gc_call_without_gc(struct gc_mutator *mut,
   reactivate_mutator(heap, mut);
   return ret;
 }
-
-void gc_print_stats(struct gc_heap *heap) {
-  printf("Completed %ld collections (%ld major)\n",
-         heap->count, heap->count - heap->minor_count);
-  printf("Heap size with overhead is %zd (%zu slabs)\n",
-         heap->size, heap_mark_space(heap)->nslabs);
-}