diff --git a/mark-sweep.h b/mark-sweep.h
index 034f59813..7feb78d50 100644
--- a/mark-sweep.h
+++ b/mark-sweep.h
@@ -136,8 +136,8 @@ static inline struct context* mutator_context(struct mutator *mut) {
   return mark_space_context(mutator_mark_space(mut));
 }
 
-static inline struct marker* context_marker(struct context *cx) {
-  return &cx->marker;
+static inline struct marker* mark_space_marker(struct mark_space *space) {
+  return &mark_space_context(space)->marker;
 }
 
 static inline struct gcobj_free**
@@ -154,15 +154,16 @@ static inline void clear_memory(uintptr_t addr, size_t size) {
 
 static void collect(struct mutator *mut) NEVER_INLINE;
 
-static inline uint8_t* mark_byte(struct context *cx, struct gcobj *obj) {
+static inline uint8_t* mark_byte(struct mark_space *space, struct gcobj *obj) {
+  struct context *cx = mark_space_context(space);
   ASSERT(cx->heap_base <= (uintptr_t) obj);
   ASSERT((uintptr_t) obj < cx->heap_base + cx->heap_size);
   uintptr_t granule = (((uintptr_t) obj) - cx->heap_base) / GRANULE_SIZE;
   return &cx->mark_bytes[granule];
 }
 
-static inline int mark_object(struct context *cx, struct gcobj *obj) {
-  uint8_t *byte = mark_byte(cx, obj);
+static inline int mark_object(struct mark_space *space, struct gcobj *obj) {
+  uint8_t *byte = mark_byte(space, obj);
   if (*byte)
     return 0;
   *byte = 1;
@@ -189,13 +190,17 @@ static void clear_freelists(struct context *cx) {
 }
 
 static void collect(struct mutator *mut) {
+  struct mark_space *space = mutator_mark_space(mut);
   struct context *cx = mutator_context(mut);
   DEBUG("start collect #%ld:\n", cx->count);
-  marker_prepare(cx);
-  for (struct handle *h = mut->roots; h; h = h->next)
-    marker_visit_root(&h->v, cx);
-  marker_trace(cx);
-  marker_release(cx);
+  marker_prepare(space);
+  for (struct handle *h = mut->roots; h; h = h->next) {
+    struct gcobj *root = h->v;
+    if (root && mark_object(space, root))
+      marker_enqueue_root(mark_space_marker(space), root);
+  }
+  marker_trace(space);
+  marker_release(space);
   DEBUG("done marking\n");
   cx->sweep = cx->heap_base;
   clear_freelists(cx);
@@ -303,9 +308,10 @@ static size_t next_mark(const uint8_t *mark, size_t limit) {
 
 // Sweep some heap to reclaim free space. Return 1 if there is more
 // heap to sweep, or 0 if we reached the end.
-static int sweep(struct context *cx, size_t for_granules) {
+static int sweep(struct mark_space *space, size_t for_granules) {
   // Sweep until we have reclaimed 128 granules (1024 kB), or we reach
   // the end of the heap.
+  struct context *cx = mark_space_context(space);
   ssize_t to_reclaim = 128;
   uintptr_t sweep = cx->sweep;
   uintptr_t limit = cx->heap_base + cx->heap_size;
@@ -314,7 +320,7 @@ static int sweep(struct context *cx, size_t for_granules) {
     return 0;
 
   while (to_reclaim > 0 && sweep < limit) {
-    uint8_t* mark = mark_byte(cx, (struct gcobj*)sweep);
+    uint8_t* mark = mark_byte(space, (struct gcobj*)sweep);
     size_t limit_granules = (limit - sweep) >> GRANULE_SIZE_LOG_2;
     if (limit_granules > for_granules)
       limit_granules = for_granules;
@@ -360,7 +366,7 @@ static void* allocate_large(struct mutator *mut, enum alloc_kind kind,
       }
     }
     already_scanned = cx->large_objects;
-  } while (sweep(cx, granules));
+  } while (sweep(mutator_mark_space(mut), granules));
 
   // No large object, and we swept across the whole heap. Collect.
   if (swept_from_beginning) {
@@ -403,7 +409,7 @@ static void fill_small(struct mutator *mut, enum small_object_size kind) {
       return;
     }
 
-    if (!sweep(cx, LARGE_OBJECT_GRANULE_THRESHOLD)) {
+    if (!sweep(mutator_mark_space(mut), LARGE_OBJECT_GRANULE_THRESHOLD)) {
      if (swept_from_beginning) {
        fprintf(stderr, "ran out of space, heap size %zu\n", cx->heap_size);
        abort();
@@ -495,7 +501,7 @@ static int initialize_gc(size_t size, struct heap **heap,
   clear_freelists(cx);
   cx->sweep = cx->heap_base + cx->heap_size;
   cx->count = 0;
-  if (!marker_init(cx))
+  if (!marker_init(space))
     abort();
   reclaim(cx, (void*)cx->heap_base, size_to_granules(cx->heap_size));
 
diff --git a/parallel-marker.h b/parallel-marker.h
index 1bfbbbd93..6240210aa 100644
--- a/parallel-marker.h
+++ b/parallel-marker.h
@@ -276,7 +276,7 @@ enum mark_worker_state {
 };
 
 struct mark_worker {
-  struct context *cx;
+  struct mark_space *space;
   size_t id;
   size_t steal_id;
   pthread_t thread;
@@ -301,19 +301,19 @@ struct marker {
 struct local_marker {
   struct mark_worker *worker;
   struct mark_deque *share_deque;
-  struct context *cx;
+  struct mark_space *space;
   struct local_mark_queue local;
 };
 
 struct context;
-static inline struct marker* context_marker(struct context *cx);
+static inline struct marker* mark_space_marker(struct mark_space *space);
 
 static size_t number_of_current_processors(void) { return 1; }
 
 static int
-mark_worker_init(struct mark_worker *worker, struct context *cx,
+mark_worker_init(struct mark_worker *worker, struct mark_space *space,
                  struct marker *marker, size_t id) {
-  worker->cx = cx;
+  worker->space = space;
   worker->id = id;
   worker->steal_id = 0;
   worker->thread = 0;
@@ -367,7 +367,7 @@ mark_worker_spawn(struct mark_worker *worker) {
 
 static void
 mark_worker_request_mark(struct mark_worker *worker) {
-  struct marker *marker = context_marker(worker->cx);
+  struct marker *marker = mark_space_marker(worker->space);
 
   pthread_mutex_lock(&worker->lock);
   ASSERT(worker->state == MARK_WORKER_IDLE);
@@ -379,7 +379,7 @@ mark_worker_request_mark(struct mark_worker *worker) {
 static void
 mark_worker_finished_marking(struct mark_worker *worker) {
   // Signal controller that we are done with marking.
-  struct marker *marker = context_marker(worker->cx);
+  struct marker *marker = mark_space_marker(worker->space);
 
   if (atomic_fetch_sub(&marker->running_markers, 1) == 1) {
     pthread_mutex_lock(&marker->lock);
@@ -399,8 +399,8 @@ mark_worker_request_stop(struct mark_worker *worker) {
 }
 
 static int
-marker_init(struct context *cx) {
-  struct marker *marker = context_marker(cx);
+marker_init(struct mark_space *space) {
+  struct marker *marker = mark_space_marker(space);
   atomic_init(&marker->active_markers, 0);
   atomic_init(&marker->running_markers, 0);
   marker->count = 0;
@@ -414,7 +414,7 @@ marker_init(struct context *cx) {
   if (desired_worker_count > MARK_WORKERS_MAX_COUNT)
     desired_worker_count = MARK_WORKERS_MAX_COUNT;
   for (size_t i = 0; i < desired_worker_count; i++) {
-    if (!mark_worker_init(&marker->workers[i], cx, marker, i))
+    if (!mark_worker_init(&marker->workers[i], space, marker, i))
       break;
     if (mark_worker_spawn(&marker->workers[i]))
       marker->worker_count++;
@@ -424,13 +424,13 @@ marker_init(struct context *cx) {
   return marker->worker_count > 0;
 }
 
-static void marker_prepare(struct context *cx) {
-  struct marker *marker = context_marker(cx);
+static void marker_prepare(struct mark_space *space) {
+  struct marker *marker = mark_space_marker(space);
   for (size_t i = 0; i < marker->worker_count; i++)
     marker->workers[i].steal_id = 0;
 }
-static void marker_release(struct context *cx) {
-  struct marker *marker = context_marker(cx);
+static void marker_release(struct mark_space *space) {
+  struct marker *marker = mark_space_marker(space);
   for (size_t i = 0; i < marker->worker_count; i++)
     mark_deque_release(&marker->workers[i].deque);
 }
@@ -438,7 +438,7 @@ static void marker_release(struct context *cx) {
 struct gcobj;
 static inline void marker_visit(void **loc, void *mark_data) ALWAYS_INLINE;
 static inline void trace_one(struct gcobj *obj, void *mark_data) ALWAYS_INLINE;
-static inline int mark_object(struct context *cx,
+static inline int mark_object(struct mark_space *space,
                               struct gcobj *obj) ALWAYS_INLINE;
 
 static inline void
@@ -452,7 +452,7 @@ static inline void
 marker_visit(void **loc, void *mark_data) {
   struct local_marker *mark = mark_data;
   struct gcobj *obj = *loc;
-  if (obj && mark_object(mark->cx, obj)) {
+  if (obj && mark_object(mark->space, obj)) {
     if (local_mark_queue_full(&mark->local))
       marker_share(mark);
     local_mark_queue_push(&mark->local, (uintptr_t)obj);
@@ -550,7 +550,7 @@ mark_worker_check_termination(struct mark_worker *worker,
 
 static uintptr_t
 mark_worker_steal(struct local_marker *mark) {
-  struct marker *marker = context_marker(mark->cx);
+  struct marker *marker = mark_space_marker(mark->space);
   struct mark_worker *worker = mark->worker;
 
   while (1) {
@@ -569,7 +569,7 @@ mark_worker_mark(struct mark_worker *worker) {
   struct local_marker mark;
   mark.worker = worker;
   mark.share_deque = &worker->deque;
-  mark.cx = worker->cx;
+  mark.space = worker->space;
   local_mark_queue_init(&mark.local);
 
   size_t n = 0;
@@ -592,16 +592,14 @@ mark_worker_mark(struct mark_worker *worker) {
 }
 
 static inline void
-marker_visit_root(void **loc, struct context *cx) {
-  struct gcobj *obj = *loc;
-  struct mark_deque *worker0_deque = &context_marker(cx)->workers[0].deque;
-  if (obj && mark_object(cx, obj))
-    mark_deque_push(worker0_deque, (uintptr_t)obj);
+marker_enqueue_root(struct marker *marker, struct gcobj *obj) {
+  struct mark_deque *worker0_deque = &marker->workers[0].deque;
+  mark_deque_push(worker0_deque, (uintptr_t)obj);
 }
 
 static inline void
-marker_trace(struct context *cx) {
-  struct marker *marker = context_marker(cx);
+marker_trace(struct mark_space *space) {
+  struct marker *marker = mark_space_marker(space);
 
   pthread_mutex_lock(&marker->lock);
   long mark_count = marker->count;
diff --git a/serial-marker.h b/serial-marker.h
index 8cc65a328..5f5330b6d 100644
--- a/serial-marker.h
+++ b/serial-marker.h
@@ -124,40 +124,45 @@ struct marker {
   struct mark_queue queue;
 };
 
-struct context;
-static inline struct marker* context_marker(struct context *cx);
+struct mark_space;
+static inline struct marker* mark_space_marker(struct mark_space *space);
 
 static int
-marker_init(struct context *cx) {
-  return mark_queue_init(&context_marker(cx)->queue);
+marker_init(struct mark_space *space) {
+  return mark_queue_init(&mark_space_marker(space)->queue);
 }
-static void marker_prepare(struct context *cx) {}
-static void marker_release(struct context *cx) {
-  mark_queue_release(&context_marker(cx)->queue);
+static void marker_prepare(struct mark_space *space) {}
+static void marker_release(struct mark_space *space) {
+  mark_queue_release(&mark_space_marker(space)->queue);
 }
 
 struct gcobj;
 static inline void marker_visit(void **loc, void *mark_data) ALWAYS_INLINE;
 static inline void trace_one(struct gcobj *obj, void *mark_data) ALWAYS_INLINE;
-static inline int mark_object(struct context *cx,
+static inline int mark_object(struct mark_space *space,
                               struct gcobj *obj) ALWAYS_INLINE;
 
+static inline void
+marker_enqueue_root(struct marker *marker, struct gcobj *obj) {
+  mark_queue_push(&marker->queue, obj);
+}
+static inline void
+marker_enqueue_roots(struct marker *marker, struct gcobj **objs,
+                     size_t count) {
+  mark_queue_push_many(&marker->queue, objs, count);
+}
 static inline void
 marker_visit(void **loc, void *mark_data) {
-  struct context *cx = mark_data;
+  struct mark_space *space = mark_data;
   struct gcobj *obj = *loc;
-  if (obj && mark_object(cx, obj))
-    mark_queue_push(&context_marker(cx)->queue, obj);
+  if (obj && mark_object(space, obj))
+    marker_enqueue_root(mark_space_marker(space), obj);
 }
 static inline void
-marker_visit_root(void **loc, struct context *cx) {
-  marker_visit(loc, cx);
-}
-static inline void
-marker_trace(struct context *cx) {
+marker_trace(struct mark_space *space) {
   struct gcobj *obj;
-  while ((obj = mark_queue_pop(&context_marker(cx)->queue)))
-    trace_one(obj, cx);
+  while ((obj = mark_queue_pop(&mark_space_marker(space)->queue)))
+    trace_one(obj, space);
 }
 
 #endif // SERIAL_MARK_H
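Note: of the two marker implementations, only serial-marker.h grows the batch
entry point marker_enqueue_roots(); parallel-marker.h gets just the
single-object marker_enqueue_root(). As a hypothetical sketch (not part of
this patch), a caller holding its roots in an array could filter them through
mark_object() and hand the survivors over in one call, preserving the new
invariant that only freshly marked objects are enqueued:

    /* Hypothetical helper, not in this patch: batch-enqueue an array of
       roots with the serial marker.  mark_object() returns nonzero only
       the first time an object is marked, so nothing is enqueued twice. */
    static void enqueue_root_array(struct mark_space *space,
                                   struct gcobj **roots, size_t n) {
      size_t kept = 0;
      for (size_t i = 0; i < n; i++)
        if (roots[i] && mark_object(space, roots[i]))
          roots[kept++] = roots[i];  /* compact marked roots in place */
      marker_enqueue_roots(mark_space_marker(space), roots, kept);
    }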