lospace: Rely on object_map to be immutable during collection
This avoids having multiple threads serialize through a mutex. We still have to allow for mutation of object_tree and remembered_edges, though.
parent 7885ea1037
commit 7a9de35aaa
4 changed files with 150 additions and 136 deletions
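The key idea: while a collection is in progress nothing inserts into or deletes from object_map, so tracer threads can consult it without taking the space lock; only object_tree (used for interior-pointer lookups) and remembered_edges still need their own mutexes. The hunks below are from the large object space implementation and its callers in the collectors, including src/pcc.c. As a minimal, self-contained sketch of the pattern, assuming nothing beyond the commit message: the toy_space type and its helpers below are illustrative stand-ins, not the collector's API.

// Minimal sketch (not the collector's real API): during a collection the
// address -> node table is treated as read-only, so tracer threads may
// query it without taking the space lock.  Mutation happens only outside
// collection, under the lock.
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 64

struct toy_space {
  pthread_mutex_t lock;          // guards mutation of the table
  uintptr_t table[TABLE_SIZE];   // toy stand-in for address_map
  int in_collection;             // while set, the table is immutable
};

static void toy_space_add(struct toy_space *s, uintptr_t addr) {
  // Only called between collections; writers serialize through the lock.
  pthread_mutex_lock(&s->lock);
  s->table[(addr >> 4) % TABLE_SIZE] = addr;
  pthread_mutex_unlock(&s->lock);
}

static int toy_space_contains_during_gc(struct toy_space *s, uintptr_t addr) {
  // No lock: the table cannot change while in_collection is set, so
  // concurrent tracer threads do not serialize through the mutex.
  return s->table[(addr >> 4) % TABLE_SIZE] == addr;
}

int main(void) {
  struct toy_space s;
  memset(&s, 0, sizeof(s));
  pthread_mutex_init(&s.lock, NULL);
  toy_space_add(&s, 0x1000);
  s.in_collection = 1;           // collection begins: table is now read-only
  printf("contains 0x1000: %d\n", toy_space_contains_during_gc(&s, 0x1000));
  printf("contains 0x2000: %d\n", toy_space_contains_during_gc(&s, 0x2000));
  return 0;
}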
@@ -16,21 +16,25 @@
 #include "background-thread.h"
 #include "freelist.h"
 
-// Logically the large object space is a treadmill space -- somewhat like a
-// copying collector, in that we allocate into tospace, and collection flips
-// tospace to fromspace, except that we just keep a record on the side of which
-// objects are in which space.  That way we slot into the abstraction of a
-// copying collector while not actually copying data.
+// A mark-sweep space with generational support.
 
 struct gc_heap;
 
+enum large_object_state {
+  LARGE_OBJECT_NURSERY = 0,
+  LARGE_OBJECT_MARKED_BIT = 1,
+  LARGE_OBJECT_MARK_TOGGLE_BIT = 2,
+  LARGE_OBJECT_MARK_0 = LARGE_OBJECT_MARKED_BIT,
+  LARGE_OBJECT_MARK_1 = LARGE_OBJECT_MARKED_BIT | LARGE_OBJECT_MARK_TOGGLE_BIT
+};
+
 struct large_object {
   uintptr_t addr;
   size_t size;
 };
 struct large_object_node;
 struct large_object_live_data {
-  uint8_t is_survivor;
+  uint8_t mark;
 };
 struct large_object_dead_data {
   uint8_t age;
@@ -65,17 +69,35 @@ DEFINE_FREELIST(large_object_freelist, sizeof(uintptr_t) * 8 - 1, 2,
                 struct large_object_node*);
 
 struct large_object_space {
-  // Access to all members protected by lock.
+  // Lock for object_map, quarantine, nursery, and marked.
   pthread_mutex_t lock;
+  // Lock for object_tree.
+  pthread_mutex_t object_tree_lock;
+  // Lock for remembered_edges.
+  pthread_mutex_t remembered_edges_lock;
+  // Locking order: You must hold the space lock when taking
+  // object_tree_lock.  Take no other lock while holding
+  // object_tree_lock.  remembered_edges_lock is a leaf; take no locks
+  // when holding it.
+
+  // The value for a large_object_node's "mark" field indicating a
+  // marked object; always nonzero, and alternating between two values
+  // at every major GC.
+  uint8_t marked;
+
   // Splay tree of objects, keyed by <addr, size> tuple.  Useful when
   // looking up object-for-address.
   struct large_object_tree object_tree;
 
   // Hash table of objects, where values are pointers to splay tree
   // nodes.  Useful when you have the object address and just want to
   // check something about it (for example its size).
   struct address_map object_map;
 
+  // In generational configurations, we collect all allocations in the
+  // last cycle into the nursery.
+  struct address_map nursery;
+
   // Size-segregated freelist of dead objects.  Allocations are first
   // served from the quarantine freelist before falling back to the OS
   // if needed.  Collected objects spend a second or two in quarantine
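The lock-ordering comment above can be summarized in a small sketch. The field names mirror the struct, but the two helper functions are hypothetical and only illustrate the stated discipline: space lock before object_tree_lock, and remembered_edges_lock as a leaf that never nests with anything.

// Sketch of the locking discipline described in the comments above
// (illustrative only; the helpers are invented for the example).
#include <pthread.h>

struct lospace_locks {
  pthread_mutex_t lock;                  // space lock
  pthread_mutex_t object_tree_lock;      // nests inside the space lock
  pthread_mutex_t remembered_edges_lock; // leaf: hold no other lock with it
};

// Correct nesting: space lock first, then object_tree_lock, released in
// reverse order.
static void with_tree(struct lospace_locks *l) {
  pthread_mutex_lock(&l->lock);
  pthread_mutex_lock(&l->object_tree_lock);
  /* ... mutate the splay tree and the object map together ... */
  pthread_mutex_unlock(&l->object_tree_lock);
  pthread_mutex_unlock(&l->lock);
}

// Correct: remembered_edges_lock is taken with no other lock held.
static void with_remembered_edges(struct lospace_locks *l) {
  pthread_mutex_lock(&l->remembered_edges_lock);
  /* ... add or remove a remembered edge ... */
  pthread_mutex_unlock(&l->remembered_edges_lock);
}

int main(void) {
  struct lospace_locks l;
  pthread_mutex_init(&l.lock, NULL);
  pthread_mutex_init(&l.object_tree_lock, NULL);
  pthread_mutex_init(&l.remembered_edges_lock, NULL);
  with_tree(&l);
  with_remembered_edges(&l);
  return 0;
}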
@@ -83,6 +105,10 @@ struct large_object_space {
   // mucking about too much with the TLB and so on.
   struct large_object_freelist quarantine;
 
+  // Set of edges from lospace that may reference young objects,
+  // possibly in other spaces.
+  struct address_set remembered_edges;
+
   size_t page_size;
   size_t page_size_log2;
   size_t total_pages;
@@ -90,17 +116,6 @@ struct large_object_space {
   size_t live_pages_at_last_collection;
   size_t pages_freed_by_last_collection;
   int synchronous_release;
-
-  // A partition of the set of live objects into three sub-spaces.  If
-  // all collections are major, the survivor space will always be empty.
-  // The values of these maps are splay tree nodes.
-  struct address_map from_space;
-  struct address_map to_space;
-  struct address_map survivor_space;
-
-  // Set of edges from lospace that may reference young objects,
-  // possibly in other spaces.
-  struct address_set remembered_edges;
 };
 
 static size_t
@@ -113,11 +128,17 @@ large_object_space_size_at_last_collection(struct large_object_space *space) {
   return space->live_pages_at_last_collection << space->page_size_log2;
 }
 
+static inline int
+large_object_space_contains_with_lock(struct large_object_space *space,
+                                      struct gc_ref ref) {
+  return address_map_contains(&space->object_map, gc_ref_value(ref));
+}
+
 static inline int
 large_object_space_contains(struct large_object_space *space,
                             struct gc_ref ref) {
   pthread_mutex_lock(&space->lock);
-  int ret = address_map_contains(&space->object_map, gc_ref_value(ref));
+  int ret = large_object_space_contains_with_lock(space, ref);
   pthread_mutex_unlock(&space->lock);
   return ret;
 }
@@ -125,37 +146,22 @@ large_object_space_contains(struct large_object_space *space,
 static inline struct gc_ref
 large_object_space_object_containing_edge(struct large_object_space *space,
                                           struct gc_edge edge) {
-  pthread_mutex_lock(&space->lock);
+  pthread_mutex_lock(&space->object_tree_lock);
   struct large_object_node *node =
     large_object_tree_lookup(&space->object_tree, gc_edge_address(edge));
   uintptr_t addr = (node && node->value.is_live) ? node->key.addr : 0;
-  pthread_mutex_unlock(&space->lock);
+  pthread_mutex_unlock(&space->object_tree_lock);
   return gc_ref(addr);
 }
 
-static void
-large_object_space_flip_survivor(uintptr_t addr, uintptr_t node_bits,
-                                 void *data) {
-  struct large_object_space *space = data;
-  struct large_object_node *node = (void*)node_bits;
-  GC_ASSERT(node->value.is_live && node->value.live.is_survivor);
-  node->value.live.is_survivor = 0;
-  address_map_add(&space->from_space, addr, (uintptr_t)node);
-}
-
 static void
 large_object_space_start_gc(struct large_object_space *space, int is_minor_gc) {
-  // Flip.  Note that when we flip, fromspace is empty, but it might have
-  // allocated storage, so we do need to do a proper swap.
-  struct address_map tmp;
-  memcpy(&tmp, &space->from_space, sizeof(tmp));
-  memcpy(&space->from_space, &space->to_space, sizeof(tmp));
-  memcpy(&space->to_space, &tmp, sizeof(tmp));
-
+  // Take the space lock to prevent
+  // large_object_space_process_quarantine from concurrently mutating
+  // the object map.
+  pthread_mutex_lock(&space->lock);
   if (!is_minor_gc) {
-    address_map_for_each(&space->survivor_space,
-                         large_object_space_flip_survivor, space);
-    address_map_clear(&space->survivor_space);
+    space->marked ^= LARGE_OBJECT_MARK_TOGGLE_BIT;
     space->live_pages_at_last_collection = 0;
   }
 }
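Flipping space->marked with LARGE_OBJECT_MARK_TOGGLE_BIT on a major collection makes every object marked in the previous cycle count as unmarked in the new one, without walking the heap to clear mark bytes. A self-contained toy follows; the enum values are taken from this diff, while the driver around them is invented for illustration.

// Small illustration of the alternating mark value used above.
#include <stdint.h>
#include <stdio.h>

enum large_object_state {
  LARGE_OBJECT_NURSERY = 0,
  LARGE_OBJECT_MARKED_BIT = 1,
  LARGE_OBJECT_MARK_TOGGLE_BIT = 2,
  LARGE_OBJECT_MARK_0 = LARGE_OBJECT_MARKED_BIT,
  LARGE_OBJECT_MARK_1 = LARGE_OBJECT_MARKED_BIT | LARGE_OBJECT_MARK_TOGGLE_BIT
};

int main(void) {
  uint8_t marked = LARGE_OBJECT_MARK_0;   // current "this object is live" value
  uint8_t obj_mark = LARGE_OBJECT_NURSERY;

  // Major GC 1: flip the expected value, then mark the object.
  marked ^= LARGE_OBJECT_MARK_TOGGLE_BIT;            // now LARGE_OBJECT_MARK_1
  obj_mark = marked;                                  // tracer marks the object
  printf("cycle 1: live? %d\n", obj_mark == marked);  // 1: survives the sweep

  // Major GC 2: flipping again instantly "unmarks" every object from cycle 1;
  // nothing has to walk the heap clearing mark bytes.
  marked ^= LARGE_OBJECT_MARK_TOGGLE_BIT;            // back to LARGE_OBJECT_MARK_0
  printf("cycle 2: live? %d\n", obj_mark == marked);  // 0: swept if unreached
  return 0;
}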
@@ -170,56 +176,57 @@ large_object_space_object_size(struct large_object_space *space,
   return node->key.size;
 }
 
-static void
-large_object_space_do_copy(struct large_object_space *space,
-                           struct large_object_node *node) {
-  GC_ASSERT(address_map_contains(&space->from_space, node->key.addr));
+static uint8_t*
+large_object_node_mark_loc(struct large_object_node *node) {
   GC_ASSERT(node->value.is_live);
-  GC_ASSERT(!node->value.live.is_survivor);
-  uintptr_t addr = node->key.addr;
-  size_t bytes = node->key.size;
-  uintptr_t node_bits = (uintptr_t)node;
-  space->live_pages_at_last_collection += bytes >> space->page_size_log2;
-  address_map_remove(&space->from_space, addr);
-  if (GC_GENERATIONAL) {
-    node->value.live.is_survivor = 1;
-    address_map_add(&space->survivor_space, addr, node_bits);
-  } else {
-    address_map_add(&space->to_space, addr, node_bits);
-  }
+  return &node->value.live.mark;
+}
+
+static uint8_t
+large_object_node_get_mark(struct large_object_node *node) {
+  return atomic_load_explicit(large_object_node_mark_loc(node),
+                              memory_order_acquire);
+}
+
+static struct large_object_node*
+large_object_space_lookup(struct large_object_space *space, struct gc_ref ref) {
+  return (struct large_object_node*) address_map_lookup(&space->object_map,
+                                                         gc_ref_value(ref),
+                                                         0);
 }
 
 static int
-large_object_space_copy(struct large_object_space *space, struct gc_ref ref) {
-  int copied = 0;
-  uintptr_t addr = gc_ref_value(ref);
-  pthread_mutex_lock(&space->lock);
-  uintptr_t node_bits = address_map_lookup(&space->from_space, addr, 0);
-  if (node_bits) {
-    large_object_space_do_copy(space, (struct large_object_node*) node_bits);
-    // Object is grey; place it on mark stack to visit its fields.
-    copied = 1;
-  }
-  pthread_mutex_unlock(&space->lock);
-  return copied;
+large_object_space_mark(struct large_object_space *space, struct gc_ref ref) {
+  struct large_object_node *node = large_object_space_lookup(space, ref);
+  if (!node)
+    return 0;
+  GC_ASSERT(node->value.is_live);
+
+  uint8_t *loc = large_object_node_mark_loc(node);
+  uint8_t mark = atomic_load_explicit(loc, memory_order_relaxed);
+  do {
+    if (mark == space->marked)
+      return 0;
+  } while (!atomic_compare_exchange_weak_explicit(loc, &mark, space->marked,
+                                                  memory_order_acq_rel,
+                                                  memory_order_acquire));
+
+  size_t pages = node->key.size >> space->page_size_log2;
+  space->live_pages_at_last_collection += pages;
+
+  return 1;
 }
 
 static int
-large_object_space_is_copied(struct large_object_space *space,
+large_object_space_is_marked(struct large_object_space *space,
                              struct gc_ref ref) {
-  GC_ASSERT(large_object_space_contains(space, ref));
-  int copied = 0;
-  uintptr_t addr = gc_ref_value(ref);
-  pthread_mutex_lock(&space->lock);
-  copied = !address_map_contains(&space->from_space, addr);
-  pthread_mutex_unlock(&space->lock);
-  return copied;
-}
-
-static int
-large_object_space_is_survivor_with_lock(struct large_object_space *space,
-                                         struct gc_ref ref) {
-  return address_map_contains(&space->survivor_space, gc_ref_value(ref));
+  struct large_object_node *node = large_object_space_lookup(space, ref);
+  if (!node)
+    return 0;
+  GC_ASSERT(node->value.is_live);
+
+  return atomic_load_explicit(large_object_node_mark_loc(node),
+                              memory_order_acquire) == space->marked;
 }
 
 static int
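large_object_space_mark claims an object by compare-and-swap, so when several tracer threads race on the same object exactly one of them wins and accounts its pages. The following toy program demonstrates the same idiom with C11 atomics and two threads; only the CAS loop mirrors the code above, everything else is invented for the example.

// Toy demonstration of the claim-by-CAS pattern: with two racing tracers,
// exactly one compare-exchange succeeds, so the page accounting happens once.
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint8_t mark;          // the object's mark byte
static uint8_t marked_value = 1;      // current "marked" value for this cycle
static _Atomic size_t live_pages;     // incremented only by the winning thread

static int try_mark(void) {
  uint8_t old = atomic_load_explicit(&mark, memory_order_relaxed);
  do {
    if (old == marked_value)
      return 0;                        // someone else already claimed it
  } while (!atomic_compare_exchange_weak_explicit(&mark, &old, marked_value,
                                                  memory_order_acq_rel,
                                                  memory_order_acquire));
  atomic_fetch_add(&live_pages, 4);    // pretend the object spans 4 pages
  return 1;
}

static void* tracer(void *arg) {
  (void)arg;
  try_mark();
  return NULL;
}

int main(void) {
  pthread_t a, b;
  pthread_create(&a, NULL, tracer, NULL);
  pthread_create(&b, NULL, tracer, NULL);
  pthread_join(a, NULL);
  pthread_join(b, NULL);
  printf("live pages accounted: %zu\n", (size_t)live_pages); // always 4
  return 0;
}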
@@ -227,7 +234,7 @@ large_object_space_is_survivor(struct large_object_space *space,
                                struct gc_ref ref) {
   GC_ASSERT(large_object_space_contains(space, ref));
   pthread_mutex_lock(&space->lock);
-  int old = large_object_space_is_survivor_with_lock(space, ref);
+  int old = large_object_space_is_marked(space, ref);
   pthread_mutex_unlock(&space->lock);
   return old;
 }
@@ -237,15 +244,17 @@ large_object_space_remember_edge(struct large_object_space *space,
                                  struct gc_ref obj,
                                  struct gc_edge edge) {
   GC_ASSERT(large_object_space_contains(space, obj));
-  int remembered = 0;
+  if (!large_object_space_is_survivor(space, obj))
+    return 0;
+
   uintptr_t edge_addr = gc_edge_address(edge);
-  pthread_mutex_lock(&space->lock);
-  if (large_object_space_is_survivor_with_lock(space, obj)
-      && !address_set_contains(&space->remembered_edges, edge_addr)) {
+  int remembered = 0;
+  pthread_mutex_lock(&space->remembered_edges_lock);
+  if (!address_set_contains(&space->remembered_edges, edge_addr)) {
     address_set_add(&space->remembered_edges, edge_addr);
     remembered = 1;
   }
-  pthread_mutex_unlock(&space->lock);
+  pthread_mutex_unlock(&space->remembered_edges_lock);
   return remembered;
 }
 
@@ -253,10 +262,10 @@ static void
 large_object_space_forget_edge(struct large_object_space *space,
                                struct gc_edge edge) {
   uintptr_t edge_addr = gc_edge_address(edge);
-  pthread_mutex_lock(&space->lock);
+  pthread_mutex_lock(&space->remembered_edges_lock);
   GC_ASSERT(address_set_contains(&space->remembered_edges, edge_addr));
   address_set_remove(&space->remembered_edges, edge_addr);
-  pthread_mutex_unlock(&space->lock);
+  pthread_mutex_unlock(&space->remembered_edges_lock);
 }
 
 static void
@@ -264,11 +273,6 @@ large_object_space_clear_remembered_edges(struct large_object_space *space) {
   address_set_clear(&space->remembered_edges);
 }
 
-static int large_object_space_mark_object(struct large_object_space *space,
-                                           struct gc_ref ref) {
-  return large_object_space_copy(space, ref);
-}
-
 static void
 large_object_space_add_to_freelist(struct large_object_space *space,
                                    struct large_object_node *node) {
@@ -297,11 +301,16 @@ large_object_space_remove_from_freelist(struct large_object_space *space,
 }
 
 static void
-large_object_space_reclaim_one(uintptr_t addr, uintptr_t node_bits,
-                               void *data) {
+large_object_space_sweep_one(uintptr_t addr, uintptr_t node_bits,
+                             void *data) {
   struct large_object_space *space = data;
   struct large_object_node *node = (struct large_object_node*) node_bits;
 
+  if (!GC_GENERATIONAL && !node->value.is_live)
+    return;
   GC_ASSERT(node->value.is_live);
-  large_object_space_add_to_freelist(space, node);
+  uint8_t mark = atomic_load_explicit(large_object_node_mark_loc(node),
+                                      memory_order_acquire);
+  if (mark != space->marked)
+    large_object_space_add_to_freelist(space, node);
 }
@@ -309,6 +318,7 @@ static void
 large_object_space_process_quarantine(void *data) {
   struct large_object_space *space = data;
   pthread_mutex_lock(&space->lock);
+  pthread_mutex_lock(&space->object_tree_lock);
   for (size_t idx = 0; idx < large_object_freelist_num_size_classes(); idx++) {
     struct large_object_node **link = &space->quarantine.buckets[idx];
     for (struct large_object_node *node = *link; node; node = *link) {
@@ -324,16 +334,23 @@ large_object_space_process_quarantine(void *data) {
       }
     }
   }
+  pthread_mutex_unlock(&space->object_tree_lock);
   pthread_mutex_unlock(&space->lock);
 }
 
 static void
 large_object_space_finish_gc(struct large_object_space *space,
                              int is_minor_gc) {
-  pthread_mutex_lock(&space->lock);
-  address_map_for_each(&space->from_space, large_object_space_reclaim_one,
-                       space);
-  address_map_clear(&space->from_space);
+  if (GC_GENERATIONAL) {
+    address_map_for_each(is_minor_gc ? &space->nursery : &space->object_map,
+                         large_object_space_sweep_one,
+                         space);
+    address_map_clear(&space->nursery);
+  } else {
+    address_map_for_each(&space->object_map,
+                         large_object_space_sweep_one,
+                         space);
+  }
   size_t free_pages =
     space->total_pages - space->live_pages_at_last_collection;
   space->pages_freed_by_last_collection = free_pages - space->free_pages;
|
@ -366,24 +383,20 @@ large_object_space_mark_conservative_ref(struct large_object_space *space,
|
||||||
addr -= displacement;
|
addr -= displacement;
|
||||||
}
|
}
|
||||||
|
|
||||||
pthread_mutex_lock(&space->lock);
|
struct large_object_node *node;
|
||||||
struct large_object_node *node = NULL;
|
|
||||||
if (possibly_interior) {
|
if (possibly_interior) {
|
||||||
|
pthread_mutex_lock(&space->object_tree_lock);
|
||||||
node = large_object_tree_lookup(&space->object_tree, addr);
|
node = large_object_tree_lookup(&space->object_tree, addr);
|
||||||
if (node && !address_map_contains(&space->from_space, node->key.addr))
|
pthread_mutex_unlock(&space->object_tree_lock);
|
||||||
node = NULL;
|
|
||||||
} else {
|
} else {
|
||||||
uintptr_t node_bits = address_map_lookup(&space->from_space, addr, 0);
|
node = large_object_space_lookup(space, gc_ref(addr));
|
||||||
node = (struct large_object_node*) node_bits;
|
|
||||||
}
|
}
|
||||||
struct gc_ref ret = gc_ref_null();
|
|
||||||
if (node) {
|
|
||||||
large_object_space_do_copy(space, node);
|
|
||||||
ret = gc_ref(node->key.addr);
|
|
||||||
}
|
|
||||||
pthread_mutex_unlock(&space->lock);
|
|
||||||
|
|
||||||
return ret;
|
if (node && node->value.is_live &&
|
||||||
|
large_object_space_mark(space, gc_ref(node->key.addr)))
|
||||||
|
return gc_ref(node->key.addr);
|
||||||
|
|
||||||
|
return gc_ref_null();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void*
|
static void*
|
||||||
|
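For possibly-interior conservative references, the splay tree keyed by <addr, size> is what lets an arbitrary address inside an object resolve to that object's base. Below is a small stand-alone sketch of that containment lookup, with a sorted array and binary search standing in for the splay tree; all data is invented for illustration.

// Sketch: resolve a possibly-interior pointer to its object's base address,
// in the spirit of the tree lookup above.
#include <stdint.h>
#include <stdio.h>

struct extent { uintptr_t addr; size_t size; };

// Objects sorted by base address (toy data).
static const struct extent objs[] = {
  { 0x10000, 0x4000 },
  { 0x20000, 0x8000 },
  { 0x40000, 0x1000 },
};

static uintptr_t base_for(uintptr_t p) {
  size_t lo = 0, hi = sizeof(objs) / sizeof(objs[0]);
  while (lo < hi) {                    // find last extent starting at or below p
    size_t mid = lo + (hi - lo) / 2;
    if (objs[mid].addr <= p) lo = mid + 1; else hi = mid;
  }
  if (lo == 0) return 0;               // p is below every object
  const struct extent *e = &objs[lo - 1];
  return (p < e->addr + e->size) ? e->addr : 0;  // inside [addr, addr+size)?
}

int main(void) {
  printf("%#lx\n", (unsigned long)base_for(0x20010)); // interior -> 0x20000
  printf("%#lx\n", (unsigned long)base_for(0x30000)); // between objects -> 0
  return 0;
}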
@@ -417,8 +430,9 @@ large_object_space_alloc(struct large_object_space *space, size_t npages) {
     large_object_space_add_to_freelist(space, tail_node);
   }
 
-  // Add the object to tospace.
-  address_map_add(&space->to_space, node->key.addr, (uintptr_t)node);
+  // Add the object to the nursery.
+  if (GC_GENERATIONAL)
+    address_map_add(&space->nursery, node->key.addr, (uintptr_t)node);
 
   space->free_pages -= npages;
   ret = (void*)node->key.addr;
@@ -441,15 +455,16 @@ large_object_space_obtain_and_alloc(struct large_object_space *space,
   struct large_object k = { addr, bytes };
   struct large_object_data v = {0,};
   v.is_live = 1;
-  v.live.is_survivor = 0;
+  v.live.mark = 0;
 
   pthread_mutex_lock(&space->lock);
+  pthread_mutex_lock(&space->object_tree_lock);
   struct large_object_node *node =
     large_object_tree_insert(&space->object_tree, k, v);
   uintptr_t node_bits = (uintptr_t)node;
   address_map_add(&space->object_map, addr, node_bits);
-  address_map_add(&space->to_space, addr, node_bits);
   space->total_pages += npages;
+  pthread_mutex_unlock(&space->object_tree_lock);
   pthread_mutex_unlock(&space->lock);
 
   return ret;
@@ -461,18 +476,17 @@ large_object_space_init(struct large_object_space *space,
                         struct gc_background_thread *thread) {
   memset(space, 0, sizeof(*space));
   pthread_mutex_init(&space->lock, NULL);
+  pthread_mutex_init(&space->object_tree_lock, NULL);
+  pthread_mutex_init(&space->remembered_edges_lock, NULL);
 
   space->page_size = getpagesize();
   space->page_size_log2 = __builtin_ctz(space->page_size);
 
   large_object_tree_init(&space->object_tree);
   address_map_init(&space->object_map);
+  address_map_init(&space->nursery);
   large_object_freelist_init(&space->quarantine);
 
-  address_map_init(&space->from_space);
-  address_map_init(&space->to_space);
-  address_map_init(&space->survivor_space);
-
   address_set_init(&space->remembered_edges);
 
   if (thread)
@@ -135,8 +135,7 @@ do_trace(struct gc_heap *heap, struct gc_edge edge, struct gc_ref ref,
     return nofl_space_evacuate_or_mark_object(heap_nofl_space(heap), edge, ref,
                                               &data->allocator);
   else if (large_object_space_contains(heap_large_object_space(heap), ref))
-    return large_object_space_mark_object(heap_large_object_space(heap),
-                                          ref);
+    return large_object_space_mark(heap_large_object_space(heap), ref);
   else
     return gc_extern_space_visit(heap_extern_space(heap), edge, ref);
 }
@@ -170,8 +169,8 @@ gc_visit_ephemeron_key(struct gc_edge edge, struct gc_heap *heap) {
     return nofl_space_forward_or_mark_if_traced(nofl_space, edge, ref);
 
   struct large_object_space *lospace = heap_large_object_space(heap);
-  if (large_object_space_contains(lospace, ref))
-    return large_object_space_is_copied(lospace, ref);
+  if (large_object_space_contains_with_lock(lospace, ref))
+    return large_object_space_is_marked(lospace, ref);
 
   GC_CRASH();
 }
src/pcc.c (10 changes)
@@ -382,7 +382,7 @@ static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
     // Note that although the target of the edge might not be in lospace, this
     // will do what we want and return 1 if and only if ref is was a young
     // object in lospace.
-    return large_object_space_copy(heap_large_object_space(heap), ref);
+    return large_object_space_mark(heap_large_object_space(heap), ref);
   }
 }
 
@@ -411,8 +411,8 @@ static inline int do_trace(struct gc_heap *heap, struct gc_edge edge,
   }
 
   // Fall through for objects in large or extern spaces.
-  if (large_object_space_contains(heap_large_object_space(heap), ref))
-    return large_object_space_mark_object(heap_large_object_space(heap), ref);
+  if (large_object_space_contains_with_lock(heap_large_object_space(heap), ref))
+    return large_object_space_mark(heap_large_object_space(heap), ref);
   else
     return gc_extern_space_visit(heap_extern_space(heap), edge, ref);
 }
@@ -451,8 +451,8 @@ int gc_visit_ephemeron_key(struct gc_edge edge, struct gc_heap *heap) {
     return copy_space_forward_if_traced(heap_mono_space(heap), edge, ref);
   }
 
-  if (large_object_space_contains(heap_large_object_space(heap), ref))
-    return large_object_space_is_copied(heap_large_object_space(heap), ref);
+  if (large_object_space_contains_with_lock(heap_large_object_space(heap), ref))
+    return large_object_space_is_marked(heap_large_object_space(heap), ref);
   GC_CRASH();
 }
@@ -207,7 +207,7 @@ static void visit_semi_space(struct gc_heap *heap, struct semi_space *space,
 static void visit_large_object_space(struct gc_heap *heap,
                                      struct large_object_space *space,
                                      struct gc_ref ref) {
-  if (large_object_space_copy(space, ref)) {
+  if (large_object_space_mark(space, ref)) {
     if (GC_UNLIKELY(heap->check_pending_ephemerons))
       gc_resolve_pending_ephemerons(ref, heap);
 
@@ -245,7 +245,8 @@ static void visit(struct gc_edge edge, struct gc_heap *heap) {
     return;
   if (semi_space_contains(heap_semi_space(heap), ref))
     visit_semi_space(heap, heap_semi_space(heap), edge, ref);
-  else if (large_object_space_contains(heap_large_object_space(heap), ref))
+  else if (large_object_space_contains_with_lock(heap_large_object_space(heap),
+                                                 ref))
     visit_large_object_space(heap, heap_large_object_space(heap), ref);
   else
     visit_external_object(heap, heap->extern_space, edge, ref);
@@ -268,8 +269,8 @@ int gc_visit_ephemeron_key(struct gc_edge edge, struct gc_heap *heap) {
       return 0;
     gc_edge_update(edge, gc_ref(forwarded));
     return 1;
-  } else if (large_object_space_contains(heap_large_object_space(heap), ref)) {
-    return large_object_space_is_copied(heap_large_object_space(heap), ref);
+  } else if (large_object_space_contains_with_lock(heap_large_object_space(heap), ref)) {
+    return large_object_space_is_marked(heap_large_object_space(heap), ref);
   }
   GC_CRASH();
 }