Remove gc_allocator_needs_clear
Whether the returned object needs to be cleared or not depends on a couple of things:

  - whether the embedder actually needs the object to be cleared, and
  - whether the collector allocated the object from memory that was already all zeroes.

The goal, of course, would be to avoid clearing memory that the mutator is just going to write all over anyway. But it's hard to know statically whether the memory would have been all zeroes in any case, and if it was, clearing it again is double work. In the end it's simpler to just require collectors to clear memory in bulk. We can revisit this later if it becomes an issue.
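
To make the tradeoff concrete, here is a minimal, self-contained sketch of the two approaches: clearing each object at allocation time versus having the collector clear its free region in bulk. This is a hypothetical bump-pointer allocator invented purely for illustration; none of the names below come from the Whippet API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REGION_SIZE (1u << 20)

static uint8_t region[REGION_SIZE];  /* stand-in for a nursery or to-space */
static size_t hp;                    /* bump pointer into the region */

/* Option 1: clear each object as it is allocated.  Every allocation pays a
   memset, even when the caller immediately overwrites the object, and even
   when the underlying memory was already zero. */
static void* alloc_clearing_each_object(size_t size) {
  void *obj = &region[hp];
  hp += size;
  memset(obj, 0, size);
  return obj;
}

/* Option 2: clear the whole free region in bulk (here at startup; in a real
   collector, after a collection), so allocation is just a pointer bump and
   objects come out already zeroed. */
static void clear_free_region(void) {
  memset(&region[hp], 0, REGION_SIZE - hp);
}

static void* alloc_from_precleared_region(size_t size) {
  void *obj = &region[hp];
  hp += size;
  return obj;
}

int main(void) {
  clear_free_region();
  uint32_t *a = alloc_from_precleared_region(sizeof *a);  /* already zero */
  uint32_t *b = alloc_clearing_each_object(sizeof *b);    /* zeroed redundantly */
  printf("%" PRIu32 " %" PRIu32 "\n", *a, *b);
  return 0;
}

In the diff below, the per-object path corresponds to gc_clear_fresh_allocation(), guarded by gc_allocator_needs_clear(); the bulk path corresponds to the clear_memory() call added to collect(), which zeroes everything between semi->hp and semi->limit after a collection.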

parent 3db1e48ea6
commit f1b660484e

8 changed files with 1 addition and 31 deletions

@@ -36,10 +36,6 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
   GC_CRASH();
 }
 
-static inline int gc_allocator_needs_clear(void) {
-  return 0;
-}
-
 static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t) {
   return GC_OLD_GENERATION_CHECK_NONE;
 }

api/gc-api.h

@@ -56,14 +56,6 @@ GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
 GC_API_ void gc_collect(struct gc_mutator *mut,
                         enum gc_collection_kind requested_kind);
 
-static inline void gc_clear_fresh_allocation(struct gc_ref obj,
-                                             size_t size) GC_ALWAYS_INLINE;
-static inline void gc_clear_fresh_allocation(struct gc_ref obj,
-                                             size_t size) {
-  if (!gc_allocator_needs_clear()) return;
-  memset(gc_ref_heap_object(obj), 0, size);
-}
-
 static inline void gc_update_alloc_table(struct gc_ref obj,
                                          size_t size) GC_ALWAYS_INLINE;
 static inline void gc_update_alloc_table(struct gc_ref obj,

@@ -119,7 +111,6 @@ static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut,
 
   *hp_loc = new_hp;
 
-  gc_clear_fresh_allocation(gc_ref(hp), size);
   gc_update_alloc_table(gc_ref(hp), size);
 
   return (void*)hp;

@@ -140,7 +131,6 @@ static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut, size
 
   *freelist_loc = *(void**)head;
 
-  gc_clear_fresh_allocation(gc_ref_from_heap_object(head), size);
   gc_update_alloc_table(gc_ref_from_heap_object(head), size);
 
   return head;

@@ -25,8 +25,6 @@ static inline size_t gc_allocator_alloc_table_alignment(void) GC_ALWAYS_INLINE;
 static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) GC_ALWAYS_INLINE;
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) GC_ALWAYS_INLINE;
 
-static inline int gc_allocator_needs_clear(void) GC_ALWAYS_INLINE;
-
 enum gc_old_generation_check_kind {
   GC_OLD_GENERATION_CHECK_NONE,
   GC_OLD_GENERATION_CHECK_ALLOC_TABLE,

@@ -36,10 +36,6 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
   return 16;
 }
 
-static inline int gc_allocator_needs_clear(void) {
-  return 0;
-}
-
 static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t obj_size) {
   if (GC_GENERATIONAL) {
     if (obj_size <= gc_allocator_large_threshold())

@@ -39,10 +39,6 @@ static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
   GC_CRASH();
 }
 
-static inline int gc_allocator_needs_clear(void) {
-  return 0;
-}
-
 static inline enum gc_old_generation_check_kind gc_old_generation_check_kind(size_t size) {
   if (!GC_GENERATIONAL)
     return GC_OLD_GENERATION_CHECK_NONE;

@@ -28,10 +28,6 @@ static inline size_t gc_allocator_freelist_offset(size_t size) {
   GC_CRASH();
 }
 
-static inline int gc_allocator_needs_clear(void) {
-  return 1;
-}
-
 static inline size_t gc_allocator_alloc_table_alignment(void) {
   return 0;
 }

@@ -997,7 +997,6 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
     break;
   }
 
-  gc_clear_fresh_allocation(ret, size);
   return gc_ref_heap_object(ret);
 }
 

@@ -437,6 +437,7 @@ static void collect(struct gc_mutator *mut, size_t for_alloc) {
   gc_heap_sizer_on_gc(heap->sizer, heap->size, live_size, pause_ns,
                       resize_heap);
   reset_heap_limits(heap);
+  clear_memory(semi->hp, semi->limit - semi->hp);
   HEAP_EVENT(heap, restarting_mutators);
   // fprintf(stderr, "%zd bytes copied\n", (space->size>>1)-(space->limit-space->hp));

@@ -520,8 +521,6 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
       continue;
     }
     space->hp = new_hp;
-    // FIXME: Allow allocator to avoid clearing memory?
-    clear_memory(addr, size);
     return (void *)addr;
   }
 }