Mirror of https://git.savannah.gnu.org/git/guile.git
Add gc_allocation_kind argument to gc_allocate

Adapt all users. Will eventually allow for mmc to have untagged allocations.

commit 521cd44ebd (parent 5bddd522cf)
13 changed files with 158 additions and 75 deletions
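For embedders following this change, the adaptation at call sites is mechanical: every allocation call gains a kind argument. A minimal before/after sketch (struct pair and mut are hypothetical names, for illustration only):

  struct pair *p;
  p = gc_allocate(mut, sizeof(struct pair));                        /* before */
  p = gc_allocate(mut, sizeof(struct pair), GC_ALLOCATION_TAGGED);  /* after */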
api/bdw-attrs.h

@@ -21,15 +21,29 @@ static inline size_t gc_allocator_allocation_limit_offset(void) {
   GC_CRASH();
 }
 
-static inline size_t gc_allocator_freelist_offset(size_t size) {
+static inline size_t gc_allocator_freelist_offset(size_t size,
+                                                  enum gc_allocation_kind kind) {
   GC_ASSERT(size);
-  return sizeof(void*) * ((size - 1) / gc_allocator_small_granule_size());
+  size_t base;
+  switch (kind) {
+  case GC_ALLOCATION_TAGGED:
+  case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
+    base = 0;
+    break;
+  case GC_ALLOCATION_UNTAGGED_POINTERLESS:
+  case GC_ALLOCATION_TAGGED_POINTERLESS:
+    base = (sizeof(void*) * gc_allocator_large_threshold() /
+            gc_allocator_small_granule_size());
+    break;
+  }
+  size_t bucket = (size - 1) / gc_allocator_small_granule_size();
+  return base + sizeof(void*) * bucket;
 }
 
 static inline size_t gc_allocator_alloc_table_alignment(void) {
   return 0;
 }
-static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_kind) {
   GC_CRASH();
 }
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
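The rewritten gc_allocator_freelist_offset above lays out two banks of freelist buckets in the mutator: pointer-bearing kinds index from offset 0, pointerless kinds from a base one whole bank further on. A worked example of the arithmetic, assuming for illustration a 64-bit target where the small granule is 16 bytes and the large-object threshold is 256 bytes (the real values are whatever this header's other accessors return):

  /* bucket(size) = (size - 1) / 16; offset = base + 8 * bucket */
  gc_allocator_freelist_offset(16, GC_ALLOCATION_TAGGED);
      /* base 0, bucket 0: offset 0 */
  gc_allocator_freelist_offset(16, GC_ALLOCATION_UNTAGGED_POINTERLESS);
      /* base 8 * 256 / 16 == 128, bucket 0: offset 128,
         i.e. just past 16 pointer-bearing bucket slots */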
api/gc-allocation-kind.h (new file)

@@ -0,0 +1,19 @@
+#ifndef GC_ALLOCATION_KIND_H
+#define GC_ALLOCATION_KIND_H
+
+enum gc_allocation_kind {
+  // An object whose type can be inspected at run-time based on its contents,
+  // and whose fields can be traced via the gc_trace_object procedure.
+  GC_ALLOCATION_TAGGED,
+  // Like GC_ALLOCATION_TAGGED, but not containing any fields that reference
+  // GC-managed objects.  The GC may choose to handle these specially.
+  GC_ALLOCATION_TAGGED_POINTERLESS,
+  // A raw allocation whose type cannot be inspected at trace-time, and whose
+  // fields should be traced conservatively.
+  GC_ALLOCATION_UNTAGGED_CONSERVATIVE,
+  // A raw allocation whose type cannot be inspected at trace-time, but
+  // containing no fields that reference GC-managed objects.
+  GC_ALLOCATION_UNTAGGED_POINTERLESS
+};
+
+#endif // GC_ALLOCATION_KIND_H
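To make the four kinds concrete, here is a hedged sketch of how a caller might choose among them (the object types are invented for illustration; note that per the diffs below, only the bdw collector accepts the untagged kinds in this commit):

  /* Tag word plus GC-managed fields: traced precisely via gc_trace_object. */
  struct pair *p = gc_allocate(mut, sizeof *p, GC_ALLOCATION_TAGGED);
  /* Tag word but no GC pointers, e.g. a bignum: may live in a pointerless region. */
  struct bignum *n = gc_allocate(mut, sizeof *n, GC_ALLOCATION_TAGGED_POINTERLESS);
  /* Raw memory that may contain pointers: traced conservatively. */
  void *frame = gc_allocate(mut, 256, GC_ALLOCATION_UNTAGGED_CONSERVATIVE);
  /* Raw memory guaranteed pointer-free, e.g. string bytes. */
  char *bytes = gc_allocate(mut, 64, GC_ALLOCATION_UNTAGGED_POINTERLESS);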
api/gc-api.h
@@ -2,6 +2,7 @@
 #define GC_API_H_
 
 #include "gc-config.h"
+#include "gc-allocation-kind.h"
 #include "gc-assert.h"
 #include "gc-attrs.h"
 #include "gc-collection-kind.h"
@@ -56,10 +57,10 @@ GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
 GC_API_ void gc_collect(struct gc_mutator *mut,
                         enum gc_collection_kind requested_kind);
 
-static inline void gc_update_alloc_table(struct gc_ref obj,
-                                         size_t size) GC_ALWAYS_INLINE;
-static inline void gc_update_alloc_table(struct gc_ref obj,
-                                         size_t size) {
+static inline void gc_update_alloc_table(struct gc_ref obj, size_t size,
+                                         enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void gc_update_alloc_table(struct gc_ref obj, size_t size,
+                                         enum gc_allocation_kind kind) {
   size_t alignment = gc_allocator_alloc_table_alignment();
   if (!alignment) return;
 
@@ -69,7 +70,7 @@ static inline void gc_update_alloc_table(struct gc_ref obj,
   uintptr_t granule = (addr & (alignment - 1)) / granule_size;
   uint8_t *alloc = (uint8_t*)(base + granule);
 
-  uint8_t begin_pattern = gc_allocator_alloc_table_begin_pattern();
+  uint8_t begin_pattern = gc_allocator_alloc_table_begin_pattern(kind);
   uint8_t end_pattern = gc_allocator_alloc_table_end_pattern();
   if (end_pattern) {
     size_t granules = size / granule_size;
@@ -86,11 +87,15 @@ static inline void gc_update_alloc_table(struct gc_ref obj,
   }
 }
 
-GC_API_ void* gc_allocate_slow(struct gc_mutator *mut, size_t bytes) GC_NEVER_INLINE;
+GC_API_ void* gc_allocate_slow(struct gc_mutator *mut, size_t bytes,
+                               enum gc_allocation_kind kind) GC_NEVER_INLINE;
 
 static inline void*
-gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size) {
+gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut, size_t size,
+                                    enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut,
+                                                        size_t size,
+                                                        enum gc_allocation_kind kind) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   size_t granule_size = gc_allocator_small_granule_size();
@@ -111,17 +116,20 @@ static inline void* gc_allocate_small_fast_bump_pointer(struct gc_mutator *mut,
 
   *hp_loc = new_hp;
 
-  gc_update_alloc_table(gc_ref(hp), size);
+  gc_update_alloc_table(gc_ref(hp), size, kind);
 
   return (void*)hp;
 }
 
 static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut,
-                                                    size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut, size_t size) {
+                                                    size_t size,
+                                                    enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut,
+                                                    size_t size,
+                                                    enum gc_allocation_kind kind) {
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
-  size_t freelist_offset = gc_allocator_freelist_offset(size);
+  size_t freelist_offset = gc_allocator_freelist_offset(size, kind);
   uintptr_t base_addr = (uintptr_t)mut;
   void **freelist_loc = (void**)(base_addr + freelist_offset);
 
@@ -131,21 +139,23 @@ static inline void* gc_allocate_small_fast_freelist(struct gc_mutator *mut, size
 
   *freelist_loc = *(void**)head;
 
-  gc_update_alloc_table(gc_ref_from_heap_object(head), size);
+  gc_update_alloc_table(gc_ref_from_heap_object(head), size, kind);
 
   return head;
 }
 
-static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size,
+                                           enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size,
+                                           enum gc_allocation_kind kind) {
   GC_ASSERT(size != 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
 
   switch (gc_allocator_kind()) {
   case GC_ALLOCATOR_INLINE_BUMP_POINTER:
-    return gc_allocate_small_fast_bump_pointer(mut, size);
+    return gc_allocate_small_fast_bump_pointer(mut, size, kind);
   case GC_ALLOCATOR_INLINE_FREELIST:
-    return gc_allocate_small_fast_freelist(mut, size);
+    return gc_allocate_small_fast_freelist(mut, size, kind);
   case GC_ALLOCATOR_INLINE_NONE:
     return NULL;
   default:
@@ -153,27 +163,28 @@ static inline void* gc_allocate_small_fast(struct gc_mutator *mut, size_t size)
   }
 }
 
-static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size) {
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size,
+                                     enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void* gc_allocate_fast(struct gc_mutator *mut, size_t size,
+                                     enum gc_allocation_kind kind) {
   GC_ASSERT(size != 0);
   if (size > gc_allocator_large_threshold())
     return NULL;
 
-  return gc_allocate_small_fast(mut, size);
+  return gc_allocate_small_fast(mut, size, kind);
 }
 
-static inline void* gc_allocate(struct gc_mutator *mut, size_t size) GC_ALWAYS_INLINE;
-static inline void* gc_allocate(struct gc_mutator *mut, size_t size) {
-  void *ret = gc_allocate_fast(mut, size);
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size,
+                                enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
+static inline void* gc_allocate(struct gc_mutator *mut, size_t size,
+                                enum gc_allocation_kind kind) {
+  void *ret = gc_allocate_fast(mut, size, kind);
   if (GC_LIKELY(ret != NULL))
     return ret;
 
-  return gc_allocate_slow(mut, size);
+  return gc_allocate_slow(mut, size, kind);
 }
 
 // FIXME: remove :P
 GC_API_ void* gc_allocate_pointerless(struct gc_mutator *mut, size_t bytes);
 
 GC_API_ int gc_object_is_old_generation_slow(struct gc_mutator *mut,
                                              struct gc_ref obj) GC_NEVER_INLINE;
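The fast/slow split itself is unchanged; the kind is simply threaded through it. A hedged sketch of the intended embedding (the function name is invented): a language runtime inlines the fast path into generated code, and because kind is almost always a compile-time constant at the call site, the dispatch in gc_allocate_small_fast and gc_allocator_freelist_offset should fold away entirely.

  static void* runtime_alloc_tagged(struct gc_mutator *mut, size_t size) {
    /* Inline bump-pointer or freelist hit; NULL on a miss or a large size. */
    void *obj = gc_allocate_fast(mut, size, GC_ALLOCATION_TAGGED);
    if (GC_UNLIKELY(obj == NULL))
      obj = gc_allocate_slow(mut, size, GC_ALLOCATION_TAGGED); /* may trigger GC */
    return obj;
  }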
api/gc-attrs.h

@@ -2,6 +2,7 @@
 #define GC_ATTRS_H
 
 #include "gc-inline.h"
+#include "gc-allocation-kind.h"
 
 #include <stddef.h>
 #include <stdint.h>
@@ -19,10 +20,11 @@ static inline size_t gc_allocator_small_granule_size(void) GC_ALWAYS_INLINE;
 static inline size_t gc_allocator_allocation_pointer_offset(void) GC_ALWAYS_INLINE;
 static inline size_t gc_allocator_allocation_limit_offset(void) GC_ALWAYS_INLINE;
 
-static inline size_t gc_allocator_freelist_offset(size_t size) GC_ALWAYS_INLINE;
+static inline size_t gc_allocator_freelist_offset(size_t size,
+                                                  enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
 
 static inline size_t gc_allocator_alloc_table_alignment(void) GC_ALWAYS_INLINE;
-static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) GC_ALWAYS_INLINE;
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_kind kind) GC_ALWAYS_INLINE;
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) GC_ALWAYS_INLINE;
 
 enum gc_old_generation_check_kind {
api/mmc-attrs.h

@@ -22,14 +22,15 @@ static inline size_t gc_allocator_allocation_limit_offset(void) {
   return sizeof(uintptr_t) * 1;
 }
 
-static inline size_t gc_allocator_freelist_offset(size_t size) {
+static inline size_t gc_allocator_freelist_offset(size_t size,
+                                                  enum gc_allocation_kind kind) {
   GC_CRASH();
 }
 
 static inline size_t gc_allocator_alloc_table_alignment(void) {
   return 4 * 1024 * 1024;
 }
-static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_kind kind) {
   return 1;
 }
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
api/pcc-attrs.h

@@ -25,14 +25,14 @@ static inline size_t gc_allocator_allocation_limit_offset(void) {
   return sizeof(uintptr_t) * 1;
 }
 
-static inline size_t gc_allocator_freelist_offset(size_t size) {
+static inline size_t gc_allocator_freelist_offset(size_t size, enum gc_allocation_kind kind) {
   GC_CRASH();
 }
 
 static inline size_t gc_allocator_alloc_table_alignment(void) {
   return 0;
 }
-static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_kind kind) {
   GC_CRASH();
 }
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
api/semi-attrs.h

@@ -24,14 +24,15 @@ static inline size_t gc_allocator_allocation_limit_offset(void) {
   return sizeof(uintptr_t) * 1;
 }
 
-static inline size_t gc_allocator_freelist_offset(size_t size) {
+static inline size_t gc_allocator_freelist_offset(size_t size,
+                                                  enum gc_allocation_kind kind) {
   GC_CRASH();
 }
 
 static inline size_t gc_allocator_alloc_table_alignment(void) {
   return 0;
 }
-static inline uint8_t gc_allocator_alloc_table_begin_pattern(void) {
+static inline uint8_t gc_allocator_alloc_table_begin_pattern(enum gc_allocation_kind kind) {
   GC_CRASH();
 }
 static inline uint8_t gc_allocator_alloc_table_end_pattern(void) {
benchmarks/simple-allocator.h

@@ -6,14 +6,14 @@
 
 static inline void*
 gc_allocate_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
-  void *obj = gc_allocate(mut, bytes);
+  void *obj = gc_allocate(mut, bytes, GC_ALLOCATION_TAGGED);
   *tag_word(gc_ref_from_heap_object(obj)) = tag_live(kind);
   return obj;
 }
 
 static inline void*
gc_allocate_pointerless_with_kind(struct gc_mutator *mut, enum alloc_kind kind, size_t bytes) {
-  void *obj = gc_allocate_pointerless(mut, bytes);
+  void *obj = gc_allocate(mut, bytes, GC_ALLOCATION_TAGGED_POINTERLESS);
   *tag_word(gc_ref_from_heap_object(obj)) = tag_live(kind);
   return obj;
 }
src/bdw.c
@@ -63,6 +63,7 @@ struct gc_heap {
 
 struct gc_mutator {
   void *freelists[GC_INLINE_FREELIST_COUNT];
+  void *pointerless_freelists[GC_INLINE_FREELIST_COUNT];
   struct gc_heap *heap;
   struct gc_mutator_roots *roots;
   struct gc_mutator *next; // with heap lock
@@ -122,27 +123,48 @@ allocate_small(void **freelist, size_t idx, enum gc_inline_kind kind) {
   }
 
   *freelist = *(void **)(head);
 
+  if (kind == GC_INLINE_KIND_POINTERLESS)
+    memset(head, 0, gc_inline_freelist_object_size(idx));
+
   return head;
 }
 
-void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
+                       enum gc_allocation_kind kind) {
   GC_ASSERT(size != 0);
   if (size <= gc_allocator_large_threshold()) {
     size_t idx = gc_inline_bytes_to_freelist_index(size);
-    return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+    void **freelists;
+    enum gc_inline_kind freelist_kind;
+    switch (kind) {
+    case GC_ALLOCATION_TAGGED:
+    case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
+      return allocate_small(&mut->freelists[idx], idx, GC_INLINE_KIND_NORMAL);
+    case GC_ALLOCATION_TAGGED_POINTERLESS:
+    case GC_ALLOCATION_UNTAGGED_POINTERLESS:
+      return allocate_small(&mut->pointerless_freelists[idx], idx,
+                            GC_INLINE_KIND_POINTERLESS);
+    default:
+      GC_CRASH();
+    }
   } else {
-    return GC_malloc(size);
+    switch (kind) {
+    case GC_ALLOCATION_TAGGED:
+    case GC_ALLOCATION_UNTAGGED_CONSERVATIVE:
+      return GC_malloc(size);
+    case GC_ALLOCATION_TAGGED_POINTERLESS:
+    case GC_ALLOCATION_UNTAGGED_POINTERLESS: {
+      void *ret = GC_malloc_atomic(size);
+      memset(ret, 0, size);
+      return ret;
+    }
+    default:
+      GC_CRASH();
+    }
   }
 }
 
-void* gc_allocate_pointerless(struct gc_mutator *mut,
-                              size_t size) {
-  // Because the BDW API requires us to implement a custom marker so
-  // that the pointerless freelist gets traced, even though it's in a
-  // pointerless region, we punt on thread-local pointerless freelists.
-  return GC_malloc_atomic(size);
-}
-
 void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
   // Nothing to do.
 }
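Two points in the bdw adaptation deserve a note. Small pointerless objects now come from the new per-mutator pointerless_freelists and are zeroed on reuse (the memset in allocate_small); large ones go through GC_malloc_atomic, which, unlike GC_malloc, returns memory that the collector neither scans nor zeroes, hence the explicit memset, presumably to keep the zeroed-memory behavior callers get from GC_malloc:

  /* GC_malloc(size):        zeroed, contents traced by the collector.
     GC_malloc_atomic(size): untraced, contents arbitrary; zero manually. */
  void *ret = GC_malloc_atomic(size);
  memset(ret, 0, size);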
src/mmc.c
@@ -891,7 +891,15 @@ collect_for_small_allocation(void *mut) {
 }
 
 void*
-gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+gc_allocate_slow(struct gc_mutator *mut, size_t size,
+                 enum gc_allocation_kind kind) {
+  if (GC_UNLIKELY(kind != GC_ALLOCATION_TAGGED
+                  && kind != GC_ALLOCATION_TAGGED_POINTERLESS)) {
+    fprintf(stderr, "mmc collector cannot make allocations of kind %d\n",
+            (int)kind);
+    GC_CRASH();
+  }
+
   GC_ASSERT(size > 0); // allocating 0 bytes would be silly
 
   if (size > gc_allocator_large_threshold())
@@ -900,12 +908,7 @@ gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   return gc_ref_heap_object(nofl_allocate(&mut->allocator,
                                           heap_nofl_space(mutator_heap(mut)),
                                           size, collect_for_small_allocation,
-                                          mut));
-}
-
-void*
-gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
-  return gc_allocate(mut, size);
+                                          mut, kind));
 }
 
 void
@@ -952,7 +955,8 @@ gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
 struct gc_ephemeron*
 gc_allocate_ephemeron(struct gc_mutator *mut) {
   struct gc_ref ret =
-    gc_ref_from_heap_object(gc_allocate(mut, gc_ephemeron_size()));
+    gc_ref_from_heap_object(gc_allocate(mut, gc_ephemeron_size(),
+                                        GC_ALLOCATION_TAGGED));
   nofl_space_set_ephemeron_flag(ret);
   return gc_ref_heap_object(ret);
 }
@@ -977,7 +981,7 @@ gc_heap_ephemeron_trace_epoch(struct gc_heap *heap) {
 
 struct gc_finalizer*
 gc_allocate_finalizer(struct gc_mutator *mut) {
-  return gc_allocate(mut, gc_finalizer_size());
+  return gc_allocate(mut, gc_finalizer_size(), GC_ALLOCATION_TAGGED);
 }
 
 void
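As the guard above shows, mmc (like pcc and semi below) currently accepts only the two tagged kinds; the untagged kinds abort at runtime, matching the commit message's note that untagged mmc allocations are future work. Illustration (this call is not in the diff; kind values follow the enum declared earlier, where GC_ALLOCATION_UNTAGGED_POINTERLESS is 3):

  /* With the mmc collector this prints
     "mmc collector cannot make allocations of kind 3" and calls GC_CRASH(). */
  void *raw = gc_allocate(mut, 1024, GC_ALLOCATION_UNTAGGED_POINTERLESS);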
src/nofl-space.h

@@ -871,7 +871,8 @@ nofl_allocator_next_hole(struct nofl_allocator *alloc,
 
 static struct gc_ref
 nofl_allocate(struct nofl_allocator *alloc, struct nofl_space *space,
-              size_t size, void (*gc)(void*), void *gc_data) {
+              size_t size, void (*gc)(void*), void *gc_data,
+              enum gc_allocation_kind kind) {
   GC_ASSERT(size > 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, NOFL_GRANULE_SIZE);
@@ -890,7 +891,7 @@ nofl_allocate(struct nofl_allocator *alloc, struct nofl_space *space,
 
   struct gc_ref ret = gc_ref(alloc->alloc);
   alloc->alloc += size;
-  gc_update_alloc_table(ret, size);
+  gc_update_alloc_table(ret, size, kind);
   return ret;
 }
 
src/pcc.c
@@ -978,7 +978,14 @@ static void get_more_empty_blocks_for_mutator(void *mut) {
   trigger_collection(mut, GC_COLLECTION_MINOR);
 }
 
-void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
+                       enum gc_allocation_kind kind) {
+  if (GC_UNLIKELY(kind != GC_ALLOCATION_TAGGED
+                  && kind != GC_ALLOCATION_TAGGED_POINTERLESS)) {
+    fprintf(stderr, "pcc collector cannot make allocations of kind %d\n",
+            (int)kind);
+    GC_CRASH();
+  }
   GC_ASSERT(size > 0); // allocating 0 bytes would be silly
 
   if (size > gc_allocator_large_threshold())
@@ -998,10 +1005,6 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   return gc_ref_heap_object(ret);
 }
 
-void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
-  return gc_allocate(mut, size);
-}
-
 void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
   GC_CRASH();
 }
@@ -1056,7 +1059,7 @@ void gc_safepoint_slow(struct gc_mutator *mut) {
 }
 
 struct gc_ephemeron* gc_allocate_ephemeron(struct gc_mutator *mut) {
-  return gc_allocate(mut, gc_ephemeron_size());
+  return gc_allocate(mut, gc_ephemeron_size(), GC_ALLOCATION_TAGGED);
 }
 
 void gc_ephemeron_init(struct gc_mutator *mut, struct gc_ephemeron *ephemeron,
@@ -1077,7 +1080,7 @@ unsigned gc_heap_ephemeron_trace_epoch(struct gc_heap *heap) {
 }
 
 struct gc_finalizer* gc_allocate_finalizer(struct gc_mutator *mut) {
-  return gc_allocate(mut, gc_finalizer_size());
+  return gc_allocate(mut, gc_finalizer_size(), GC_ALLOCATION_TAGGED);
 }
 
 void gc_finalizer_attach(struct gc_mutator *mut, struct gc_finalizer *finalizer,
src/semi.c
@@ -505,7 +505,15 @@ static void* allocate_large(struct gc_mutator *mut, size_t size) {
   return ret;
 }
 
-void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
+void* gc_allocate_slow(struct gc_mutator *mut, size_t size,
+                       enum gc_allocation_kind kind) {
+  if (GC_UNLIKELY(kind != GC_ALLOCATION_TAGGED
+                  && kind != GC_ALLOCATION_TAGGED_POINTERLESS)) {
+    fprintf(stderr, "semispace collector cannot make allocations of kind %d\n",
+            (int)kind);
+    GC_CRASH();
+  }
+
   if (size > gc_allocator_large_threshold())
     return allocate_large(mut, size);
 
@@ -522,16 +530,13 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
     return (void *)addr;
   }
 }
-void* gc_allocate_pointerless(struct gc_mutator *mut, size_t size) {
-  return gc_allocate(mut, size);
-}
 
 void gc_pin_object(struct gc_mutator *mut, struct gc_ref ref) {
   GC_CRASH();
 }
 
 struct gc_ephemeron* gc_allocate_ephemeron(struct gc_mutator *mut) {
-  return gc_allocate(mut, gc_ephemeron_size());
+  return gc_allocate(mut, gc_ephemeron_size(), GC_ALLOCATION_TAGGED);
 }
 
 void gc_ephemeron_init(struct gc_mutator *mut, struct gc_ephemeron *ephemeron,
@@ -540,7 +545,7 @@ void gc_ephemeron_init(struct gc_mutator *mut, struct gc_ephemeron *ephemeron,
 }
 
 struct gc_finalizer* gc_allocate_finalizer(struct gc_mutator *mut) {
-  return gc_allocate(mut, gc_finalizer_size());
+  return gc_allocate(mut, gc_finalizer_size(), GC_ALLOCATION_TAGGED);
 }
 
 void gc_finalizer_attach(struct gc_mutator *mut, struct gc_finalizer *finalizer,