1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-05-23 13:00:34 +02:00

* tags.h: (SCM_DOUBLE_CELLP, SCM_NDOUBLE_CELLP): new macros.

* gc.h: (typedef struct scm_freelist_t) remove from here.

* gc.c: (CELL_UP, CELL_DN) make these macros take additional
parameter (the span).
(CLUSTER_SIZE_IN_BYTES, ALIGNMENT_SLACK) new macros.
(typedef struct scm_freelist_t) move here from gc.h, it had no
business being externally visible.
(typedef struct scm_heap_seg_data_t) renamed from
scm_heap_seg_data, to be style-compliant.
(scm_mark_locations) if the possible pointer points to a
multi-cell, check that it's properly aligned.
(init_heap_seg) align multi-cells properly, work with the
assumption that the segment size divides cleanly by cluster size
(so that there's no spill).
(round_to_cluster_size) new function.
(alloc_some_heap, make_initial_segment) use round_to_cluster_size
to satisfy the new init_heap_seg invariant.
This commit is contained in:
Michael Livshin 2000-03-18 11:09:41 +00:00
parent 28b3236d36
commit a00c95d9c6
4 changed files with 221 additions and 175 deletions

View file

@ -1,3 +1,25 @@
2000-03-18 Michael Livshin <mlivshin@bigfoot.com>
* tags.h: (SCM_DOUBLE_CELLP, SCM_NDOUBLE_CELLP): new macros.
* gc.h: (typedef struct scm_freelist_t) remove from here.
* gc.c: (CELL_UP, CELL_DN) make these macros take additional
parameter (the span).
(CLUSTER_SIZE_IN_BYTES, ALIGNMENT_SLACK) new macros.
(typedef struct scm_freelist_t) move here from gc.h, it had no
business being externally visible.
(typedef struct scm_heap_seg_data_t) renamed from
scm_heap_seg_data, to be style-compliant.
(scm_mark_locations) if the possible pointer points to a
multi-cell, check that it's properly aligned.
(init_heap_seg) align multi-cells properly, work with the
assumption that the segment size divides cleanly by cluster size
(so that there's no spill).
(round_to_cluster_size) new function.
(alloc_some_heap, make_initial_segment) use round_to_cluster_size
to satisfy the new init_heap_seg invariant.
2000-03-18 Dirk Herrmann <D.Herrmann@tu-bs.de>
* _scm.h: Don't include async.h everywhere...

View file

@ -145,23 +145,58 @@
#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
# define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
# define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
# define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
# define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
# define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
/* scm_freelists
*/
/* One free list per cell span (span 1 = single cells, span 2 = double
   cells).  Kept private to gc.c on purpose: it is allocator/GC
   bookkeeping and has no business being externally visible. */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* a list of freelists, each of size gc_trigger,
     except the last one which may be shorter */
  SCM clusters;
  /* NOTE(review): presumably the tail slot of CLUSTERS so new clusters
     can be appended in O(1) -- confirm against the cluster-building
     code in init_heap_seg. */
  SCM *clustertail;
  /* this is the number of cells in each cluster, including the spine cell */
  int cluster_size;
  /* set to grow the heap when we run out of clusters
   */
  int grow_heap_p;
  /* minimum number of objects allocated before GC is triggered
   */
  int gc_trigger;
  /* defines gc_trigger as percent of heap size
   * 0 => constant trigger
   */
  int gc_trigger_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  int collected;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  int heap_size;
} scm_freelist_t;
#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
@ -222,8 +257,7 @@ SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
struct scm_heap_seg_data
typedef struct scm_heap_seg_data_t
{
/* lower and upper bounds of the segment */
SCM_CELLPTR bounds[2];
@ -240,7 +274,7 @@ struct scm_heap_seg_data
SEG_DATA, and mark the object iff the function returns non-zero.
At the moment, I don't think anyone uses this. */
int (*valid) ();
};
} scm_heap_seg_data_t;
@ -1292,6 +1326,8 @@ scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
if ( !scm_heap_table[seg_id].valid
|| scm_heap_table[seg_id].valid (ptr,
&scm_heap_table[seg_id]))
if ( scm_heap_table[seg_id].span == 1
|| SCM_DOUBLE_CELLP (*(SCM **) (& x[m])))
scm_gc_mark (*(SCM *) & x[m]);
break;
}
@ -1482,8 +1518,8 @@ scm_gc_sweep ()
#endif
span = scm_heap_table[i].span;
ptr = CELL_UP (scm_heap_table[i].bounds[0]);
seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
for (j = seg_size + span; j -= span; ptr += span)
{
#ifdef SCM_POINTERS_MUNGED
@ -1999,7 +2035,7 @@ scm_sizet scm_max_segment_size;
*/
SCM_CELLPTR scm_heap_org;
struct scm_heap_seg_data * scm_heap_table = 0;
scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
/* init_heap_seg
@ -2031,13 +2067,11 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
if (seg_org == NULL)
return 0;
ptr = seg_org;
size = (size / sizeof (scm_cell) / span) * span * sizeof (scm_cell);
ptr = CELL_UP (seg_org, span);
/* Compute the ceiling on valid object pointers w/in this segment.
*/
seg_end = CELL_DN ((char *) ptr + size);
seg_end = CELL_DN ((char *) seg_org + size, span);
/* Find the right place and insert the segment record.
*
@ -2065,7 +2099,7 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
/* Compute the least valid object pointer w/in this segment
*/
ptr = CELL_UP (ptr);
ptr = CELL_UP (ptr, span);
/*n_new_cells*/
@ -2075,8 +2109,7 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
freelist->heap_size += n_new_cells;
/* Partition objects in this segment into clusters
*/
/* Partition objects in this segment into clusters */
{
SCM clusters;
SCM *clusterp = &clusters;
@ -2092,10 +2125,9 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
n_new_cells -= n_cluster_cells;
}
else
{
seg_end = ptr + n_new_cells;
n_new_cells = 0;
}
/* [cmm] looks like the segment size doesn't divide cleanly by
cluster size. bad cmm! */
abort();
/* Allocate cluster spine
*/
@ -2147,7 +2179,7 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
* to join it to the input freelist.
*/
SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
freelist->cells = PTR2SCM (CELL_UP (seg_org));
freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
freelist->heap_size += n_new_cells;
@ -2162,11 +2194,26 @@ init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
#endif
}
#ifndef GUILE_NEW_GC_SCHEME
/* Without clusters there is nothing to round: pass the length through.
   The expansion is parenthesized so the macro stays well-behaved
   inside a larger expression. */
#define round_to_cluster_size(freelist, len) (len)
#else
/* Round LEN (a segment size in bytes) up to a whole number of
   FREELIST clusters, plus ALIGNMENT_SLACK bytes so that init_heap_seg
   can align the first cell on a span boundary.  This upholds
   init_heap_seg's assumption that the segment size divides cleanly by
   the cluster size (so that there is no spill). */
static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
#endif
static void
alloc_some_heap (scm_freelist_t *freelist)
{
struct scm_heap_seg_data * tmptable;
scm_heap_seg_data_t * tmptable;
SCM_CELLPTR ptr;
scm_sizet len;
@ -2180,9 +2227,9 @@ alloc_some_heap (scm_freelist_t *freelist)
* Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
* only if the allocation of the segment itself succeeds.
*/
len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
realloc ((char *)scm_heap_table, len)));
if (!tmptable)
scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
@ -2225,18 +2272,24 @@ alloc_some_heap (scm_freelist_t *freelist)
{
scm_sizet smallest;
#ifndef GUILE_NEW_GC_SCHEME
smallest = (freelist->span * sizeof (scm_cell));
#else
smallest = CLUSTER_SIZE_IN_BYTES (freelist);
#endif
if (len < smallest)
len = (freelist->span * sizeof (scm_cell));
len = smallest;
/* Allocate with decaying ambition. */
while ((len >= SCM_MIN_HEAP_SEG_SIZE)
&& (len >= smallest))
{
SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
scm_sizet rounded_len = round_to_cluster_size(freelist, len);
SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
if (ptr)
{
init_heap_seg (ptr, len, freelist);
init_heap_seg (ptr, rounded_len, freelist);
return;
}
len /= 2;
@ -2399,13 +2452,14 @@ cleanup (int status, void *arg)
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
init_heap_size,
scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
rounded_size,
freelist))
{
init_heap_size = SCM_HEAP_SEG_SIZE;
if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
init_heap_size,
rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
rounded_size,
freelist))
return 1;
}
@ -2487,8 +2541,8 @@ scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
j = SCM_HEAP_SEG_SIZE;
scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
scm_heap_table = ((struct scm_heap_seg_data *)
scm_must_malloc (sizeof (struct scm_heap_seg_data) * 2, "hplims"));
scm_heap_table = ((scm_heap_seg_data_t *)
scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
#ifdef GUILE_NEW_GC_SCHEME
if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
@ -2500,7 +2554,7 @@ scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
return 1;
#endif
scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
/* scm_hplims[0] can change. do not remove scm_heap_org */
scm_weak_vectors = SCM_EOL;

View file

@ -60,56 +60,23 @@
: SCM_GCMARKP(x))
#define SCM_NMARKEDP(x) (!SCM_MARKEDP(x))
extern struct scm_heap_seg_data *scm_heap_table;
extern struct scm_heap_seg_data_t *scm_heap_table;
extern int scm_n_heap_segs;
extern int scm_take_stdin;
extern int scm_block_gc;
extern int scm_gc_heap_lock;
/* Free-list bookkeeping for the cell allocator, one list per cell
   span.  NOTE(review): this is the gc.h copy of the definition, which
   the ChangeLog in this commit says is removed in favor of a private
   definition in gc.c. */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* a list of freelists, each of size gc_trigger,
     except the last one which may be shorter */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of cells in each cluster, including the spine cell */
  int cluster_size;
  /* set to grow the heap when we run out of clusters
   */
  int grow_heap_p;
  /* minimum number of objects allocated before GC is triggered
   */
  int gc_trigger;
  /* defines gc_trigger as percent of heap size
   * 0 => constant trigger
   */
  int gc_trigger_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  int collected;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  int heap_size;
} scm_freelist_t;
extern scm_sizet scm_max_segment_size;
extern SCM_CELLPTR scm_heap_org;
#ifdef GUILE_NEW_GC_SCHEME
extern SCM scm_freelist;
extern scm_freelist_t scm_master_freelist;
extern struct scm_freelist_t scm_master_freelist;
extern SCM scm_freelist2;
extern scm_freelist_t scm_master_freelist2;
extern struct scm_freelist_t scm_master_freelist2;
#else
extern scm_freelist_t scm_freelist;
extern scm_freelist_t scm_freelist2;
extern struct scm_freelist_t scm_freelist;
extern struct scm_freelist_t scm_freelist2;
#endif
extern unsigned long scm_gc_cells_collected;
extern unsigned long scm_gc_malloc_collected;
@ -136,14 +103,14 @@ extern SCM scm_gc_stats (void);
extern void scm_gc_start (const char *what);
extern void scm_gc_end (void);
extern SCM scm_gc (void);
extern void scm_gc_for_alloc (scm_freelist_t *freelist);
extern void scm_gc_for_alloc (struct scm_freelist_t *freelist);
#ifdef GUILE_NEW_GC_SCHEME
extern SCM scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist);
extern SCM scm_gc_for_newcell (struct scm_freelist_t *master, SCM *freelist);
#if 0
extern void scm_alloc_cluster (scm_freelist_t *master);
extern void scm_alloc_cluster (struct scm_freelist_t *master);
#endif
#else
extern SCM scm_gc_for_newcell (scm_freelist_t *freelist);
extern SCM scm_gc_for_newcell (struct scm_freelist_t *freelist);
#endif
extern void scm_igc (const char *what);
extern void scm_gc_mark (SCM p);

View file

@ -305,6 +305,9 @@ typedef void * SCM;
#define SCM_CELLP(x) (!SCM_NCELLP (x))
#define SCM_NCELLP(x) ((sizeof (scm_cell) - 1) & SCM_UNPACK (x))
#define SCM_DOUBLE_CELLP(x) (!SCM_NDOUBLE_CELLP (x))
#define SCM_NDOUBLE_CELLP(x) ((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x))
/* See numbers.h for macros relating to immediate integers.
*/