1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-04-30 20:00:19 +02:00

* gc.h, gc.c (SCM_FREECELL_P): Removed for good.

(scm_block_gc, scm_gc_heap_lock): Removed.  Removed all uses.
(scm_gc_running_p): Now a macro that refers to the scm_i_thread
field.
(scm_i_sweep_mutex): Now a non-recursive mutex.  GC can not happen
recursively.
(scm_igc, scm_i_gc): Renamed former to latter.  Changed all uses.
Do not lock scm_i_sweep_mutex, which is now non-recursive, or set
scm_gc_running_p.  Do not run the scm_after_gc_c_hook.
(scm_gc): Lock scm_i_sweep_mutex, set scm_gc_running_p and run the
scm_after_gc_c_hook here.
(scm_gc_for_new_cell): Set scm_gc_running_p here and run the
scm_after_gc_c_hook when a full GC has in fact been performed.
(scm_i_expensive_validation_check): Call scm_gc, not scm_i_gc.

* gc-segment.c (scm_i_get_new_heap_segment): Do not check
scm_gc_heap_lock.

* gc-malloc.c (scm_realloc, increase_mtrigger): Set
scm_gc_running_p while the scm_i_sweep_mutex is locked.
This commit is contained in:
Marius Vollmer 2005-03-10 18:39:53 +00:00
parent 94d375b5a7
commit b17e0ac397
4 changed files with 85 additions and 107 deletions

View file

@ -111,19 +111,22 @@ scm_realloc (void *mem, size_t size)
return ptr; return ptr;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex); scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
scm_gc_running_p = 1;
scm_i_sweep_all_segments ("realloc"); scm_i_sweep_all_segments ("realloc");
SCM_SYSCALL (ptr = realloc (mem, size)); SCM_SYSCALL (ptr = realloc (mem, size));
if (ptr) if (ptr)
{ {
scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex); scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
return ptr; return ptr;
} }
scm_igc ("realloc"); scm_i_gc ("realloc");
scm_i_sweep_all_segments ("realloc"); scm_i_sweep_all_segments ("realloc");
scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex); scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
SCM_SYSCALL (ptr = realloc (mem, size)); SCM_SYSCALL (ptr = realloc (mem, size));
@ -219,9 +222,10 @@ increase_mtrigger (size_t size, const char *what)
float yield; float yield;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex); scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
scm_gc_running_p = 1;
prev_alloced = mallocated; prev_alloced = mallocated;
scm_igc (what); scm_i_gc (what);
scm_i_sweep_all_segments ("mtrigger"); scm_i_sweep_all_segments ("mtrigger");
yield = (((float) prev_alloced - (float) scm_mallocated) yield = (((float) prev_alloced - (float) scm_mallocated)
@ -263,6 +267,7 @@ increase_mtrigger (size_t size, const char *what)
#endif #endif
} }
scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex); scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
} }
} }

View file

@ -89,7 +89,7 @@ scm_i_initialize_heap_segment_data (scm_t_heap_segment * segment, size_t request
scm_t_cell * memory = 0; scm_t_cell * memory = 0;
/* /*
We use malloc to alloc the heap. On GNU libc this is We use calloc to alloc the heap. On GNU libc this is
equivalent to mmapping /dev/zero equivalent to mmapping /dev/zero
*/ */
SCM_SYSCALL (memory = (scm_t_cell * ) calloc (1, mem_needed)); SCM_SYSCALL (memory = (scm_t_cell * ) calloc (1, mem_needed));
@ -320,7 +320,7 @@ SCM
scm_i_sweep_some_segments (scm_t_cell_type_statistics * fl) scm_i_sweep_some_segments (scm_t_cell_type_statistics * fl)
{ {
int i = fl->heap_segment_idx; int i = fl->heap_segment_idx;
SCM collected =SCM_EOL; SCM collected = SCM_EOL;
if (i == -1) if (i == -1)
i++; i++;
@ -458,19 +458,11 @@ scm_i_find_heap_segment_containing_object (SCM obj)
RETURN: the index of the segment. RETURN: the index of the segment.
*/ */
int int
scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist, policy_on_error error_policy) scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist,
policy_on_error error_policy)
{ {
size_t len; size_t len;
if (scm_gc_heap_lock)
{
/* Critical code sections (such as the garbage collector) aren't
* supposed to add heap segments.
*/
fprintf (stderr, "scm_i_get_new_heap_segment: Can not extend locked heap.\n");
abort ();
}
{ {
/* Assure that the new segment is predicted to be large enough. /* Assure that the new segment is predicted to be large enough.
* *

View file

@ -67,13 +67,9 @@ extern unsigned long * __libc_ia64_register_backing_store_base;
#include <unistd.h> #include <unistd.h>
#endif #endif
unsigned int scm_gc_running_p = 0;
/* Lock this mutex before doing lazy sweeping. /* Lock this mutex before doing lazy sweeping.
*/ */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER; scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Set this to != 0 if every cell that is accessed shall be checked: /* Set this to != 0 if every cell that is accessed shall be checked:
*/ */
@ -129,7 +125,7 @@ scm_i_expensive_validation_check (SCM cell)
else else
{ {
counter = scm_debug_cells_gc_interval; counter = scm_debug_cells_gc_interval;
scm_igc ("scm_assert_cell_valid"); scm_gc ();
} }
} }
} }
@ -214,18 +210,6 @@ SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
*/ */
unsigned long scm_mtrigger; unsigned long scm_mtrigger;
/* scm_gc_heap_lock
* If set, don't expand the heap. Set only during gc, during which no allocation
* is supposed to take place anyway.
*/
int scm_gc_heap_lock = 0;
/* GC Blocking
* Don't pause for collection if this is set -- just
* expand the heap.
*/
int scm_block_gc = 1;
/* During collection, this accumulates objects holding /* During collection, this accumulates objects holding
* weak references. * weak references.
*/ */
@ -456,7 +440,12 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
"no longer accessible.") "no longer accessible.")
#define FUNC_NAME s_scm_gc #define FUNC_NAME s_scm_gc
{ {
scm_igc ("call"); scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
scm_gc_running_p = 1;
scm_i_gc ("call");
scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
scm_c_hook_run (&scm_after_gc_c_hook, 0);
return SCM_UNSPECIFIED; return SCM_UNSPECIFIED;
} }
#undef FUNC_NAME #undef FUNC_NAME
@ -464,16 +453,18 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
/* When we get POSIX threads support, the master will be global and /* The master is global and common while the freelist will be
* common while the freelist will be individual for each thread. * individual for each thread.
*/ */
SCM SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells) scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{ {
SCM cell; SCM cell;
int did_gc = 0;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex); scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
scm_gc_running_p = 1;
*free_cells = scm_i_sweep_some_segments (freelist); *free_cells = scm_i_sweep_some_segments (freelist);
if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist)) if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
@ -482,10 +473,10 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
*free_cells = scm_i_sweep_some_segments (freelist); *free_cells = scm_i_sweep_some_segments (freelist);
} }
if (*free_cells == SCM_EOL && !scm_block_gc) if (*free_cells == SCM_EOL)
{ {
/* /*
with the advent of lazy sweep, GC yield is only know just with the advent of lazy sweep, GC yield is only known just
before doing the GC. before doing the GC.
*/ */
scm_i_adjust_min_yield (freelist); scm_i_adjust_min_yield (freelist);
@ -494,7 +485,8 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
out of fresh cells. Try to get some new ones. out of fresh cells. Try to get some new ones.
*/ */
scm_igc ("cells"); did_gc = 1;
scm_i_gc ("cells");
*free_cells = scm_i_sweep_some_segments (freelist); *free_cells = scm_i_sweep_some_segments (freelist);
} }
@ -515,8 +507,12 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
*free_cells = SCM_FREE_CELL_CDR (cell); *free_cells = SCM_FREE_CELL_CDR (cell);
scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex); scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
if (did_gc)
scm_c_hook_run (&scm_after_gc_c_hook, 0);
return cell; return cell;
} }
@ -527,18 +523,14 @@ scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook; scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook; scm_t_c_hook scm_after_gc_c_hook;
/* Must be called while holding scm_i_sweep_mutex.
*/
void void
scm_igc (const char *what) scm_i_gc (const char *what)
{ {
if (scm_block_gc)
return;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
/* During the critical section, only the current thread may run. */
scm_i_thread_put_to_sleep (); scm_i_thread_put_to_sleep ();
++scm_gc_running_p;
scm_c_hook_run (&scm_before_gc_c_hook, 0); scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO #ifdef DEBUGINFO
@ -552,22 +544,13 @@ scm_igc (const char *what)
gc_start_stats (what); gc_start_stats (what);
if (scm_gc_heap_lock)
/* We've invoked the collector while a GC is already in progress.
That should never happen. */
abort ();
/* /*
Set freelists to NULL so scm_cons() always triggers gc, causing Set freelists to NULL so scm_cons() always triggers gc, causing
the above abort() to be triggered. the assertion above to fail.
*/ */
*SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL; *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
*SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL; *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
++scm_gc_heap_lock;
/* /*
Let's finish the sweep. The conservative GC might point into the Let's finish the sweep. The conservative GC might point into the
garbage, and marking that would create a mess. garbage, and marking that would create a mess.
@ -589,28 +572,17 @@ scm_igc (const char *what)
scm_mallocated -= scm_i_deprecated_memory_return; scm_mallocated -= scm_i_deprecated_memory_return;
/* Mark */
scm_c_hook_run (&scm_before_mark_c_hook, 0); scm_c_hook_run (&scm_before_mark_c_hook, 0);
scm_mark_all (); scm_mark_all ();
scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc); scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
scm_c_hook_run (&scm_before_sweep_c_hook, 0); /* Sweep
/* TODO: the after_sweep hook should probably be moved to just before
Moved this lock upwards so that we can alloc new heap at the end of a sweep. the mark, since that's where the sweep is finished in lazy
sweeping.
DOCME: why should the heap be locked anyway?
*/
--scm_gc_heap_lock;
scm_gc_sweep ();
/*
TODO: this hook should probably be moved to just before the mark,
since that's where the sweep is finished in lazy sweeping.
MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The
original meaning implied at least two things: that it would be original meaning implied at least two things: that it would be
@ -631,18 +603,15 @@ scm_igc (const char *what)
distinct classes of hook functions since this can prevent some distinct classes of hook functions since this can prevent some
bad interference when several modules adds gc hooks. bad interference when several modules adds gc hooks.
*/ */
scm_c_hook_run (&scm_before_sweep_c_hook, 0);
scm_gc_sweep ();
scm_c_hook_run (&scm_after_sweep_c_hook, 0); scm_c_hook_run (&scm_after_sweep_c_hook, 0);
gc_end_stats (); gc_end_stats ();
--scm_gc_running_p;
scm_i_thread_wake_up (); scm_i_thread_wake_up ();
/*
See above.
*/
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
scm_c_hook_run (&scm_after_gc_c_hook, 0);
/* /*
For debugging purposes, you could do For debugging purposes, you could do
scm_i_sweep_all_segments("debug"), but then the remains of the scm_i_sweep_all_segments("debug"), but then the remains of the
@ -916,7 +885,6 @@ scm_init_storage ()
j = SCM_NUM_PROTECTS; j = SCM_NUM_PROTECTS;
while (j) while (j)
scm_sys_protects[--j] = SCM_BOOL_F; scm_sys_protects[--j] = SCM_BOOL_F;
scm_block_gc = 1;
scm_gc_init_freelist(); scm_gc_init_freelist();
scm_gc_init_malloc (); scm_gc_init_malloc ();
@ -1046,8 +1014,6 @@ scm_gc_sweep (void)
*/ */
scm_i_reset_segments (); scm_i_reset_segments ();
/* When we move to POSIX threads private freelists should probably
be GC-protected instead. */
*SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL; *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
*SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL; *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

View file

@ -29,6 +29,34 @@
/* Cell allocation and garbage collection work roughly in the
following manner:
Each thread has a 'freelist', which is a list of available cells.
(It actually has two freelists, one for single cells and one for
double cells. Everything works analogous for double cells.)
When a thread wants to allocate a cell and the freelist is empty,
it refers to a global list of unswept 'cards'. A card is a small
block of cells that are contiguous in memory, together with the
corresponding mark bits. An unswept card is one where the mark bits
are set for cells that have been in use during the last global mark
phase, but the unmarked cells of the card have not been scanned and
freed yet.
The thread takes one of the unswept cards and sweeps it, thereby
building a new freelist that it then uses. Sweeping a card will
call the smob free functions of unmarked cells, for example, and
thus, these free functions can run at any time, in any thread.
When there are no more unswept cards available, the thread performs
a global garbage collection. For this, all other threads are
stopped. A global mark is performed and all cards are put into the
global list of unswept cards. When necessary, new cards are
allocated and initialized at this time. The other threads are then
started again.
*/
typedef struct scm_t_cell typedef struct scm_t_cell
{ {
SCM word_0; SCM word_0;
@ -66,7 +94,7 @@ typedef struct scm_t_cell
#define SCM_GC_CARD_N_HEADER_CELLS 1 #define SCM_GC_CARD_N_HEADER_CELLS 1
#define SCM_GC_CARD_N_CELLS 256 #define SCM_GC_CARD_N_CELLS 256
#define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell) #define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell)
#define SCM_GC_CARD_BVEC(card) ((scm_t_c_bvec_long *) ((card)->word_0)) #define SCM_GC_CARD_BVEC(card) ((scm_t_c_bvec_long *) ((card)->word_0))
#define SCM_GC_SET_CARD_BVEC(card, bvec) \ #define SCM_GC_SET_CARD_BVEC(card, bvec) \
@ -187,6 +215,10 @@ typedef unsigned long scm_t_c_bvec_long;
#define SCM_SET_CELL_OBJECT_2(x, v) SCM_SET_CELL_OBJECT ((x), 2, (v)) #define SCM_SET_CELL_OBJECT_2(x, v) SCM_SET_CELL_OBJECT ((x), 2, (v))
#define SCM_SET_CELL_OBJECT_3(x, v) SCM_SET_CELL_OBJECT ((x), 3, (v)) #define SCM_SET_CELL_OBJECT_3(x, v) SCM_SET_CELL_OBJECT ((x), 3, (v))
#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
#define SCM_CELL_TYPE(x) SCM_CELL_WORD_0 (x) #define SCM_CELL_TYPE(x) SCM_CELL_WORD_0 (x)
#define SCM_SET_CELL_TYPE(x, t) SCM_SET_CELL_WORD_0 ((x), (t)) #define SCM_SET_CELL_TYPE(x, t) SCM_SET_CELL_WORD_0 ((x), (t))
@ -195,27 +227,11 @@ typedef unsigned long scm_t_c_bvec_long;
* the freelist. Due to this structure, freelist cells are not cons cells * the freelist. Due to this structure, freelist cells are not cons cells
* and thus may not be accessed using SCM_CAR and SCM_CDR. */ * and thus may not be accessed using SCM_CAR and SCM_CDR. */
/*
SCM_FREECELL_P removed ; the semantics are ambiguous with lazy
sweeping. Could mean "this cell is no longer in use (will be swept)"
or "this cell has just been swept, and is not yet in use".
*/
#define SCM_FREECELL_P this_macro_has_been_removed_see_gc_header_file
#define SCM_FREE_CELL_CDR(x) \ #define SCM_FREE_CELL_CDR(x) \
(SCM_GC_CELL_OBJECT ((x), 1)) (SCM_GC_CELL_OBJECT ((x), 1))
#define SCM_SET_FREE_CELL_CDR(x, v) \ #define SCM_SET_FREE_CELL_CDR(x, v) \
(SCM_GC_SET_CELL_OBJECT ((x), 1, (v))) (SCM_GC_SET_CELL_OBJECT ((x), 1, (v)))
#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
#if (SCM_DEBUG_CELL_ACCESSES == 1) #if (SCM_DEBUG_CELL_ACCESSES == 1)
/* Set this to != 0 if every cell that is accessed shall be checked: /* Set this to != 0 if every cell that is accessed shall be checked:
*/ */
@ -227,10 +243,9 @@ void scm_i_expensive_validation_check (SCM cell);
SCM_API scm_i_pthread_mutex_t scm_i_gc_admin_mutex; SCM_API scm_i_pthread_mutex_t scm_i_gc_admin_mutex;
SCM_API int scm_block_gc; #define scm_gc_running_p (SCM_I_CURRENT_THREAD->gc_running_p)
SCM_API int scm_gc_heap_lock;
SCM_API unsigned int scm_gc_running_p;
SCM_API scm_i_pthread_mutex_t scm_i_sweep_mutex; SCM_API scm_i_pthread_mutex_t scm_i_sweep_mutex;
#if (SCM_ENABLE_DEPRECATED == 1) #if (SCM_ENABLE_DEPRECATED == 1)
@ -305,7 +320,7 @@ SCM_API SCM scm_gc_live_object_stats (void);
SCM_API SCM scm_gc (void); SCM_API SCM scm_gc (void);
SCM_API void scm_gc_for_alloc (struct scm_t_cell_type_statistics *freelist); SCM_API void scm_gc_for_alloc (struct scm_t_cell_type_statistics *freelist);
SCM_API SCM scm_gc_for_newcell (struct scm_t_cell_type_statistics *master, SCM *freelist); SCM_API SCM scm_gc_for_newcell (struct scm_t_cell_type_statistics *master, SCM *freelist);
SCM_API void scm_igc (const char *what); SCM_API void scm_i_gc (const char *what);
SCM_API void scm_gc_mark (SCM p); SCM_API void scm_gc_mark (SCM p);
SCM_API void scm_gc_mark_dependencies (SCM p); SCM_API void scm_gc_mark_dependencies (SCM p);
SCM_API void scm_mark_locations (SCM_STACKITEM x[], unsigned long n); SCM_API void scm_mark_locations (SCM_STACKITEM x[], unsigned long n);