diff --git a/libguile/gc-malloc.c b/libguile/gc-malloc.c
index 127805eb0..a58ed1376 100644
--- a/libguile/gc-malloc.c
+++ b/libguile/gc-malloc.c
@@ -111,19 +111,22 @@ scm_realloc (void *mem, size_t size)
     return ptr;
 
   scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
-  
+  scm_gc_running_p = 1;
+
   scm_i_sweep_all_segments ("realloc");
 
   SCM_SYSCALL (ptr = realloc (mem, size));
   if (ptr)
     {
+      scm_gc_running_p = 0;
       scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
       return ptr;
     }
 
-  scm_igc ("realloc");
+  scm_i_gc ("realloc");
   scm_i_sweep_all_segments ("realloc");
+  scm_gc_running_p = 0;
   scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
 
   SCM_SYSCALL (ptr = realloc (mem, size));
@@ -219,9 +222,10 @@ increase_mtrigger (size_t size, const char *what)
       float yield;
 
       scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+      scm_gc_running_p = 1;
 
       prev_alloced = mallocated;
-      scm_igc (what);
+      scm_i_gc (what);
       scm_i_sweep_all_segments ("mtrigger");
 
       yield = (((float) prev_alloced - (float) scm_mallocated)
@@ -262,7 +266,8 @@ increase_mtrigger (size_t size, const char *what)
                    scm_mtrigger);
 #endif
         }
-      
+
+      scm_gc_running_p = 0;
       scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
     }
 }
diff --git a/libguile/gc-segment.c b/libguile/gc-segment.c
index fee06e7f9..ee87ba73f 100644
--- a/libguile/gc-segment.c
+++ b/libguile/gc-segment.c
@@ -89,7 +89,7 @@ scm_i_initialize_heap_segment_data (scm_t_heap_segment * segment, size_t request
   scm_t_cell * memory = 0;
 
   /*
-    We use malloc to alloc the heap. On GNU libc this is
+    We use calloc to alloc the heap. On GNU libc this is
     equivalent to mmapping /dev/zero
    */
   SCM_SYSCALL (memory = (scm_t_cell * ) calloc (1, mem_needed));
@@ -320,7 +320,7 @@ SCM
 scm_i_sweep_some_segments (scm_t_cell_type_statistics * fl)
 {
   int i = fl->heap_segment_idx;
-  SCM collected =SCM_EOL;
+  SCM collected = SCM_EOL;
 
   if (i == -1)
     i++;
@@ -458,19 +458,11 @@ scm_i_find_heap_segment_containing_object (SCM obj)
    RETURN: the index of the segment.
  */
 int
-scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist, policy_on_error error_policy)
+scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist,
+                            policy_on_error error_policy)
 {
   size_t len;
 
-  if (scm_gc_heap_lock)
-    {
-      /* Critical code sections (such as the garbage collector) aren't
-       * supposed to add heap segments.
-       */
-      fprintf (stderr, "scm_i_get_new_heap_segment: Can not extend locked heap.\n");
-      abort ();
-    }
-
   {
     /* Assure that the new segment is predicted to be large enough.
      *
diff --git a/libguile/gc.c b/libguile/gc.c
index 9ff2d41ee..4b4c2ea82 100644
--- a/libguile/gc.c
+++ b/libguile/gc.c
@@ -67,13 +67,9 @@ extern unsigned long * __libc_ia64_register_backing_store_base;
 #include
 #endif
 
-
-
-unsigned int scm_gc_running_p = 0;
-
 /* Lock this mutex before doing lazy sweeping.
  */
-scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
 
 /* Set this to != 0 if every cell that is accessed shall be checked:
  */
@@ -129,7 +125,7 @@ scm_i_expensive_validation_check (SCM cell)
       else
         {
           counter = scm_debug_cells_gc_interval;
-          scm_igc ("scm_assert_cell_valid");
+          scm_gc ();
         }
     }
 }
@@ -214,18 +210,6 @@ SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
  */
 unsigned long scm_mtrigger;
 
-/* scm_gc_heap_lock
- * If set, don't expand the heap.  Set only during gc, during which no allocation
- * is supposed to take place anyway.
- */
-int scm_gc_heap_lock = 0;
-
-/* GC Blocking
- * Don't pause for collection if this is set -- just
- * expand the heap.
- */
-int scm_block_gc = 1;
-
 /* During collection, this accumulates objects holding
  * weak references.
  */
@@ -456,7 +440,12 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
             "no longer accessible.")
 #define FUNC_NAME s_scm_gc
 {
-  scm_igc ("call");
+  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+  scm_gc_running_p = 1;
+  scm_i_gc ("call");
+  scm_gc_running_p = 0;
+  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
+  scm_c_hook_run (&scm_after_gc_c_hook, 0);
   return SCM_UNSPECIFIED;
 }
 #undef FUNC_NAME
@@ -464,16 +453,18 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
 
 
 
-/* When we get POSIX threads support, the master will be global and
- * common while the freelist will be individual for each thread.
+/* The master is global and common while the freelist will be
+ * individual for each thread.
  */
 SCM
 scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
 {
   SCM cell;
+  int did_gc = 0;
 
   scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+  scm_gc_running_p = 1;
 
   *free_cells = scm_i_sweep_some_segments (freelist);
   if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
@@ -482,10 +473,10 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
       *free_cells = scm_i_sweep_some_segments (freelist);
     }
 
-  if (*free_cells == SCM_EOL && !scm_block_gc)
+  if (*free_cells == SCM_EOL)
     {
       /*
-        with the advent of lazy sweep, GC yield is only know just
+        with the advent of lazy sweep, GC yield is only known just
         before doing the GC.
       */
       scm_i_adjust_min_yield (freelist);
@@ -494,7 +485,8 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
          out of fresh cells. Try to get some new ones.
        */
 
-      scm_igc ("cells");
+      did_gc = 1;
+      scm_i_gc ("cells");
 
       *free_cells = scm_i_sweep_some_segments (freelist);
     }
@@ -515,8 +507,12 @@ scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
 
   *free_cells = SCM_FREE_CELL_CDR (cell);
 
+  scm_gc_running_p = 0;
   scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
 
+  if (did_gc)
+    scm_c_hook_run (&scm_after_gc_c_hook, 0);
+
   return cell;
 }
 
@@ -527,18 +523,14 @@ scm_t_c_hook scm_before_sweep_c_hook;
 scm_t_c_hook scm_after_sweep_c_hook;
 scm_t_c_hook scm_after_gc_c_hook;
 
+/* Must be called while holding scm_i_sweep_mutex.
+ */
+
 void
-scm_igc (const char *what)
+scm_i_gc (const char *what)
 {
-  if (scm_block_gc)
-    return;
-
-  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
-
-  /* During the critical section, only the current thread may run. */
   scm_i_thread_put_to_sleep ();
 
-  ++scm_gc_running_p;
   scm_c_hook_run (&scm_before_gc_c_hook, 0);
 
 #ifdef DEBUGINFO
@@ -552,22 +544,13 @@ scm_igc (const char *what)
 
   gc_start_stats (what);
 
-
-
-  if (scm_gc_heap_lock)
-    /* We've invoked the collector while a GC is already in progress.
-       That should never happen.  */
-    abort ();
-
   /* Set freelists to NULL so scm_cons() always triggers gc, causing
-     the above abort() to be triggered.
+     the assertion above to fail.
   */
   *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
   *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
 
-  ++scm_gc_heap_lock;
-
   /*
     Let's finish the sweep. The conservative GC might point into the
     garbage, and marking that would create a mess.
@@ -589,28 +572,17 @@ scm_igc (const char *what)
 
   scm_mallocated -= scm_i_deprecated_memory_return;
 
-
-  scm_c_hook_run (&scm_before_mark_c_hook, 0);
+  /* Mark */
+  scm_c_hook_run (&scm_before_mark_c_hook, 0);
 
   scm_mark_all ();
 
   scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
 
-  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
+  /* Sweep
 
-  /*
-    Moved this lock upwards so that we can alloc new heap at the end of a sweep.
-
-    DOCME: why should the heap be locked anyway?
-   */
-  --scm_gc_heap_lock;
-
-  scm_gc_sweep ();
-
-
-  /*
-    TODO: this hook should probably be moved to just before the mark,
-    since that's where the sweep is finished in lazy sweeping.
+     TODO: the after_sweep hook should probably be moved to just before
+     the mark, since that's where the sweep is finished in lazy
+     sweeping.
 
      MDJ 030219: No, probably not.  The
      original meaning implied at least two things: that it would be
@@ -631,18 +603,15 @@ scm_igc (const char *what)
      distinct classes of hook functions since this can prevent some
      bad interference when several modules adds gc hooks.
    */
+
+  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
+  scm_gc_sweep ();
   scm_c_hook_run (&scm_after_sweep_c_hook, 0);
+
   gc_end_stats ();
 
-  --scm_gc_running_p;
   scm_i_thread_wake_up ();
 
-  /*
-    See above.
-   */
-  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
-
-  scm_c_hook_run (&scm_after_gc_c_hook, 0);
-
   /*
     For debugging purposes, you could do scm_i_sweep_all_segments("debug"), but then the remains of the
@@ -789,7 +758,7 @@ scm_gc_unprotect_object (SCM obj)
       fprintf (stderr, "scm_unprotect_object called during GC.\n");
       abort ();
     }
-  
+
   handle = scm_hashq_get_handle (scm_protects, obj);
 
   if (scm_is_false (handle))
@@ -916,7 +885,6 @@ scm_init_storage ()
   j = SCM_NUM_PROTECTS;
   while (j)
     scm_sys_protects[--j] = SCM_BOOL_F;
-  scm_block_gc = 1;
 
   scm_gc_init_freelist();
   scm_gc_init_malloc ();
@@ -1046,8 +1014,6 @@ scm_gc_sweep (void)
   */
   scm_i_reset_segments ();
 
-  /* When we move to POSIX threads private freelists should probably
-     be GC-protected instead. */
   *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
   *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
 
diff --git a/libguile/gc.h b/libguile/gc.h
index 4cb78a714..baf77fa66 100644
--- a/libguile/gc.h
+++ b/libguile/gc.h
@@ -29,6 +29,34 @@
 
 
 
+/* Cell allocation and garbage collection work roughly in the
+   following manner:
+
+   Each thread has a 'freelist', which is a list of available cells.
+   (It actually has two freelists, one for single cells and one for
+   double cells.  Everything works analogously for double cells.)
+
+   When a thread wants to allocate a cell and the freelist is empty,
+   it refers to a global list of unswept 'cards'.  A card is a small
+   block of cells that are contiguous in memory, together with the
+   corresponding mark bits.  An unswept card is one where the mark bits
+   are set for cells that have been in use during the last global mark
+   phase, but the unmarked cells of the card have not been scanned and
+   freed yet.
+
+   The thread takes one of the unswept cards and sweeps it, thereby
+   building a new freelist that it then uses.  Sweeping a card will
+   call the smob free functions of unmarked cells, for example, and
+   thus, these free functions can run at any time, in any thread.
+
+   When there are no more unswept cards available, the thread performs
+   a global garbage collection.  For this, all other threads are
+   stopped.  A global mark is performed and all cards are put into the
+   global list of unswept cards.  When necessary, new cards are
+   allocated and initialized at this time.  The other threads are then
+   started again.
+*/
+
 typedef struct scm_t_cell
 {
   SCM word_0;
@@ -66,7 +94,7 @@ typedef struct scm_t_cell
 #define SCM_GC_CARD_N_HEADER_CELLS 1
 #define SCM_GC_CARD_N_CELLS 256
 
-#define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell)
+#define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell)
 
 #define SCM_GC_CARD_BVEC(card) ((scm_t_c_bvec_long *) ((card)->word_0))
 #define SCM_GC_SET_CARD_BVEC(card, bvec) \
@@ -187,6 +215,10 @@ typedef unsigned long scm_t_c_bvec_long;
 #define SCM_SET_CELL_OBJECT_2(x, v) SCM_SET_CELL_OBJECT ((x), 2, (v))
 #define SCM_SET_CELL_OBJECT_3(x, v) SCM_SET_CELL_OBJECT ((x), 3, (v))
 
+#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
+#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
+#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
+
 #define SCM_CELL_TYPE(x) SCM_CELL_WORD_0 (x)
 #define SCM_SET_CELL_TYPE(x, t) SCM_SET_CELL_WORD_0 ((x), (t))
 
@@ -195,27 +227,11 @@ typedef unsigned long scm_t_c_bvec_long;
  * the freelist.  Due to this structure, freelist cells are not cons cells
  * and thus may not be accessed using SCM_CAR and SCM_CDR.
  */
-/*
-  SCM_FREECELL_P removed ; the semantics are ambiguous with lazy
-  sweeping. Could mean "this cell is no longer in use (will be swept)"
-  or "this cell has just been swept, and is not yet in use".
- */
-
-#define SCM_FREECELL_P this_macro_has_been_removed_see_gc_header_file
-
 #define SCM_FREE_CELL_CDR(x) \
   (SCM_GC_CELL_OBJECT ((x), 1))
 #define SCM_SET_FREE_CELL_CDR(x, v) \
   (SCM_GC_SET_CELL_OBJECT ((x), 1, (v)))
-
-#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
-#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
-#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
-
-
-
-
 #if (SCM_DEBUG_CELL_ACCESSES == 1)
 
 /* Set this to != 0 if every cell that is accessed shall be checked:
  */
@@ -227,10 +243,9 @@
 void scm_i_expensive_validation_check (SCM cell);
 
 SCM_API scm_i_pthread_mutex_t scm_i_gc_admin_mutex;
 
-SCM_API int scm_block_gc;
-SCM_API int scm_gc_heap_lock;
-SCM_API unsigned int scm_gc_running_p;
+#define scm_gc_running_p (SCM_I_CURRENT_THREAD->gc_running_p)
 SCM_API scm_i_pthread_mutex_t scm_i_sweep_mutex;
+
 
 #if (SCM_ENABLE_DEPRECATED == 1)
 
@@ -305,7 +320,7 @@ SCM_API SCM scm_gc_live_object_stats (void);
 SCM_API SCM scm_gc (void);
 SCM_API void scm_gc_for_alloc (struct scm_t_cell_type_statistics *freelist);
 SCM_API SCM scm_gc_for_newcell (struct scm_t_cell_type_statistics *master, SCM *freelist);
-SCM_API void scm_igc (const char *what);
+SCM_API void scm_i_gc (const char *what);
 SCM_API void scm_gc_mark (SCM p);
 SCM_API void scm_gc_mark_dependencies (SCM p);
 SCM_API void scm_mark_locations (SCM_STACKITEM x[], unsigned long n);
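
A note on the new calling convention introduced above: scm_i_gc no longer takes the sweep mutex itself, so every caller (scm_gc, scm_gc_for_newcell, scm_realloc, increase_mtrigger) must hold scm_i_sweep_mutex, set the now per-thread scm_gc_running_p flag around the collection, and run scm_after_gc_c_hook only after releasing the mutex.  The following standalone C sketch illustrates that discipline with plain pthreads; the names (sweep_mutex, gc_running_p, run_gc_core, after_gc_hook, collect) are illustrative stand-ins, not Guile API.

/* Standalone sketch (illustrative names, not Guile API) of the
   locking discipline this patch gives scm_i_gc and its callers. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sweep_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Per-thread "GC running" flag, mirroring the new scm_gc_running_p
   macro that reads a field of the current thread.  (__thread is the
   GCC thread-local extension.) */
static __thread int gc_running_p = 0;

static void after_gc_hook (void)
{
  puts ("after-gc hook runs outside the critical section");
}

/* Plays the role of scm_i_gc: assumes sweep_mutex is held and
   gc_running_p is set by the caller. */
static void run_gc_core (const char *what)
{
  printf ("collecting (%s), gc_running_p=%d\n", what, gc_running_p);
}

/* Plays the role of a caller such as scm_gc or scm_gc_for_newcell. */
static void collect (const char *what)
{
  pthread_mutex_lock (&sweep_mutex);
  gc_running_p = 1;

  run_gc_core (what);

  gc_running_p = 0;
  pthread_mutex_unlock (&sweep_mutex);

  /* The after-GC hook is deliberately run after the mutex is
     released, as in the patched scm_gc and scm_gc_for_newcell. */
  after_gc_hook ();
}

int main (void)
{
  collect ("call");
  return 0;
}

Because the mutex is no longer recursive, re-entering the collector from within the critical section would deadlock rather than silently nest, which is why the hook runs only after the unlock in the sketch as in the patch.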