mirror of https://git.savannah.gnu.org/git/guile.git
synced 2025-04-30 03:40:34 +02:00
* coop.c, iselect.c: Since thread switches are now performed with
interrupts masked, we can't use the old mechanism of delivering
signals immediately when they arrive.  Signals must instead be
delivered when the asyncs run, *after* the end of the critical
section in scm_internal_select.  But that is also after the context
switch, so the signal would be delivered to a different thread.  To
avoid this, I have changed the protocol of
coop_wait_for_runnable_thread and friends so that they are allowed
to return the original thread.  So, if a signal arrives during
scm_internal_select, we are no longer forced to do a context
switch, but can remain in the same thread and deliver the signal
to it.

* iselect.c: Small fixes.
parent 9cb5124fe3
commit c69dfa6575
3 changed files with 95 additions and 33 deletions
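The protocol change described in the commit message is easiest to see in isolation. Below is a minimal, self-contained sketch of the idea with simplified thread and queue types (not the actual coop.c/iselect.c code): the scheduler is now allowed to hand the calling thread back to itself when nothing else is runnable, so a thread whose select() was interrupted by a signal can stay on its own stack and deliver the signal through its asyncs instead of being forced through a context switch.

/* Illustrative sketch only: simplified types, not the real coop/iselect code. */
#include <stddef.h>

typedef struct thread thread_t;
struct thread { thread_t *next; };

static thread_t *run_queue;       /* other runnable threads, NULL if none */
static thread_t *current_thread;  /* the thread that called the scheduler */

static thread_t *
dequeue (thread_t **q)
{
  thread_t *t = *q;
  if (t != NULL)
    *q = t->next;
  return t;
}

/* Old protocol: return some *other* runnable thread (or block until one
   exists), which forces a context switch even when the caller only needs
   to handle a signal that interrupted its select() call.

   New protocol: the caller itself is a legal return value.  If the run
   queue is empty, hand the original thread back; it then stays put and
   runs its pending asyncs (delivering the signal) right there. */
static thread_t *
wait_for_runnable_thread (void)
{
  thread_t *t = dequeue (&run_queue);
  return t != NULL ? t : current_thread;
}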
@@ -1,4 +1,53 @@
1998-01-26  Mikael Djurfeldt  <mdj@mdj.nada.kth.se>
1998-01-30  Mikael Djurfeldt  <mdj@mdj.nada.kth.se>

	* dynwind.c (scm_wind_chain): New debug function.

	* coop-threads.c (scheme_launch_data, scheme_body_bootstrip,
	scheme_handler_bootstrip, scheme_launch_thread, c_launch_data,
	c_body_bootstrip, c_handler_bootstrip, c_launch_thread): Add an
	extra layer of functions around the body and handler of a thread.
	This extra layer makes sure that the handler is called in the
	dynamic context of the surround (= empty dynwind list), but under
	the *dynamic root* of the body.  We can not use the dynamic root
	of the surround since that root belongs to another thread => stack
	is not handled correctly.  It may seem ugly to use this extra
	layer, but the extra cost in terms of execution time is really
	negligible compared to the total amount of time required to create
	a thread, and it would reduce maintainability to duplicate the
	crucial and complicated steps performed by cwdr.

	* __scm.h (SCM_ASYNC_TICK): Removed thread switching code.
	(SCM_ALLOW_INTS): Added thread switching code before interrupts
	get re-enabled.  The important effect of this is that interrupts
	are blocked during thread switching so that thread data structures
	don't risk getting messed up by an unfortunate signal.
	(SCM_REDEFER_INTS, SCM_REALLOW_INTS): It turned out that gcc-2.8.0
	seems to do more aggressive optimization which actually move
	instructions around in these macros in a fatal way.  Therefore:
	Introduce Anthony's SCM_FENCE macro!  (And I who thought he was
	just superstitious...)
	(SCM_TICK): Maybe do a context switch and take care of asyncs.
	This macro should be used instead of SCM_ASYNC_TICK since the
	latter doesn't do context switches any more.

	* eval.c (scm_eval, scm_deval), eq.c (scm_equal_p): Use SCM_TICK
	instead of SCM_ASYNC_TICK.

	* coop.c, iselect.c: Since thread switches are now performed with
	interrupts masked, we can't use the old mechanism of delivering
	signals immediately when they arrive.  Signals must instead be
	delivered when the asyncs run *after* the end of the critical
	section in scm_internal_select.  But this also means after context
	switch so that the signal will be delivered to a different thread.
	To avoid this, I have changed the protocol of
	coop_wait_for_runnable_thread and friends so that they are allowed
	to return the original thread.  So, if a signal arrives during
	scm_internal_select, we won't any longer be forced do a context
	switch, but can remain in the same thread and deliver the signal
	to it.

	* async.c, async.h (asyncs_pending): Renamed asyncs_pending -->
	scm_asyncs_pending and made it global.

	* iselect.c: Small fixes.
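The __scm.h item in the ChangeLog entry above covers two separate ideas: a compiler barrier (SCM_FENCE) so that gcc-2.8.0 cannot move the statements guarded by the interrupt-masking flag outside the masked region, and a new SCM_TICK that both yields and runs asyncs. Here is a rough sketch of both, using GCC's empty asm with a "memory" clobber as the barrier; the helper names and exact expansions are guesses for illustration, not the real __scm.h definitions.

/* Illustration only -- hypothetical names, not the actual __scm.h macros. */

extern int ints_disabled;        /* stand-in for scm_ints_disabled */
extern int asyncs_pending;       /* stand-in for scm_asyncs_pending */
extern void run_asyncs (void);   /* hypothetical: deliver queued signals */
extern void maybe_switch (void); /* hypothetical: cooperative thread switch */

/* Compiler barrier: an empty asm with a "memory" clobber tells GCC it may
   not move memory accesses across this point, so the protected statements
   really stay between the defer and the reallow. */
#define FENCE asm volatile ("" : : : "memory")

#define DEFER_INTS   do { ints_disabled++; FENCE; } while (0)
#define REALLOW_INTS do { FENCE; ints_disabled--; } while (0)

/* ASYNC_TICK only delivers pending asyncs; it no longer switches threads. */
#define ASYNC_TICK do { if (asyncs_pending) run_asyncs (); } while (0)

/* TICK maybe does a context switch and then takes care of asyncs, which is
   why it replaces ASYNC_TICK at the explicit check points in eval.c/eq.c. */
#define TICK do { maybe_switch (); ASYNC_TICK; } while (0)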
@@ -97,6 +97,8 @@ typedef unsigned long *ulongptr;
static char bc[256];   /* Bit counting array.  bc[x] is the number of
                          bits in x. */

int scm_I_am_dead;

/* This flag indicates that several threads are waiting on the same
   file descriptor.  When this is the case, the common fd sets are
   updated in a more inefficient way. */
@@ -415,8 +417,8 @@ first_interesting_fd (void)
}

/* Revive all threads with an error status. */
static void
error_revive (void)
void
scm_error_revive_threads (void)
{
  coop_t *t;
@@ -424,8 +426,10 @@ error_revive (void)
    {
      t->_errno = errno;
      t->retval = -1;
      coop_qput (&coop_global_runq, t);
      if (t != coop_global_curr)
        coop_qput (&coop_global_runq, t);
    }
  collisionp = 0;
  gnfds = 0;
  FD_ZERO (&greadfds);
  FD_ZERO (&gwritefds);
@@ -436,12 +440,21 @@ error_revive (void)
   try to wake up some threads and return the first one.  Return NULL
   if we couldn't find any. */
static coop_t *
find_thread (int n, struct timeval *now)
find_thread (int n, struct timeval *now, int sleepingp)
{
  coop_t *t;
  int fd;

  if (n == 0)
  if (n < 0)
    /* An error or a signal has occured.  Wake all threads.  Since we
       don't care to calculate if there is a sinner we report the
       error to all of them. */
    {
      scm_error_revive_threads ();
      if (!scm_I_am_dead)
        return coop_global_curr;
    }
  else if (n == 0)
    {
      while (!QEMPTYP (coop_global_sleepq)
             && (t = QFIRST (coop_global_sleepq))->timeoutp
@@ -510,11 +523,6 @@ find_thread (int n, struct timeval *now)
          coop_qput (&coop_global_sleepq, t);
        }
    }
  else /* n < 0 */
    /* An error has occured.  Wake all threads.  Since we don't care
       to calculate if there is a sinner we report the error to all of
       them. */
    error_revive ();

  return coop_qget (&coop_global_runq);
}
@@ -532,9 +540,13 @@ coop_next_runnable_thread ()

  /* Just return next thread on the runq if the sleepq is empty. */
  if (QEMPTYP (coop_global_sleepq))
    return coop_qget (&coop_global_runq);
    {
      if (QEMPTYP (coop_global_runq))
        return coop_global_curr;
      else
        return coop_qget (&coop_global_runq);
    }

  ++scm_ints_disabled;
  if (gnfds > 0)
    n = safe_select (gnfds, &greadfds, &gwritefds, &gexceptfds, &timeout0);
  else
@@ -542,13 +554,11 @@ coop_next_runnable_thread ()
  if (QFIRST (coop_global_sleepq)->timeoutp)
    {
      gettimeofday (&now, NULL);
      t = find_thread (n, &now);
      t = find_thread (n, &now, 0);
    }
  else
    t = find_thread (n, 0);
  if (!--scm_ints_disabled)
    SCM_ASYNC_TICK;
  return t;
    t = find_thread (n, 0, 0);
  return t == NULL ? coop_global_curr : t;
}

coop_t *
@@ -557,13 +567,12 @@ coop_wait_for_runnable_thread_now (struct timeval *now)
  int n;
  coop_t *t;

  ++scm_ints_disabled;
  if (gnfds > 0)
    n = safe_select (gnfds, &greadfds, &gwritefds, &gexceptfds, &timeout0);
  else
    n = 0;
  /* Is there any other runnable thread? */
  t = find_thread (n, now);
  t = find_thread (n, now, 1);
  while (t == NULL)
    {
      /* No.  Let the process go to sleep. */
@@ -582,11 +591,9 @@ coop_wait_for_runnable_thread_now (struct timeval *now)
      else
        n = safe_select (gnfds, &greadfds, &gwritefds, &gexceptfds, NULL);
      gettimeofday (now, NULL);
      t = find_thread (n, now);
      t = find_thread (n, now, 1);
    }

  if (!--scm_ints_disabled)
    SCM_ASYNC_TICK;
  return t;
}
@@ -596,7 +603,12 @@ coop_wait_for_runnable_thread ()
  struct timeval now;

  if (QEMPTYP (coop_global_sleepq))
    return coop_qget (&coop_global_runq);
    {
      if (QEMPTYP (coop_global_runq))
        return coop_global_curr;
      else
        return coop_qget (&coop_global_runq);
    }

  if (QFIRST (coop_global_sleepq)->timeoutp)
    gettimeofday (&now, NULL);
@@ -613,14 +625,14 @@ scm_internal_select (int nfds,
{
  struct timeval now;
  coop_t *t, *curr = coop_global_curr;

  /* If the timeout is 0, we're polling and can handle it quickly. */
  if (timeout != NULL
      && timeout->tv_sec == 0
      && timeout->tv_usec == 0)
    return select (nfds, readfds, writefds, exceptfds, timeout);

  ++scm_ints_disabled;
  SCM_DEFER_INTS;

  /* Add our file descriptor flags to the common set. */
  curr->nfds = nfds;
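The fast path added at the top of scm_internal_select in the hunk above relies on a property of select(2) itself: a timeout whose seconds and microseconds are both zero means "poll and return immediately", so there is no blocking and no need to involve the scheduler at all. A stand-alone illustration of that behaviour:

/* Stand-alone demo of select(2) with a zero timeout (a pure poll). */
#include <stdio.h>
#include <sys/select.h>

int
main (void)
{
  fd_set readfds;
  struct timeval zero = { 0, 0 };   /* tv_sec == 0 && tv_usec == 0 */

  FD_ZERO (&readfds);
  FD_SET (0, &readfds);             /* watch stdin (fd 0) */

  /* Returns without blocking: > 0 if stdin is readable right now,
     0 if it is not, -1 on error. */
  int n = select (1, &readfds, NULL, NULL, &zero);
  printf ("stdin is%s readable now (select returned %d)\n",
          n > 0 ? "" : " not", n);
  return 0;
}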
@@ -652,20 +664,18 @@ scm_internal_select (int nfds,
      t = coop_wait_for_runnable_thread_now (&now);
    }

  if (!--scm_ints_disabled)
    SCM_ASYNC_TICK;

  /* If the new thread is the same as the sleeping thread, do nothing */
  if (t != curr)
  if (t != coop_global_curr)
    {
      /* Do a context switch. */
      coop_global_curr = t;
      QT_BLOCK (coop_sleephelp, curr, NULL, t->sp);
    }

  if (curr->retval == -1)
    errno = curr->_errno;
  return curr->retval;
  if (coop_global_curr->retval == -1)
    errno = coop_global_curr->_errno;
  SCM_ALLOW_INTS;
  return coop_global_curr->retval;
}

/* Initialize bit counting array */
@@ -83,11 +83,14 @@

#endif /* no FD_SET */

extern int scm_I_am_dead;

extern int scm_internal_select (int fds,
                                SELECT_TYPE *rfds,
                                SELECT_TYPE *wfds,
                                SELECT_TYPE *efds,
                                struct timeval *timeout);
extern void scm_error_revive_threads (void);
extern void scm_init_iselect (void);

#endif
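For callers, the header hunk above is the whole interface: scm_internal_select takes the same arguments and returns the same values as select(2), but lets other cooperative threads run while the calling thread is blocked, and (after this change) delivers a signal that arrives during the wait to the thread that was waiting. A usage sketch, assuming SELECT_TYPE is fd_set (the usual case when FD_SET is available):

/* Usage sketch; assumes SELECT_TYPE is fd_set. */
#include <sys/select.h>
#include <sys/time.h>

extern int scm_internal_select (int fds, fd_set *rfds, fd_set *wfds,
                                fd_set *efds, struct timeval *timeout);

/* Wait up to `seconds' for `fd' to become readable, like select(2) but
   without stalling the other cooperative threads.  Returns > 0 if ready,
   0 on timeout, -1 on error with errno set (propagated from the thread
   that did the waiting). */
static int
wait_readable (int fd, long seconds)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = seconds;
  tv.tv_usec = 0;

  return scm_internal_select (fd + 1, &readfds, NULL, NULL, &tv);
}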