
Inline thread wakeup data into "struct scm_thread"

This way we don't allocate a separate untagged wake data object, and we
don't need a type tag.  On the other hand we have to roll a more
complicated seqlock, but that's fine.

Also switch to requiring C11 atomics.

* libguile/atomics-internal.h: Remove fallback for when we don't have
C11 atomics.
(scm_atomic_ref_uint32, scm_atomic_swap_uint32, scm_atomic_set_uint32):
New helpers.
* libguile/threads-internal.h:
* libguile/async.h:
* libguile/async.c: Inline the thread wake data.  Happily, waking a
remote thread is still wait-free from both sides.
Andy Wingo 2025-06-25 16:00:07 +02:00
parent 7d1eda149e
commit b0ce014801
4 changed files with 170 additions and 179 deletions
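Before the diff: the "more complicated seqlock" boils down to the shape
below.  This is a minimal standalone sketch, assuming a single writer
(the waiting thread itself) and any number of remote readers; the names
(struct seqlocked, publish, snapshot) are illustrative, not Guile API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct seqlocked
{
  _Atomic uint32_t seq;   /* even: stable; odd: write in progress */
  _Atomic int payload;    /* stands in for the inlined wake-data fields */
};

/* Writer side: only the owning thread writes, so swapping the counter
   to an odd value is enough to mark a write in progress.  Wait-free. */
static void
publish (struct seqlocked *s, int v)
{
  uint32_t seq = atomic_exchange (&s->seq, 1);
  atomic_store_explicit (&s->payload, v, memory_order_relaxed);
  atomic_store (&s->seq, seq + 2);  /* even again, one generation later */
}

/* Reader side: also wait-free; on any sign of a concurrent write (odd
   counter, or a counter that changed underneath us), report failure
   rather than retrying. */
static bool
snapshot (struct seqlocked *s, int *out)
{
  uint32_t seq = atomic_load (&s->seq);
  if (seq & 1)
    return false;
  *out = atomic_load_explicit (&s->payload, memory_order_relaxed);
  return seq == atomic_load (&s->seq);
}

Both sides are wait-free: the writer never loops, and a reader that
observes an odd or changed counter does not retry.  In the patch below
a failed snapshot is instead treated as NOT_WAITING, which is safe
because a thread that was concurrently updating its wake state
re-checks pending_asyncs before it sleeps.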

libguile/async.c

@@ -25,6 +25,7 @@
#endif
#include <full-write.h>
#include <stdatomic.h>
#include <string.h>
#include <unistd.h>
@@ -154,27 +155,106 @@ scm_async_tick (void)
scm_call_0 (scm_i_async_pop (t));
}
struct scm_thread_wake_data {
enum { WAIT_FD, WAIT_COND } kind;
union {
struct {
int fd;
} wait_fd;
struct {
scm_i_pthread_mutex_t *mutex;
scm_i_pthread_cond_t *cond;
} wait_cond;
} data;
};
static const int NOT_WAITING = 0;
static const int WAITING_ON_FD = 1;
static const int WAITING_ON_COND = 2;
int
static inline void
publish_wake_data_for_thread (scm_thread *t, struct scm_thread_wake_data data)
{
/* Hand-rolled seqlock over the wait state. The thread itself is the
only writer of the wait state, and foreign threads are the only
readers. */
uint32_t seq = scm_atomic_swap_uint32 (&t->wake_data.seq, 1);
/* Counter must be even before we prepare to wait. */
if (seq & 1) abort ();
switch (data.state)
{
case NOT_WAITING:
atomic_store_explicit ((atomic_int *) &t->wake_data.state,
NOT_WAITING, memory_order_relaxed);
break;
case WAITING_ON_FD:
atomic_store_explicit ((atomic_int *) &t->wake_data.state,
WAITING_ON_FD, memory_order_relaxed);
atomic_store_explicit ((atomic_int *) &t->wake_data.fd,
data.fd, memory_order_relaxed);
break;
case WAITING_ON_COND:
atomic_store_explicit ((atomic_int *) &t->wake_data.state,
WAITING_ON_COND, memory_order_relaxed);
atomic_store_explicit ((_Atomic void**) &t->wake_data.mutex,
(void*) data.mutex, memory_order_relaxed);
atomic_store_explicit ((_Atomic void**) &t->wake_data.cond,
(void*) data.cond, memory_order_relaxed);
break;
default:
abort ();
}
scm_atomic_set_uint32 (&t->wake_data.seq, seq + 2);
}
static inline struct scm_thread_wake_data
read_wake_data_for_remote_thread_after_publishing_async (scm_thread *t)
{
struct scm_thread_wake_data wake = {0,};
uint32_t seq = scm_atomic_ref_uint32 (&t->wake_data.seq);
if (seq & 1)
{
/* The thread is preparing to wait but will check the
pending_asyncs before it sleeps. */
wake.state = NOT_WAITING;
}
else
{
wake.state =
atomic_load_explicit ((atomic_int *) &t->wake_data.state,
memory_order_relaxed);
switch (wake.state)
{
case NOT_WAITING:
break;
case WAITING_ON_FD:
wake.fd =
atomic_load_explicit ((atomic_int *) &t->wake_data.fd,
memory_order_relaxed);
break;
case WAITING_ON_COND:
wake.mutex = (scm_i_pthread_mutex_t *)
atomic_load_explicit ((_Atomic void**) &t->wake_data.mutex,
memory_order_relaxed);
wake.cond = (scm_i_pthread_cond_t *)
atomic_load_explicit ((_Atomic void**) &t->wake_data.cond,
memory_order_relaxed);
break;
default:
abort ();
}
if (seq != scm_atomic_ref_uint32 (&t->wake_data.seq))
/* If the thread updated the wake state since we started
reading it, then the thread also checked the
pending_asyncs, so we don't have to do anything. */
wake.state = NOT_WAITING;
}
return wake;
}
static int
scm_i_prepare_to_wait (scm_thread *t,
struct scm_thread_wake_data *wake)
struct scm_thread_wake_data data)
{
if (t->block_asyncs)
return 0;
scm_atomic_set_pointer ((void **)&t->wake, wake);
publish_wake_data_for_thread (t, data);
/* If no interrupt was registered in the meantime, then any future
wakeup will signal the FD or cond var. */
@@ -190,16 +270,17 @@ scm_i_prepare_to_wait (scm_thread *t,
void
scm_i_wait_finished (scm_thread *t)
{
scm_atomic_set_pointer ((void **)&t->wake, NULL);
struct scm_thread_wake_data data = { .state = NOT_WAITING };
publish_wake_data_for_thread (t, data);
}
int
scm_i_prepare_to_wait_on_fd (scm_thread *t, int fd)
{
struct scm_thread_wake_data *wake;
wake = scm_gc_typed_calloc (struct scm_thread_wake_data);
wake->kind = WAIT_FD;
wake->data.wait_fd.fd = fd;
struct scm_thread_wake_data wake = {
.state = WAITING_ON_FD,
.fd = fd
};
return scm_i_prepare_to_wait (t, wake);
}
@@ -214,11 +295,11 @@ scm_i_prepare_to_wait_on_cond (scm_thread *t,
scm_i_pthread_mutex_t *m,
scm_i_pthread_cond_t *c)
{
struct scm_thread_wake_data *wake;
wake = scm_gc_typed_calloc (struct scm_thread_wake_data);
wake->kind = WAIT_COND;
wake->data.wait_cond.mutex = m;
wake->data.wait_cond.cond = c;
struct scm_thread_wake_data wake = {
.state = WAITING_ON_COND,
.mutex = m,
.cond = c
};
return scm_i_prepare_to_wait (t, wake);
}
@@ -235,6 +316,7 @@ scm_c_wait_finished (void)
scm_i_wait_finished (SCM_I_CURRENT_THREAD);
}
SCM_DEFINE (scm_system_async_mark_for_thread, "system-async-mark", 1, 1, 0,
(SCM proc, SCM thread),
"Mark @var{proc} (a procedure with zero arguments) for future execution\n"
@@ -248,7 +330,6 @@ SCM_DEFINE (scm_system_async_mark_for_thread, "system-async-mark", 1, 1, 0,
#define FUNC_NAME s_scm_system_async_mark_for_thread
{
scm_thread *t;
struct scm_thread_wake_data *wake;
if (SCM_UNBNDP (thread))
t = SCM_I_CURRENT_THREAD;
@@ -262,8 +343,28 @@ SCM_DEFINE (scm_system_async_mark_for_thread, "system-async-mark", 1, 1, 0,
/* At this point the async is enqueued. However if the thread is
sleeping, we have to wake it up. */
if ((wake = scm_atomic_ref_pointer ((void **) &t->wake)))
struct scm_thread_wake_data wake =
read_wake_data_for_remote_thread_after_publishing_async (t);
switch (wake.state)
{
case NOT_WAITING:
break;
case WAITING_ON_FD:
{
char dummy = 0;
/* T might already be done with sleeping here, but
interrupting it once too often does no harm.  T might also
not yet have started sleeping, but this is no problem either
since the data written to a pipe will not be lost, unlike a
condition variable signal. */
full_write (wake.fd, &dummy, 1);
}
break;
case WAITING_ON_COND:
/* By now, the thread T might be out of its sleep already, or
might even be in the next, unrelated sleep. Interrupting it
anyway does no harm, however.
@@ -273,25 +374,13 @@ SCM_DEFINE (scm_system_async_mark_for_thread, "system-async-mark", 1, 1, 0,
mutex locked while preparing the wait and will only unlock it
again while waiting on the cond.
*/
if (wake->kind == WAIT_COND)
{
scm_i_scm_pthread_mutex_lock (wake->data.wait_cond.mutex);
scm_i_pthread_cond_signal (wake->data.wait_cond.cond);
scm_i_pthread_mutex_unlock (wake->data.wait_cond.mutex);
}
else if (wake->kind == WAIT_FD)
{
char dummy = 0;
scm_i_scm_pthread_mutex_lock (wake.mutex);
scm_i_pthread_cond_signal (wake.cond);
scm_i_pthread_mutex_unlock (wake.mutex);
break;
/* Likewise, T might already be done with sleeping here, but
interrupting it once too often does no harm.  T might also
not yet have started sleeping, but this is no problem
either since the data written to a pipe will not be lost,
unlike a condition variable signal. */
full_write (wake->data.wait_fd.fd, &dummy, 1);
}
else
abort ();
default:
abort ();
}
return SCM_UNSPECIFIED;
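To see the two halves together, here is a sketch of a waiter that
blocks on a file descriptor.  This is illustrative, not code from this
commit; it assumes, as the truncated hunk above suggests, that
scm_i_prepare_to_wait_on_fd returns nonzero when an async is already
pending and the caller should skip the sleep.

#include <poll.h>

/* Hypothetical caller: block until FD is readable, or until another
   thread marks an async for us and writes a dummy byte to the pipe. */
static void
wait_readable_or_interrupted (scm_thread *t, int fd)
{
  /* Publish WAITING_ON_FD via the seqlock before sleeping. */
  if (scm_i_prepare_to_wait_on_fd (t, fd))
    return;  /* an async arrived first; run it instead of sleeping */

  struct pollfd pfd = { .fd = fd, .events = POLLIN };
  poll (&pfd, 1, -1);  /* real data or the waker's dummy byte ends this */

  /* Republish NOT_WAITING, so later wakeups are harmless no-ops. */
  scm_i_wait_finished (t);
}

The asymmetry between the two wake paths explains the mutex handshake
in the WAITING_ON_COND case above: a byte written to the pipe persists
until read, so an early fd wakeup is never lost, whereas a condition
signal delivered before the target is actually waiting would vanish;
hence the waker signals under the mutex that the waiter holds until it
sleeps.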

libguile/async.h

@@ -1,7 +1,7 @@
#ifndef SCM_ASYNC_H
#define SCM_ASYNC_H
/* Copyright 1995-1998,2000-2002,2004-2006,2008-2009,2011,2014,2018
/* Copyright 1995-1998,2000-2002,2004-2006,2008-2009,2011,2014,2018,2025
Free Software Foundation, Inc.
This file is part of Guile.
@@ -48,8 +48,6 @@ SCM_API void *scm_c_call_with_unblocked_asyncs (void *(*p) (void *d), void *d);
SCM_API void scm_dynwind_block_asyncs (void);
SCM_API void scm_dynwind_unblock_asyncs (void);
SCM_INTERNAL int scm_i_prepare_to_wait (scm_thread *,
struct scm_thread_wake_data *);
SCM_INTERNAL void scm_i_wait_finished (scm_thread *);
SCM_INTERNAL int scm_i_prepare_to_wait_on_fd (scm_thread *, int);
SCM_INTERNAL int scm_i_prepare_to_wait_on_cond (scm_thread *,

libguile/atomics-internal.h

@@ -25,13 +25,33 @@
#include "scm.h"
#ifdef HAVE_STDATOMIC_H
#ifndef HAVE_STDATOMIC_H
#error Guile needs C11 stdatomic.h
#endif
#include <stdatomic.h>
static inline uint32_t
scm_atomic_ref_uint32 (uint32_t *loc)
{
atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
return atomic_load (a_loc);
}
static inline uint32_t
scm_atomic_swap_uint32 (uint32_t *loc, uint32_t val)
{
atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
return atomic_exchange (a_loc, val);
}
static inline void
scm_atomic_set_uint32 (uint32_t *loc, uint32_t val)
{
atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
atomic_store (a_loc, val);
}
static inline uint32_t
scm_atomic_subtract_uint32 (uint32_t *loc, uint32_t arg)
{
@@ -102,131 +122,5 @@ scm_atomic_compare_and_swap_scm (SCM *loc, SCM expected, SCM desired)
SCM_UNPACK (desired));
return result;
}
#else /* HAVE_STDATOMIC_H */
/* Fallback implementation using locks. */
#include "libguile/threads.h"
static scm_i_pthread_mutex_t atomics_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static inline uint32_t
scm_atomic_subtract_uint32 (uint32_t *loc, uint32_t arg)
{
uint32_t ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
*loc -= arg;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline int
scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
uint32_t desired)
{
int ret;
scm_i_pthread_mutex_lock (&atomics_lock);
if (*loc == *expected)
{
*loc = desired;
ret = 1;
}
else
{
*expected = *loc;
ret = 0;
}
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline size_t
scm_atomic_subtract_size (size_t *loc, size_t arg)
{
size_t ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
*loc -= arg;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline void
scm_atomic_set_pointer (void **loc, void *val)
{
scm_i_pthread_mutex_lock (&atomics_lock);
*loc = val;
scm_i_pthread_mutex_unlock (&atomics_lock);
}
static inline void *
scm_atomic_ref_pointer (void **loc)
{
void *ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline void *
scm_atomic_swap_pointer (void **loc, void *new_val)
{
void *ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
*loc = new_val;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline void
scm_atomic_set_bits (scm_t_bits *loc, scm_t_bits val)
{
scm_i_pthread_mutex_lock (&atomics_lock);
*loc = val;
scm_i_pthread_mutex_unlock (&atomics_lock);
}
static inline void
scm_atomic_set_scm (SCM *loc, SCM val)
{
scm_i_pthread_mutex_lock (&atomics_lock);
*loc = val;
scm_i_pthread_mutex_unlock (&atomics_lock);
}
static inline SCM
scm_atomic_ref_scm (SCM *loc)
{
SCM ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline SCM
scm_atomic_swap_scm (SCM *loc, SCM val)
{
SCM ret;
scm_i_pthread_mutex_lock (&atomics_lock);
ret = *loc;
*loc = val;
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
static inline SCM
scm_atomic_compare_and_swap_scm (SCM *loc, SCM expected, SCM desired)
{
SCM ret;
scm_i_pthread_mutex_lock (&atomics_lock);
if (*loc == expected)
{
*loc = desired;
ret = expected;
}
else
{
ret = *loc;
}
scm_i_pthread_mutex_unlock (&atomics_lock);
return ret;
}
#endif /* HAVE_STDATOMIC_H */
#endif /* SCM_ATOMICS_INTERNAL_H */
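One caveat worth noting about the helpers added above: they cast a
plain uint32_t * to atomic_uint_least32_t *, and C11 does not strictly
guarantee that an _Atomic type shares the representation of its base
type.  A minimal compile-time sanity check one could imagine adding
(an illustration, not part of the commit):

#include <stdatomic.h>
#include <stdint.h>

/* If the atomic type carried extra state (say, an embedded lock), or
   uint_least32_t were wider than uint32_t, the casts in the helpers
   above would be unsound; this at least rules out a size mismatch. */
_Static_assert (sizeof (atomic_uint_least32_t) == sizeof (uint32_t),
                "atomic uint32 helpers require matching layout");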

libguile/threads-internal.h

@@ -29,6 +29,16 @@
struct gc_mutator;
/* See async.c for the details about how to wake up a thread. */
struct scm_thread_wake_data
{
uint32_t seq;
int state;
int fd;
scm_i_pthread_mutex_t *mutex;
scm_i_pthread_cond_t *cond;
};
struct scm_thread {
scm_t_bits tag;
@@ -58,7 +68,7 @@ struct scm_thread {
this thread exits. */
int needs_unregister;
struct scm_thread_wake_data *wake;
struct scm_thread_wake_data wake_data;
scm_i_pthread_cond_t sleep_cond;
int sleep_pipe[2];