Mirror of https://git.savannah.gnu.org/git/guile.git
Inline thread wakeup data into "struct scm_thread"
This way we don't allocate untagged wake data, and we don't need a
type tag.  On the other hand we have to roll a more complicated
seqlock, but that's fine.  Also switch to require C11 atomics.

* libguile/atomics-internal.h: Remove the fallback for when we don't
have C11 atomics.
(scm_atomic_ref_uint32, scm_atomic_swap_uint32, scm_atomic_set_uint32):
New helpers.
* libguile/threads-internal.h:
* libguile/async.h:
* libguile/async.c: Inline the thread wake data.  Happily, waking a
remote thread is still wait-free from both sides.
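A rough picture of the seqlock idea the message alludes to, for readers
following along: the sketch below is illustrative only, not code from
this commit, and every name in it (wake_data, seq, target, publish,
snapshot) is invented.  It assumes a single writer -- the thread
publishing its own wake data -- and any number of remote readers.  The
writer bumps the sequence counter to an odd value while it updates the
record and back to an even value when done; a reader accepts a snapshot
only if it sees the same even count on both sides of its reads.

/* Illustrative seqlock sketch in C11 atomics; hypothetical names.  */
#include <stdatomic.h>
#include <stdint.h>

struct wake_data
{
  atomic_uint_least32_t seq;   /* odd while an update is in flight */
  _Atomic (void *) target;     /* hypothetical payload to publish */
};

static void
publish (struct wake_data *wd, void *target)
{
  uint32_t seq = atomic_load_explicit (&wd->seq, memory_order_relaxed);
  /* Odd count first.  The release store on `target' guarantees that a
     reader which observes the new payload also observes the odd count;
     the final release store on `seq' guarantees that a reader which
     observes the new even count also observes the payload.  */
  atomic_store_explicit (&wd->seq, seq + 1, memory_order_relaxed);
  atomic_store_explicit (&wd->target, target, memory_order_release);
  atomic_store_explicit (&wd->seq, seq + 2, memory_order_release);
}

static void *
snapshot (struct wake_data *wd)
{
  for (;;)
    {
      uint32_t before = atomic_load_explicit (&wd->seq, memory_order_acquire);
      void *target = atomic_load_explicit (&wd->target, memory_order_acquire);
      uint32_t after = atomic_load_explicit (&wd->seq, memory_order_relaxed);
      if (before == after && !(before & 1))
        return target;   /* consistent, quiescent snapshot */
    }
}

In the commit itself the wake data lives directly in struct scm_thread,
so publishing never allocates; the retry loop above only approximates
the wait-free protocol the commit message claims for the real design.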
parent 7d1eda149e
commit b0ce014801

4 changed files with 170 additions and 179 deletions
libguile/atomics-internal.h
@@ -25,13 +25,33 @@
 #include "scm.h"
 
-#ifdef HAVE_STDATOMIC_H
+#ifndef HAVE_STDATOMIC_H
+#error Guile needs C11 stdatomic.h
+#endif
 
 #include <stdatomic.h>
 
+static inline uint32_t
+scm_atomic_ref_uint32 (uint32_t *loc)
+{
+  atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
+  return atomic_load (a_loc);
+}
+static inline uint32_t
+scm_atomic_swap_uint32 (uint32_t *loc, uint32_t val)
+{
+  atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
+  return atomic_exchange (a_loc, val);
+}
+static inline void
+scm_atomic_set_uint32 (uint32_t *loc, uint32_t val)
+{
+  atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
+  atomic_store (a_loc, val);
+}
 static inline uint32_t
 scm_atomic_subtract_uint32 (uint32_t *loc, uint32_t arg)
 {
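For orientation, here is what calling the three helpers added above
might look like.  Only the helper names come from the patch; the caller
and the counter are invented for this example, which assumes
"libguile/atomics-internal.h" is included.

/* Hypothetical caller of the new uint32 helpers.  */
#include <stdint.h>

static void
example (uint32_t *counter)
{
  scm_atomic_set_uint32 (counter, 0);                  /* atomic store */
  uint32_t old = scm_atomic_swap_uint32 (counter, 1);  /* exchange, returns previous value */
  uint32_t cur = scm_atomic_ref_uint32 (counter);      /* atomic load */
  (void) old;
  (void) cur;
}

Note the design choice visible in the helpers themselves: they cast a
plain uint32_t * to atomic_uint_least32_t *, which presumes the two
types share size and representation -- true on the platforms Guile
targets, and cheaper than making every field _Atomic at its
declaration.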
@@ -102,131 +122,5 @@ scm_atomic_compare_and_swap_scm (SCM *loc, SCM expected, SCM desired)
                                              SCM_UNPACK (desired));
   return result;
 }
-#else /* HAVE_STDATOMIC_H */
-
-/* Fallback implementation using locks. */
-#include "libguile/threads.h"
-
-static scm_i_pthread_mutex_t atomics_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
-
-static inline uint32_t
-scm_atomic_subtract_uint32 (uint32_t *loc, uint32_t arg)
-{
-  uint32_t ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  *loc -= arg;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline int
-scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
-                                    uint32_t desired)
-{
-  int ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  if (*loc == *expected)
-    {
-      *loc = desired;
-      ret = 1;
-    }
-  else
-    {
-      *expected = *loc;
-      ret = 0;
-    }
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline size_t
-scm_atomic_subtract_size (size_t *loc, size_t arg)
-{
-  size_t ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  *loc -= arg;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline void
-scm_atomic_set_pointer (void **loc, void *val)
-{
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  *loc = val;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-}
-
-static inline void *
-scm_atomic_ref_pointer (void **loc)
-{
-  void *ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline void *
-scm_atomic_swap_pointer (void **loc, void *new_val)
-{
-  void *ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  *loc = new_val;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline void
-scm_atomic_set_bits (scm_t_bits *loc, scm_t_bits val)
-{
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  *loc = val;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-}
-
-static inline void
-scm_atomic_set_scm (SCM *loc, SCM val)
-{
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  *loc = val;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-}
-
-static inline SCM
-scm_atomic_ref_scm (SCM *loc)
-{
-  SCM ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline SCM
-scm_atomic_swap_scm (SCM *loc, SCM val)
-{
-  SCM ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  ret = *loc;
-  *loc = val;
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-static inline SCM
-scm_atomic_compare_and_swap_scm (SCM *loc, SCM expected, SCM desired)
-{
-  SCM ret;
-  scm_i_pthread_mutex_lock (&atomics_lock);
-  if (*loc == expected)
-    {
-      *loc = desired;
-      ret = expected;
-    }
-  else
-    {
-      ret = *loc;
-    }
-  scm_i_pthread_mutex_unlock (&atomics_lock);
-  return ret;
-}
-
-#endif /* HAVE_STDATOMIC_H */
-
 
 #endif /* SCM_ATOMICS_INTERNAL_H */
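For contrast with the mutex-based fallback deleted above: under C11 the
same compare-and-swap contract collapses to a single library call.  The
sketch below is written in the style of the helpers earlier in the
file, but the excerpt does not show Guile's actual C11 definition, so
treat the name and body as an assumption.

/* Assumed sketch, not shown in this diff: the C11 counterpart of the
   deleted lock-based scm_atomic_compare_and_swap_uint32.  */
#include <stdatomic.h>
#include <stdint.h>

static inline int
cas_uint32_sketch (uint32_t *loc, uint32_t *expected, uint32_t desired)
{
  /* Same representation assumption as the helpers above: uint32_t and
     uint_least32_t are the same type on the targeted platforms.  */
  atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
  /* If *loc equals *expected, store `desired' and return nonzero;
     otherwise copy the current value into *expected and return zero --
     the same contract the mutex version implemented by hand.  */
  return atomic_compare_exchange_strong (a_loc, expected, desired);
}

With the fallback gone, every operation in this header maps directly to
a hardware atomic on mainstream platforms, which is what makes the
wait-free wakeup claim in the commit message plausible.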