1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-06-02 02:10:19 +02:00

Remove BDW alloc lock API in vm.c

* libguile/vm.c (vm_expand_stack_inner):
(vm_expand_stack):
(reinstate_continuation_x):
(compose_continuation): Use gc_inhibit_preemption /
gc_reallow_preemption.
This commit is contained in:
Andy Wingo 2025-05-15 09:25:03 +02:00
parent 1e3ce66224
commit 2bfc66554e

View file

@@ -777,29 +777,17 @@ scm_i_vm_free_stack (struct scm_vm *vp)
memset (vp, 0, sizeof (*vp));
}
struct vm_expand_stack_data
static union scm_vm_stack_element *
vm_expand_stack_inner (struct scm_vm *vp, size_t needed_size,
union scm_vm_stack_element *new_sp)
{
struct scm_vm *vp;
size_t stack_size;
union scm_vm_stack_element *new_sp;
};
static void *
vm_expand_stack_inner (void *data_ptr)
{
struct vm_expand_stack_data *data = data_ptr;
struct scm_vm *vp = data->vp;
union scm_vm_stack_element *old_top, *new_bottom;
size_t new_size;
ptrdiff_t reloc;
old_top = vp->stack_top;
new_size = vp->stack_size;
while (new_size < data->stack_size)
union scm_vm_stack_element *old_top = vp->stack_top;
size_t new_size = vp->stack_size;
while (new_size < needed_size)
new_size *= 2;
new_bottom = expand_stack (vp->stack_bottom, vp->stack_size, new_size);
union scm_vm_stack_element *new_bottom =
expand_stack (vp->stack_bottom, vp->stack_size, new_size);
if (!new_bottom)
return NULL;
@@ -807,13 +795,12 @@ vm_expand_stack_inner (void *data_ptr)
vp->stack_size = new_size;
vp->stack_top = vp->stack_bottom + new_size;
vp->stack_limit = vp->stack_bottom;
reloc = vp->stack_top - old_top;
ptrdiff_t reloc = vp->stack_top - old_top;
if (vp->fp)
vp->fp += reloc;
data->new_sp += reloc;
return new_bottom;
return new_sp + reloc;
}
static ptrdiff_t
@@ -873,17 +860,13 @@ vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
if (stack_size > vp->stack_size)
{
struct vm_expand_stack_data data;
gc_inhibit_preemption (SCM_I_CURRENT_THREAD->mutator);
new_sp = vm_expand_stack_inner (vp, stack_size, new_sp);
gc_reallow_preemption (SCM_I_CURRENT_THREAD->mutator);
data.vp = vp;
data.stack_size = stack_size;
data.new_sp = new_sp;
if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data))
if (!new_sp)
/* Throw an unwind-only exception. */
scm_report_stack_overflow ();
new_sp = data.new_sp;
}
vp->sp = new_sp;
@@ -1082,34 +1065,6 @@ push_interrupt_frame (scm_thread *thread, uint8_t *mra)
thread->vm.fp = new_fp;
}
struct return_to_continuation_data
{
struct scm_vm_cont *cp;
struct scm_vm *vp;
};
/* Called with the GC lock to prevent the stack marker from traversing a
stack in an inconsistent state. */
static void *
vm_return_to_continuation_inner (void *data_ptr)
{
struct return_to_continuation_data *data = data_ptr;
struct scm_vm *vp = data->vp;
struct scm_vm_cont *cp = data->cp;
/* We know that there is enough space for the continuation, because we
captured it in the past. However there may have been an expansion
since the capture, so we may have to re-link the frame
pointers. */
memcpy (vp->stack_top - cp->stack_size,
cp->stack_bottom,
cp->stack_size * sizeof (*cp->stack_bottom));
vp->fp = vp->stack_top - cp->fp_offset;
vm_restore_sp (vp, vp->stack_top - cp->stack_size);
return NULL;
}
static void reinstate_continuation_x (scm_thread *thread, SCM cont) SCM_NORETURN;
static void
@@ -1120,7 +1075,6 @@ reinstate_continuation_x (scm_thread *thread, SCM cont)
struct scm_vm_cont *cp;
size_t n, i, frame_overhead = 3;
union scm_vm_stack_element *argv;
struct return_to_continuation_data data;
if (!scm_is_eq (continuation->root, thread->continuation_root))
scm_misc_error
@@ -1134,9 +1088,17 @@ reinstate_continuation_x (scm_thread *thread, SCM cont)
cp = SCM_VM_CONT_DATA (continuation->vm_cont);
data.cp = cp;
data.vp = vp;
GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data);
gc_inhibit_preemption (thread->mutator);
/* We know that there is enough space for the continuation, because we
captured it in the past. However there may have been an expansion
since the capture, so we may have to re-link the frame
pointers. */
memcpy (vp->stack_top - cp->stack_size,
cp->stack_bottom,
cp->stack_size * sizeof (*cp->stack_bottom));
vp->fp = vp->stack_top - cp->fp_offset;
vm_restore_sp (vp, vp->stack_top - cp->stack_size);
gc_reallow_preemption (thread->mutator);
/* Now we have the continuation properly copied over. We just need to
copy on an empty frame and the return values, as the continuation
@@ -1170,39 +1132,14 @@ capture_continuation (scm_thread *thread)
return scm_i_make_continuation (thread, vm_cont);
}
struct compose_continuation_data
{
struct scm_vm *vp;
struct scm_vm_cont *cp;
};
static void *
compose_continuation_inner (void *data_ptr)
{
struct compose_continuation_data *data = data_ptr;
struct scm_vm *vp = data->vp;
struct scm_vm_cont *cp = data->cp;
memcpy (vp->fp - cp->stack_size,
cp->stack_bottom,
cp->stack_size * sizeof (*cp->stack_bottom));
vp->fp -= cp->fp_offset;
vp->ip = cp->vra;
return cp->mra;
}
static uint8_t*
compose_continuation (scm_thread *thread, SCM cont)
{
struct scm_vm *vp = &thread->vm;
size_t nargs;
struct compose_continuation_data data;
struct scm_vm_cont *cp;
union scm_vm_stack_element *args;
ptrdiff_t old_fp_offset;
uint8_t *mra;
if (SCM_UNLIKELY (! SCM_VM_CONT_REWINDABLE_P (cont)))
scm_wrong_type_arg_msg (NULL, 0, cont, "resumable continuation");
@@ -1223,9 +1160,13 @@ compose_continuation (scm_thread *thread, SCM cont)
vm_push_sp (vp, vp->fp - (cp->stack_size + nargs));
data.vp = vp;
data.cp = cp;
mra = GC_call_with_alloc_lock (compose_continuation_inner, &data);
gc_inhibit_preemption (thread->mutator);
memcpy (vp->fp - cp->stack_size,
cp->stack_bottom,
cp->stack_size * sizeof (*cp->stack_bottom));
vp->fp -= cp->fp_offset;
vp->ip = cp->vra;
gc_reallow_preemption (thread->mutator);
/* The resumed continuation will expect ARGS on the stack as if from a
multiple-value return. */
@@ -1251,7 +1192,7 @@ compose_continuation (scm_thread *thread, SCM cont)
}
}
return mra;
return cp->mra;
}
static void