1
Fork 0
mirror of https://git.savannah.gnu.org/git/guile.git synced 2025-05-20 03:30:27 +02:00

Remove vm->sp_min_since_gc

* libguile/jit.c (emit_alloc_frame_for_sp):
* libguile/vm-engine.c (ALLOC_FRAME, RESET_FRAME):
* libguile/vm.c (vm_increase_sp, scm_i_vm_prepare_stack):
  (return_unused_stack_to_os, vm_expand_stack, alloc_frame):
  (scm_call_with_stack_overflow_handler):
* libguile/vm.h (struct scm_vm): Remove sp_min_since_gc handling.  It
  was a very minor optimization when it was centralized in vm.c, but now
  with JIT it's causing too much duplicate code generation.
This commit is contained in:
Andy Wingo 2019-12-07 22:54:32 +01:00
parent 70ad8a2e72
commit 4a6a7e15d6
4 changed files with 13 additions and 50 deletions

View file

@@ -221,7 +221,6 @@ DEFINE_THREAD_OFFSET (block_asyncs);
DEFINE_THREAD_VP_OFFSET (fp);
DEFINE_THREAD_VP_OFFSET (sp);
DEFINE_THREAD_VP_OFFSET (ip);
DEFINE_THREAD_VP_OFFSET (sp_min_since_gc);
DEFINE_THREAD_VP_OFFSET (stack_limit);
/* The current scm_thread*. Preserved across callouts. */
@@ -705,15 +704,13 @@ emit_call_3 (scm_jit_state *j, void *f, jit_operand_t a, jit_operand_t b,
static void
emit_alloc_frame_for_sp (scm_jit_state *j, jit_gpr_t t)
{
jit_reloc_t k, fast, watermark;
jit_reloc_t k, fast;
uint32_t saved_state = save_reloadable_register_state (j);
ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
emit_ldxi (j, t, THREAD, thread_offset_sp_min_since_gc);
fast = jit_bger (j->jit, SP, t);
emit_ldxi (j, t, THREAD, thread_offset_stack_limit);
watermark = jit_bger (j->jit, SP, t);
fast = jit_bger (j->jit, SP, t);
/* Slow case: call out to expand stack. */
emit_store_current_ip (j, t);
@@ -722,13 +719,10 @@ emit_alloc_frame_for_sp (scm_jit_state *j, jit_gpr_t t)
restore_reloadable_register_state (j, saved_state);
k = jit_jmp (j->jit);
/* Past sp_min_since_gc, but within stack_limit: update watermark and
fall through. */
jit_patch_here (j->jit, watermark);
jit_stxi (j->jit, thread_offset_sp_min_since_gc, THREAD, SP);
jit_patch_here (j->jit, fast);
/* Fast case: Just update sp. */
jit_patch_here (j->jit, fast);
emit_store_sp (j);
jit_patch_here (j->jit, k);
clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);

View file

@@ -176,16 +176,11 @@
#define ALLOC_FRAME(n) \
do { \
sp = VP->fp - (n); \
if (sp < VP->sp_min_since_gc) \
if (SCM_UNLIKELY (sp < VP->stack_limit)) \
{ \
if (SCM_UNLIKELY (sp < VP->stack_limit)) \
{ \
SYNC_IP (); \
CALL_INTRINSIC (expand_stack, (thread, sp)); \
CACHE_SP (); \
} \
else \
VP->sp_min_since_gc = VP->sp = sp; \
SYNC_IP (); \
CALL_INTRINSIC (expand_stack, (thread, sp)); \
CACHE_SP (); \
} \
else \
VP->sp = sp; \
@@ -195,12 +190,7 @@
stack expansion is needed. Note that in some cases this may lower
SP, e.g. after a return but where there are more locals below, but we
know it was preceded by an alloc-frame in that case, so no stack need
be allocated.
As an optimization, we don't update sp_min_since_gc in this case; the
principal place stacks are expanded is in ALLOC_FRAME. it doesn't
need to strictly be the min since GC, as it's just an optimization to
prevent passing too-large of a range to madvise. */
be allocated. */
#define RESET_FRAME(n) \
do { \
VP->sp = sp = VP->fp - (n); \

View file

@@ -109,16 +109,10 @@ static inline void
vm_increase_sp (struct scm_vm *vp, union scm_vm_stack_element *new_sp,
enum vm_increase_sp_kind kind)
{
if (new_sp >= vp->sp_min_since_gc)
{
vp->sp = new_sp;
return;
}
if (kind == VM_SP_PUSH && new_sp < vp->stack_limit)
vm_expand_stack (vp, new_sp);
else
vp->sp_min_since_gc = vp->sp = new_sp;
vp->sp = new_sp;
}
static inline void
@@ -620,7 +614,6 @@ scm_i_vm_prepare_stack (struct scm_vm *vp)
vp->overflow_handler_stack = SCM_EOL;
vp->ip = NULL;
vp->sp = vp->stack_top;
vp->sp_min_since_gc = vp->sp;
vp->fp = vp->stack_top;
vp->compare_result = SCM_F_COMPARE_NONE;
vp->engine = vm_default_engine;
@@ -636,9 +629,6 @@ return_unused_stack_to_os (struct scm_vm *vp)
#if HAVE_SYS_MMAN_H
uintptr_t lo = (uintptr_t) vp->stack_bottom;
uintptr_t hi = (uintptr_t) vp->sp;
/* The second condition is needed to protect against wrap-around. */
if (vp->sp_min_since_gc >= vp->stack_bottom && vp->sp >= vp->sp_min_since_gc)
lo = (uintptr_t) vp->sp_min_since_gc;
lo &= ~(page_size - 1U); /* round down */
hi &= ~(page_size - 1U); /* round down */
@@ -659,8 +649,6 @@ return_unused_stack_to_os (struct scm_vm *vp)
if (ret && errno != ENOSYS)
perror ("madvise failed");
}
vp->sp_min_since_gc = vp->sp;
#endif
}
@@ -887,7 +875,7 @@ vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
new_sp = data.new_sp;
}
vp->sp_min_since_gc = vp->sp = new_sp;
vp->sp = new_sp;
if (should_handle_stack_overflow (vp, stack_size))
{
@@ -950,13 +938,8 @@ alloc_frame (scm_thread *thread, uint32_t nlocals)
{
union scm_vm_stack_element *sp = thread->vm.fp - nlocals;
if (sp < thread->vm.sp_min_since_gc)
{
if (SCM_UNLIKELY (sp < thread->vm.stack_limit))
thread_expand_stack (thread, sp);
else
thread->vm.sp_min_since_gc = thread->vm.sp = sp;
}
if (SCM_UNLIKELY (sp < thread->vm.stack_limit))
thread_expand_stack (thread, sp);
else
thread->vm.sp = sp;
}
@@ -1866,9 +1849,6 @@ SCM_DEFINE (scm_call_with_stack_overflow_handler,
scm_dynwind_unwind_handler (unwind_overflow_handler, &data,
SCM_F_WIND_EXPLICITLY);
/* Reset sp_min_since_gc so that the VM checks actually trigger. */
return_unused_stack_to_os (&t->vm);
ret = scm_call_0 (thunk);
scm_dynwind_end ();

View file

@@ -40,7 +40,6 @@ struct scm_vm {
uint32_t *ip; /* instruction pointer */
union scm_vm_stack_element *sp; /* stack pointer */
union scm_vm_stack_element *fp; /* frame pointer */
union scm_vm_stack_element *sp_min_since_gc; /* deepest sp since last gc */
union scm_vm_stack_element *stack_limit; /* stack limit address */
uint8_t compare_result; /* flags register: a value from scm_compare */
uint8_t apply_hook_enabled; /* if apply hook is enabled */