diff --git a/ChangeLog b/ChangeLog index 2ec26e19d..052afa672 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,19 @@ +2012-12-29 Paulo Andrade + + * check/float.ok, check/float.tst: New test cases implementing + extensive validation of float comparison and branch code + generation as well as integer conversion, involving NaN and + [+-]Inf. + + * lib/jit_arm-swf.c, lib/jit_x86-sse.c, lib/jit_x86-x87.c: + Correct bugs found by new float test case. + + * lib/jit_x86.c: Correct cut&paste error added in commit to + convert jit_arg* return value to a jit_node_t*, that would + cause it to not properly handle double arguments in ix86. + + * check/Makefile.am: Update for the new test case. + 2012-12-28 Paulo Andrade * check/lightning.c, include/lightning.h, lib/jit_arm.c, diff --git a/TODO b/TODO index 8cb265714..22d33b42d 100644 --- a/TODO +++ b/TODO @@ -9,3 +9,7 @@ * Test and correct the ppc and mips ports, after the import and adaptation of the code to lightning. + + * Add new float/double comparison and branch codes: + unne: proper inverse of eq (nan/nan == 0) + eqeq: proper inverse of ltgt (nan/nan == 1) diff --git a/check/Makefile.am b/check/Makefile.am index c1eaf6a69..70bb439e1 100644 --- a/check/Makefile.am +++ b/check/Makefile.am @@ -64,6 +64,7 @@ EXTRA_DIST = \ clobber.tst clobber.ok \ carry.tst carry.ok \ call.tst call.ok \ + float.tst float.ok \ check.sh \ check.x87.sh \ check.arm.sh check.swf.sh \ @@ -84,7 +85,8 @@ base_TESTS = \ alu_com alu_neg \ fop_abs fop_sqrt \ varargs stack \ - clobber carry call + clobber carry call \ + float $(base_TESTS): check.sh $(LN_S) $(srcdir)/check.sh $@ diff --git a/check/float.ok b/check/float.ok new file mode 100644 index 000000000..9766475a4 --- /dev/null +++ b/check/float.ok @@ -0,0 +1 @@ +ok diff --git a/check/float.tst b/check/float.tst new file mode 100644 index 000000000..de5886880 --- /dev/null +++ b/check/float.tst @@ -0,0 +1,338 @@ + +.data 4 +ok: +.c "ok" + +. $($NaN = 0.0 / 0.0) +. $($pInf = 1.0 / 0.0) +. 
$($nInf = -1.0 / 0.0) +#if __WORDSIZE == 32 +# define x7f 0x7fffffff +# define x80 0x80000000 +#else +# define x7f 0x7fffffffffffffff +# define x80 0x8000000000000000 +#endif + +#if __mips__ +# define wnan x7f +#elif __arm__ +# define wnan 0 +#else +# define wnan x80 +#endif +#if __mips__ || __arm__ +# define wpinf x7f +#else +# define wpinf x80 +#endif +#define wninf x80 + +/* ensure result is correct and 0 or 1 in the result register */ +#define xtcmp(l, t, op, r0, f0, f1, li, ri) \ + movi##t %f0 li \ + movi##t %f1 ri \ + op##r##t %r0 %f0 %f1 \ + bnei T##op##r##t##r0##f0##f1##l %r0 0 \ + calli @abort \ +T##op##r##t##r0##f0##f1##l: \ + movi##t %f0 li \ + movi##t %f1 ri \ + b##op##r##t bT##op##r##t##r0##f0##f1##l %f0 %f1 \ + calli @abort \ +bT##op##r##t##r0##f0##f1##l: \ + movi##t %f1 li \ + op##i##t %r0 %f0 ri \ + bnei T##op##i##t##r0##f0##f1##l %r0 0 \ + calli @abort \ +T##op##i##t##r0##f0##f1##l: \ + movi##t %f1 li \ + b##op##i##t bT##op##i##t##r0##f0##f1##l %f0 ri \ + calli @abort \ +bT##op##i##t##r0##f0##f1##l: \ + movi##t %f0 li \ + movi##t %f1 ri \ + op##r##t %r0 %f0 %f1 \ + beqi F##op##r##t##r0##f0##f1##l %r0 1 \ + calli @abort \ +F##op##r##t##r0##f0##f1##l: \ + movi##t %f1 li \ + op##i##t %r0 %f0 ri \ + beqi F##op##i##t##r0##f0##f1##l %r0 1 \ + calli @abort \ +F##op##i##t##r0##f0##f1##l: +#define tcmp1(l, t, op, r0, li, ri) \ + xtcmp(l, t, op, r0, f0, f1, li, ri) \ + xtcmp(l, t, op, r0, f1, f2, li, ri) \ + xtcmp(l, t, op, r0, f2, f3, li, ri) \ + xtcmp(l, t, op, r0, f3, f4, li, ri) \ + xtcmp(l, t, op, r0, f4, f5, li, ri) +#define tcmp0(l, t, op, li, ri) \ + tcmp1(l, t, op, v0, li, ri) \ + tcmp1(l, t, op, v1, li, ri) \ + tcmp1(l, t, op, v2, li, ri) \ + tcmp1(l, t, op, r0, li, ri) \ + tcmp1(l, t, op, r1, li, ri) \ + tcmp1(l, t, op, r2, li, ri) +#define tcmp(l, op, li, ri) \ + tcmp0(l, _f, op, li, ri) \ + tcmp0(l, _d, op, li, ri) + +#define xfcmp(l, t, op, r0, f0, f1, li, ri) \ + movi##t %f0 li \ + movi##t %f1 ri \ + op##r##t %r0 %f0 %f1 \ + beqi 
T##op##r##t##r0##f0##f1##l %r0 0 \ + calli @abort \ +T##op##r##t##r0##f0##f1##l: \ + movi##t %f1 li \ + op##i##t %r0 %f0 ri \ + beqi T##op##i##t##r0##f0##f1##l %r0 0 \ + calli @abort \ +T##op##i##t##r0##f0##f1##l: \ + movi##t %f0 li \ + movi##t %f1 ri \ + op##r##t %r0 %f0 %f1 \ + bnei F##op##r##t##r0##f0##f1##l %r0 1 \ + calli @abort \ +F##op##r##t##r0##f0##f1##l: \ + movi##t %f1 li \ + op##i##t %r0 %f0 ri \ + bnei F##op##i##t##r0##f0##f1##l %r0 1 \ + calli @abort \ +F##op##i##t##r0##f0##f1##l: +#define fcmp1(l, t, op, r0, li, ri) \ + xfcmp(l, t, op, r0, f0, f1, li, ri) \ + xfcmp(l, t, op, r0, f1, f2, li, ri) \ + xfcmp(l, t, op, r0, f2, f3, li, ri) \ + xfcmp(l, t, op, r0, f3, f4, li, ri) \ + xfcmp(l, t, op, r0, f4, f5, li, ri) +#define fcmp0(l, t, op, li, ri) \ + fcmp1(l, t, op, v0, li, ri) \ + fcmp1(l, t, op, v1, li, ri) \ + fcmp1(l, t, op, v2, li, ri) \ + fcmp1(l, t, op, r0, li, ri) \ + fcmp1(l, t, op, r1, li, ri) \ + fcmp1(l, t, op, r2, li, ri) +#define fcmp(l, op, li, ri) \ + fcmp0(l, _f, op, li, ri) \ + fcmp0(l, _d, op, li, ri) + +#define xf2w(l, f, r0, f0, iv, fv) \ + movi##f %f0 fv \ + truncr##f %r0 %f0 \ + beqi W##f##r0##f0##l %r0 iv \ + calli @abort \ +W##f##r0##f0##l: +#define f2w1(l, t, r0, iv, fv) \ + xf2w(l, t, r0, f0, iv, fv) \ + xf2w(l, t, r0, f1, iv, fv) \ + xf2w(l, t, r0, f2, iv, fv) \ + xf2w(l, t, r0, f3, iv, fv) \ + xf2w(l, t, r0, f4, iv, fv) \ + xf2w(l, t, r0, f5, iv, fv) +#define f2w0(l, t, iv, fv) \ + f2w1(l, t, v0, iv, fv) \ + f2w1(l, t, v1, iv, fv) \ + f2w1(l, t, v2, iv, fv) \ + f2w1(l, t, r0, iv, fv) \ + f2w1(l, t, r1, iv, fv) \ + f2w1(l, t, r2, iv, fv) +#define f2w(l, iv, fv) \ + f2w0(l, _f, iv, fv) \ + f2w0(l, _d, iv, fv) + +.code + prolog + + tcmp(__LINE__, lt, 0, 1) + tcmp(__LINE__, lt, $nInf, $pInf) + tcmp(__LINE__, lt, $nInf, 0) + tcmp(__LINE__, lt, 0, $pInf) + fcmp(__LINE__, lt, $NaN, 0) + fcmp(__LINE__, lt, $NaN, $NaN) + fcmp(__LINE__, lt, $nInf, $NaN) + fcmp(__LINE__, lt, 1, 0) + fcmp(__LINE__, lt, 0, 0) + fcmp(__LINE__, lt, $pInf, 
$nInf) + fcmp(__LINE__, lt, 0, $nInf) + fcmp(__LINE__, lt, 0, $NaN) + + tcmp(__LINE__, le, 0, 1) + tcmp(__LINE__, le, 0, 0) + tcmp(__LINE__, le, 1, 1) + tcmp(__LINE__, le, $nInf, $pInf) + tcmp(__LINE__, le, $nInf, 0) + tcmp(__LINE__, le, 0, $pInf) + fcmp(__LINE__, le, $NaN, 0) + fcmp(__LINE__, le, $NaN, $NaN) + fcmp(__LINE__, le, $nInf, $NaN) + fcmp(__LINE__, le, 1, 0) + fcmp(__LINE__, le, $pInf, $nInf) + fcmp(__LINE__, le, 0, $nInf) + fcmp(__LINE__, le, 0, $NaN) + + tcmp(__LINE__, eq, 0, 0) + tcmp(__LINE__, eq, 1, 1) + fcmp(__LINE__, eq, $NaN, 0) + fcmp(__LINE__, eq, $NaN, $NaN) + fcmp(__LINE__, eq, $nInf, $NaN) + fcmp(__LINE__, eq, 0, 1) + fcmp(__LINE__, eq, 1, 0) + fcmp(__LINE__, eq, $pInf, $nInf) + fcmp(__LINE__, eq, 0, $nInf) + fcmp(__LINE__, eq, 0, $NaN) + + tcmp(__LINE__, ge, 1, 0) + tcmp(__LINE__, ge, 0, 0) + tcmp(__LINE__, ge, 1, 1) + tcmp(__LINE__, ge, $pInf, $nInf) + tcmp(__LINE__, ge, 0, $nInf) + tcmp(__LINE__, ge, $pInf, 0) + fcmp(__LINE__, ge, $NaN, 0) + fcmp(__LINE__, ge, $NaN, $NaN) + fcmp(__LINE__, ge, $nInf, $NaN) + fcmp(__LINE__, ge, 0, 1) + fcmp(__LINE__, ge, $nInf, $pInf) + fcmp(__LINE__, ge, $nInf, 0) + fcmp(__LINE__, ge, 0, $NaN) + + tcmp(__LINE__, gt, 1, 0) + tcmp(__LINE__, gt, $pInf, $nInf) + tcmp(__LINE__, gt, 0, $nInf) + tcmp(__LINE__, gt, $pInf, 0) + fcmp(__LINE__, gt, $NaN, 0) + fcmp(__LINE__, gt, $NaN, $NaN) + fcmp(__LINE__, gt, $nInf, $NaN) + fcmp(__LINE__, gt, 0, 1) + fcmp(__LINE__, gt, 0, 0) + fcmp(__LINE__, gt, $nInf, $pInf) + fcmp(__LINE__, gt, $nInf, 0) + fcmp(__LINE__, gt, 0, $NaN) + + tcmp(__LINE__, ne, 0, 1) + tcmp(__LINE__, ne, 1, 0) + tcmp(__LINE__, ne, $NaN, $NaN) + tcmp(__LINE__, ne, $nInf, $pInf) + tcmp(__LINE__, ne, $NaN, 0) + tcmp(__LINE__, ne, $nInf, $NaN) + tcmp(__LINE__, ne, $pInf, $nInf) + tcmp(__LINE__, ne, 0, $nInf) + tcmp(__LINE__, ne, 0, $NaN) + fcmp(__LINE__, ne, 0, 0) + fcmp(__LINE__, ne, 1, 1) + + tcmp(__LINE__, unlt, 0, 1) + tcmp(__LINE__, unlt, $nInf, $pInf) + tcmp(__LINE__, unlt, $nInf, 0) + tcmp(__LINE__, 
unlt, 0, $pInf) + tcmp(__LINE__, unlt, $NaN, 0) + tcmp(__LINE__, unlt, $NaN, $NaN) + tcmp(__LINE__, unlt, $nInf, $NaN) + tcmp(__LINE__, unlt, 0, $NaN) + fcmp(__LINE__, unlt, 1, 0) + fcmp(__LINE__, unlt, 0, 0) + fcmp(__LINE__, unlt, $pInf, $nInf) + fcmp(__LINE__, unlt, 0, $nInf) + + tcmp(__LINE__, unle, 0, 1) + tcmp(__LINE__, unle, 0, 0) + tcmp(__LINE__, unle, 1, 1) + tcmp(__LINE__, unle, $nInf, $pInf) + tcmp(__LINE__, unle, $nInf, 0) + tcmp(__LINE__, unle, 0, $pInf) + tcmp(__LINE__, unle, $NaN, 0) + tcmp(__LINE__, unle, $NaN, $NaN) + tcmp(__LINE__, unle, $nInf, $NaN) + tcmp(__LINE__, unle, 0, $NaN) + fcmp(__LINE__, unle, 1, 0) + fcmp(__LINE__, unle, $pInf, $nInf) + fcmp(__LINE__, unle, 0, $nInf) + + tcmp(__LINE__, uneq, 0, 0) + tcmp(__LINE__, uneq, 1, 1) + tcmp(__LINE__, uneq, $NaN, 0) + tcmp(__LINE__, uneq, $NaN, $NaN) + tcmp(__LINE__, uneq, $nInf, $NaN) + tcmp(__LINE__, uneq, 0, $NaN) + fcmp(__LINE__, uneq, 0, 1) + fcmp(__LINE__, uneq, 1, 0) + fcmp(__LINE__, uneq, $pInf, $nInf) + fcmp(__LINE__, uneq, 0, $nInf) + + tcmp(__LINE__, unge, 1, 0) + tcmp(__LINE__, unge, 0, 0) + tcmp(__LINE__, unge, 1, 1) + tcmp(__LINE__, unge, $pInf, $nInf) + tcmp(__LINE__, unge, 0, $nInf) + tcmp(__LINE__, unge, $pInf, 0) + tcmp(__LINE__, unge, $NaN, 0) + tcmp(__LINE__, unge, $NaN, $NaN) + tcmp(__LINE__, unge, $nInf, $NaN) + tcmp(__LINE__, unge, 0, $NaN) + fcmp(__LINE__, unge, 0, 1) + fcmp(__LINE__, unge, $nInf, $pInf) + fcmp(__LINE__, unge, $nInf, 0) + + tcmp(__LINE__, ungt, 1, 0) + tcmp(__LINE__, ungt, $pInf, $nInf) + tcmp(__LINE__, ungt, 0, $nInf) + tcmp(__LINE__, ungt, $pInf, 0) + tcmp(__LINE__, ungt, $NaN, 0) + tcmp(__LINE__, ungt, $NaN, $NaN) + tcmp(__LINE__, ungt, $nInf, $NaN) + tcmp(__LINE__, ungt, 0, $NaN) + fcmp(__LINE__, ungt, 0, 1) + fcmp(__LINE__, ungt, 0, 0) + fcmp(__LINE__, ungt, $nInf, $pInf) + fcmp(__LINE__, ungt, $nInf, 0) + + tcmp(__LINE__, ltgt, 0, 1) + tcmp(__LINE__, ltgt, 1, 0) + tcmp(__LINE__, ltgt, $nInf, $pInf) + tcmp(__LINE__, ltgt, $pInf, $nInf) + 
tcmp(__LINE__, ltgt, 0, $nInf) + fcmp(__LINE__, ltgt, $NaN, $NaN) + fcmp(__LINE__, ltgt, $NaN, 0) + fcmp(__LINE__, ltgt, $nInf, $NaN) + fcmp(__LINE__, ltgt, 0, $NaN) + fcmp(__LINE__, ltgt, 0, 0) + fcmp(__LINE__, ltgt, 1, 1) + + tcmp(__LINE__, ord, 0, 1) + tcmp(__LINE__, ord, 1, 0) + tcmp(__LINE__, ord, $nInf, $pInf) + tcmp(__LINE__, ord, $pInf, $nInf) + tcmp(__LINE__, ord, 0, $nInf) + tcmp(__LINE__, ord, 0, 0) + tcmp(__LINE__, ord, 1, 1) + fcmp(__LINE__, ord, $NaN, $NaN) + fcmp(__LINE__, ord, $NaN, 0) + fcmp(__LINE__, ord, $nInf, $NaN) + fcmp(__LINE__, ord, 0, $NaN) + + tcmp(__LINE__, unord, $NaN, $NaN) + tcmp(__LINE__, unord, $NaN, 0) + tcmp(__LINE__, unord, $nInf, $NaN) + tcmp(__LINE__, unord, 0, $NaN) + fcmp(__LINE__, unord, 0, 1) + fcmp(__LINE__, unord, 1, 0) + fcmp(__LINE__, unord, $nInf, $pInf) + fcmp(__LINE__, unord, $pInf, $nInf) + fcmp(__LINE__, unord, 0, $nInf) + fcmp(__LINE__, unord, 0, 0) + fcmp(__LINE__, unord, 1, 1) + + f2w(__LINE__, 0, 0) + f2w(__LINE__, 1, 1) + f2w(__LINE__, wninf, $nInf) + f2w(__LINE__, wpinf, $pInf) + f2w(__LINE__, wnan, $NaN) + + prepare + pushargi ok + ellipsis + finishi @puts + + ret + epilog diff --git a/lib/jit_arm-swf.c b/lib/jit_arm-swf.c index 1315693f7..89aefed35 100644 --- a/lib/jit_arm-swf.c +++ b/lib/jit_arm-swf.c @@ -175,7 +175,7 @@ static void _swf_negr_d(jit_state_t*,jit_int32_t,jit_int32_t); # define swf_ger_d(r0,r1,r2) swf_idd(__aeabi_dcmpge,r0,r1,r2) # define swf_gei_d(r0,r1,i0) swf_idd_(__aeabi_dcmpge,r0,r1,i0) # define swf_gtr_f(r0,r1,r2) swf_iff(__aeabi_fcmpgt,r0,r1,r2) -# define swf_gti_f(r0,r1,i0) swf_iff(__aeabi_fcmpgt,r0,r1,i0) +# define swf_gti_f(r0,r1,i0) swf_iff_(__aeabi_fcmpgt,r0,r1,i0) # define swf_gtr_d(r0,r1,r2) swf_idd(__aeabi_dcmpgt,r0,r1,r2) # define swf_gti_d(r0,r1,i0) swf_idd_(__aeabi_dcmpgt,r0,r1,i0) # define swf_ner_f(r0,r1,r2) _swf_ner_f(_jit,r0,r1,r2) @@ -688,7 +688,7 @@ _swf_iunff(jit_state_t *_jit, int (*i0)(float, float), movr(_R1_REGNO, r2); swf_call(__aeabi_fcmpun, fcmpun, _R2_REGNO); 
if (jit_thumb_p()) { - T1_CMPI(_R0, 0); + T1_CMPI(_R0_REGNO, 0); IT(ARM_CC_NE); if (r0 < 8) T1_MOVI(r0, 1); @@ -698,7 +698,7 @@ T2_CC_B(ARM_CC_NE, 0); } else { - CMPI(_R0, 0); + CMPI(_R0_REGNO, 0); CC_MOVI(ARM_CC_NE, r0, 1); instr = _jit->pc.w; CC_B(ARM_CC_NE, 0); @@ -749,7 +749,7 @@ } swf_call_with_get_reg(__aeabi_dcmpun, dcmpun); if (jit_thumb_p()) { - T1_CMPI(_R0, 0); + T1_CMPI(_R0_REGNO, 0); IT(ARM_CC_NE); if (r0 < 8) T1_MOVI(r0, 1); @@ -759,7 +759,7 @@ T2_CC_B(ARM_CC_NE, 0); } else { - CMPI(_R0, 0); + CMPI(_R0_REGNO, 0); CC_MOVI(ARM_CC_NE, r0, 1); instr = _jit->pc.w; CC_B(ARM_CC_NE, 0); @@ -812,7 +812,7 @@ movi(_R1_REGNO, data.i); swf_call(__aeabi_fcmpun, fcmpun, _R2_REGNO); if (jit_thumb_p()) { - T1_CMPI(_R0, 0); + T1_CMPI(_R0_REGNO, 0); IT(ARM_CC_NE); if (r0 < 8) T1_MOVI(r0, 1); @@ -822,7 +822,7 @@ T2_CC_B(ARM_CC_NE, 0); } else { - CMPI(_R0, 0); + CMPI(_R0_REGNO, 0); CC_MOVI(ARM_CC_NE, r0, 1); instr = _jit->pc.w; CC_B(ARM_CC_NE, 0); @@ -865,7 +865,7 @@ movi(_R3_REGNO, data.i[1]); swf_call_with_get_reg(__aeabi_dcmpun, dcmpun); if (jit_thumb_p()) { - T1_CMPI(_R0, 0); + T1_CMPI(_R0_REGNO, 0); IT(ARM_CC_NE); if (r0 < 8) T1_MOVI(r0, 1); @@ -875,7 +875,7 @@ T2_CC_B(ARM_CC_NE, 0); } else { - CMPI(_R0, 0); + CMPI(_R0_REGNO, 0); CC_MOVI(ARM_CC_NE, r0, 1); instr = _jit->pc.w; CC_B(ARM_CC_NE, 0); diff --git a/lib/jit_x86-sse.c b/lib/jit_x86-sse.c index d67ab9dc8..afc7c9a7a 100644 --- a/lib/jit_x86-sse.c +++ b/lib/jit_x86-sse.c @@ -191,7 +191,7 @@ static void _sse_ungei_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); static void _sse_unger_f(jit_state_t*, 
jit_int32_t, jit_int32_t); # define sse_ungti_f(r0, r1, i0) _sse_ungti_f(_jit, r0, r1, i0) static void _sse_ungti_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); -# define sse_ungtr_f(r0, r1, r2) ssecmpf(X86_CC_NAE, r0, r2, r1) +# define sse_ungtr_f(r0, r1, r2) ssecmpf(X86_CC_NAE, r0, r1, r2) # define sse_ltgti_f(r0, r1, i0) _sse_ltgti_f(_jit, r0, r1, i0) static void _sse_ltgti_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); # define sse_ltgtr_f(r0, r1, r2) _sse_ltgtr_f(_jit, r0, r1, r2) @@ -323,7 +323,7 @@ static void _sse_uneqi_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); static void _sse_unger_d(jit_state_t*, jit_int32_t, jit_int32_t, jit_int32_t); # define sse_ungei_d(r0, r1, i0) _sse_ungei_d(_jit, r0, r1, i0) static void _sse_ungei_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); -# define sse_ungtr_d(r0, r1, r2) ssecmpd(X86_CC_NAE, r0, r2, r1) +# define sse_ungtr_d(r0, r1, r2) ssecmpd(X86_CC_NAE, r0, r1, r2) # define sse_ungti_d(r0, r1, i0) _sse_ungti_d(_jit, r0, r1, i0) static void _sse_ungti_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); # define sse_ltgtr_d(r0, r1, r2) _sse_ltgtr_d(_jit, r0, r1, r2) diff --git a/lib/jit_x86-x87.c b/lib/jit_x86-x87.c index 579c66136..7df115496 100644 --- a/lib/jit_x86-x87.c +++ b/lib/jit_x86-x87.c @@ -166,7 +166,7 @@ static void _x87_unlti_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); # define x87_unler_f(r0, r1, r2) x87cmp(X86_CC_NA, r0, r1, r2) # define x87_unlei_f(r0, r1, i0) _x87_unlei_f(_jit, r0, r1, i0) static void _x87_unlei_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); -# define x87_uneqr_f(r0, r1, r2) x87cmp2(X86_CC_E, r0, r1, r1) +# define x87_uneqr_f(r0, r1, r2) x87cmp2(X86_CC_E, r0, r1, r2) # define x87_uneqi_f(r0, r1, i0) _x87_uneqi_f(_jit, r0, r1, i0) static void _x87_uneqi_f(jit_state_t*,jit_int32_t,jit_int32_t,jit_float32_t*); # define x87_unger_f(r0, r1, r2) x87cmp(X86_CC_NA, r0, r2, r1) @@ -210,7 +210,7 @@ static void 
_x87_unlti_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); # define x87_unler_d(r0, r1, r2) x87cmp(X86_CC_NA, r0, r1, r2) # define x87_unlei_d(r0, r1, i0) _x87_unlei_d(_jit, r0, r1, i0) static void _x87_unlei_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); -# define x87_uneqr_d(r0, r1, r2) x87cmp2(X86_CC_E, r0, r1, r1) +# define x87_uneqr_d(r0, r1, r2) x87cmp2(X86_CC_E, r0, r1, r2) # define x87_uneqi_d(r0, r1, i0) _x87_uneqi_d(_jit, r0, r1, i0) static void _x87_uneqi_d(jit_state_t*,jit_int32_t,jit_int32_t,jit_float64_t*); # define x87_unger_d(r0, r1, r2) x87cmp(X86_CC_NA, r0, r2, r1) diff --git a/lib/jit_x86.c b/lib/jit_x86.c index 7104edbfa..6b1480824 100644 --- a/lib/jit_x86.c +++ b/lib/jit_x86.c @@ -480,11 +480,7 @@ _jit_arg_d(jit_state_t *_jit) #endif { offset = _jit->function->self.size; -#if __WORDSIZE == 32 - _jit->function->self.size += sizeof(jit_float32_t); -#else _jit->function->self.size += sizeof(jit_float64_t); -#endif } return (jit_new_node_w(jit_code_arg_d, offset)); }