Diffstat (limited to 'src')
-rw-r--r--  src/emu/cpu/m68000/m68kfpu.c             30
-rw-r--r--  src/lib/lib.mak                           4
-rwxr-xr-x  src/lib/softfloat/fpu_constant.h         80
-rwxr-xr-x  src/lib/softfloat/fsincos.c             569
-rw-r--r--  src/lib/softfloat/softfloat-macros       16
-rw-r--r--  src/lib/softfloat/softfloat-specialize   38
-rw-r--r--  src/lib/softfloat/softfloat.c           230
-rw-r--r--  src/lib/softfloat/softfloat.h           197
8 files changed, 926 insertions, 238 deletions
diff --git a/src/emu/cpu/m68000/m68kfpu.c b/src/emu/cpu/m68000/m68kfpu.c
index 7e17bbfb1a5..9556df0a6c2 100644
--- a/src/emu/cpu/m68000/m68kfpu.c
+++ b/src/emu/cpu/m68000/m68kfpu.c
@@ -1318,11 +1318,22 @@ static void fpgen_rm_reg(m68ki_cpu_core *m68k, UINT16 w2)
m68k->remaining_cycles -= 109;
break;
}
-// case 0x0e: // FSIN
-// {
-// // TODO
-// break;
-// }
+ case 0x0e: // FSIN
+ {
+ REG_FP[dst] = source;
+ floatx80_fsin(REG_FP[dst]);
+ SET_CONDITION_CODES(m68k, REG_FP[dst]);
+ m68k->remaining_cycles -= 75;
+ break;
+ }
+ case 0x0f: // FTAN
+ {
+ REG_FP[dst] = source;
+ floatx80_ftan(REG_FP[dst]);
+ SET_CONDITION_CODES(m68k, REG_FP[dst]);
+ m68k->remaining_cycles -= 75;
+ break;
+ }
case 0x18: // FABS
{
REG_FP[dst] = source;
@@ -1339,9 +1350,16 @@ static void fpgen_rm_reg(m68ki_cpu_core *m68k, UINT16 w2)
m68k->remaining_cycles -= 3;
break;
}
+ case 0x1d: // FCOS
+ {
+ REG_FP[dst] = source;
+ floatx80_fcos(REG_FP[dst]);
+ SET_CONDITION_CODES(m68k, REG_FP[dst]);
+ m68k->remaining_cycles -= 75;
+ break;
+ }
case 0x1e: // FGETEXP
{
-// floatx80 temp = source;
INT16 temp2;
temp2 = source.high; // get the exponent
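
The three new cases in fpgen_rm_reg() simply hand the source operand to the SoftFloat extension added below and set the condition codes on the result. Outside the 68k core the same entry points can be exercised directly; the following sketch is illustrative only (not part of this commit), assumes the standard SoftFloat float64<->floatx80 conversions and that float64 is a raw 64-bit container as in this port, and must be compiled as C++ because the new functions take their operand by reference. The helper name emu_sin is hypothetical.

    #include <string.h>
    #include "mamesf.h"
    #include "softfloat.h"

    // Sketch: native double -> float64 bits -> floatx80 -> floatx80_fsin -> back.
    static double emu_sin(double x)
    {
        float64 f64;
        memcpy(&f64, &x, sizeof f64);            // reinterpret the double's bits
        floatx80 fx = float64_to_floatx80(f64);  // widen to 80-bit extended
        floatx80_fsin(fx);                       // in place, as m68kfpu.c calls it
                                                 // (non-zero return = argument too
                                                 // large to reduce, fx unchanged)
        f64 = floatx80_to_float64(fx);           // round back down to double
        memcpy(&x, &f64, sizeof x);
        return x;                                // ~ sin(x)
    }
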
diff --git a/src/lib/lib.mak b/src/lib/lib.mak
index 28a0faa3739..6f3fbb552b3 100644
--- a/src/lib/lib.mak
+++ b/src/lib/lib.mak
@@ -192,11 +192,13 @@ PROCESSOR_H = $(LIBSRC)/softfloat/processors/mamesf.h
SOFTFLOAT_MACROS = $(LIBSRC)/softfloat/softfloat/bits64/softfloat-macros
SOFTFLOATOBJS = \
- $(LIBOBJ)/softfloat/softfloat.o
+ $(LIBOBJ)/softfloat/softfloat.o \
+ $(LIBOBJ)/softfloat/fsincos.o
$(OBJ)/libsoftfloat.a: $(SOFTFLOATOBJS)
$(LIBOBJ)/softfloat/softfloat.o: $(LIBSRC)/softfloat/softfloat.c $(LIBSRC)/softfloat/softfloat.h $(LIBSRC)/softfloat/softfloat-macros $(LIBSRC)/softfloat/softfloat-specialize
+$(LIBOBJ)/softfloat/fsincos.o: $(LIBSRC)/softfloat/fsincos.c $(LIBSRC)/softfloat/fpu_constant.h $(LIBSRC)/softfloat/softfloat.h $(LIBSRC)/softfloat/softfloat-macros $(LIBSRC)/softfloat/softfloat-specialize
diff --git a/src/lib/softfloat/fpu_constant.h b/src/lib/softfloat/fpu_constant.h
new file mode 100755
index 00000000000..3ac8862ba0c
--- /dev/null
+++ b/src/lib/softfloat/fpu_constant.h
@@ -0,0 +1,80 @@
+/*============================================================================
+This source file is an extension to the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b, written for Bochs (x86 architecture simulator)
+floating point emulation.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+=============================================================================*/
+
+#ifndef _FPU_CONSTANTS_H_
+#define _FPU_CONSTANTS_H_
+
+// Pentium CPU uses only 68-bit precision M_PI approximation
+#define BETTER_THAN_PENTIUM
+
+/*============================================================================
+ * Written for Bochs (x86 architecture simulator) by
+ * Stanislav Shwartsman [sshwarts at sourceforge net]
+ * ==========================================================================*/
+
+//////////////////////////////
+// PI, PI/2, PI/4 constants
+//////////////////////////////
+
+#define FLOATX80_PI_EXP (0x4000)
+
+// 128-bit PI fraction
+#ifdef BETTER_THAN_PENTIUM
+#define FLOAT_PI_HI (U64(0xc90fdaa22168c234))
+#define FLOAT_PI_LO (U64(0xc4c6628b80dc1cd1))
+#else
+#define FLOAT_PI_HI (U64(0xc90fdaa22168c234))
+#define FLOAT_PI_LO (U64(0xC000000000000000))
+#endif
+
+#define FLOATX80_PI2_EXP (0x3FFF)
+#define FLOATX80_PI4_EXP (0x3FFE)
+
+//////////////////////////////
+// 3PI/4 constant
+//////////////////////////////
+
+#define FLOATX80_3PI4_EXP (0x4000)
+
+// 128-bit 3PI/4 fraction
+#ifdef BETTER_THAN_PENTIUM
+#define FLOAT_3PI4_HI (U64(0x96cbe3f9990e91a7))
+#define FLOAT_3PI4_LO (U64(0x9394c9e8a0a5159c))
+#else
+#define FLOAT_3PI4_HI (U64(0x96cbe3f9990e91a7))
+#define FLOAT_3PI4_LO (U64(0x9000000000000000))
+#endif
+
+//////////////////////////////
+// 1/LN2 constant
+//////////////////////////////
+
+#define FLOAT_LN2INV_EXP (0x3FFF)
+
+// 128-bit 1/LN2 fraction
+#ifdef BETTER_THAN_PENTIUM
+#define FLOAT_LN2INV_HI (U64(0xb8aa3b295c17f0bb))
+#define FLOAT_LN2INV_LO (U64(0xbe87fed0691d3e89))
+#else
+#define FLOAT_LN2INV_HI (U64(0xb8aa3b295c17f0bb))
+#define FLOAT_LN2INV_LO (U64(0xC000000000000000))
+#endif
+
+#endif
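
Each constant above is stored as a biased exponent plus the top 128 bits of the value's significand, with the explicit integer bit in bit 63 of the *_HI word, so value ~= HI * 2^(exp - 0x3FFF - 63), the LO word supplying bits beyond that. A quick sanity check of the PI encoding (illustrative only, plain C; the local names pi_hi, pi_exp and bias just mirror the macros):

    #include <math.h>
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Sketch only: reconstruct pi from FLOAT_PI_HI / FLOATX80_PI_EXP.
           The LO word only contributes bits below double precision. */
        const uint64_t pi_hi = 0xc90fdaa22168c234ULL;   /* FLOAT_PI_HI */
        const int pi_exp = 0x4000, bias = 0x3FFF;       /* FLOATX80_PI_EXP, bias */

        double pi = ldexp((double)pi_hi, pi_exp - bias - 63);
        printf("%.15f\n", pi);                          /* 3.141592653589793 */
        return 0;
    }
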
diff --git a/src/lib/softfloat/fsincos.c b/src/lib/softfloat/fsincos.c
new file mode 100755
index 00000000000..3a0d4ccc1e0
--- /dev/null
+++ b/src/lib/softfloat/fsincos.c
@@ -0,0 +1,569 @@
+/*============================================================================
+This source file is an extension to the SoftFloat IEC/IEEE Floating-point
+Arithmetic Package, Release 2b, written for Bochs (x86 architecture simulator)
+floating point emulation.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+=============================================================================*/
+
+/*============================================================================
+ * Written for Bochs (x86 architecture simulator) by
+ * Stanislav Shwartsman [sshwarts at sourceforge net]
+ * ==========================================================================*/
+
+#define FLOAT128
+
+#define USE_estimateDiv128To64
+#include "mamesf.h"
+#include "softfloat.h"
+//#include "softfloat-specialize"
+#include "fpu_constant.h"
+
+static const floatx80 floatx80_one = packFloatx80(0, 0x3fff, U64(0x8000000000000000));
+static const floatx80 floatx80_default_nan = packFloatx80(0, 0xffff, U64(0xffffffffffffffff));
+
+#define packFloat2x128m(zHi, zLo) {(zLo), (zHi)}
+#define PACK_FLOAT_128(hi,lo) packFloat2x128m(LIT64(hi),LIT64(lo))
+
+#define EXP_BIAS 0x3FFF
+
+/*----------------------------------------------------------------------------
+| Returns the fraction bits of the extended double-precision floating-point
+| value `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE bits64 extractFloatx80Frac( floatx80 a )
+{
+
+ return a.low;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the exponent bits of the extended double-precision floating-point
+| value `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE int32 extractFloatx80Exp( floatx80 a )
+{
+
+ return a.high & 0x7FFF;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the sign bit of the extended double-precision floating-point value
+| `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE flag extractFloatx80Sign( floatx80 a )
+{
+
+ return a.high>>15;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes extended double-precision floating-point NaN `a' and returns the
+| appropriate NaN result. If `a' is a signaling NaN, the invalid exception
+| is raised.
+*----------------------------------------------------------------------------*/
+
+INLINE floatx80 propagateFloatx80NaNOneArg(floatx80 a)
+{
+ if (floatx80_is_signaling_nan(a))
+ float_raise(float_flag_invalid);
+
+ a.low |= U64(0xC000000000000000);
+
+ return a;
+}
+
+/*----------------------------------------------------------------------------
+| Normalizes the subnormal extended double-precision floating-point value
+| represented by the denormalized significand `aSig'. The normalized exponent
+| and significand are stored at the locations pointed to by `zExpPtr' and
+| `zSigPtr', respectively.
+*----------------------------------------------------------------------------*/
+
+void normalizeFloatx80Subnormal(UINT64 aSig, INT32 *zExpPtr, UINT64 *zSigPtr)
+{
+ int shiftCount = countLeadingZeros64(aSig);
+ *zSigPtr = aSig<<shiftCount;
+ *zExpPtr = 1 - shiftCount;
+}
+
+/* reduce trigonometric function argument using 128-bit precision
+ M_PI approximation */
+static UINT64 argument_reduction_kernel(UINT64 aSig0, int Exp, UINT64 *zSig0, UINT64 *zSig1)
+{
+ UINT64 term0, term1, term2;
+ UINT64 aSig1 = 0;
+
+ shortShift128Left(aSig1, aSig0, Exp, &aSig1, &aSig0);
+ UINT64 q = estimateDiv128To64(aSig1, aSig0, FLOAT_PI_HI);
+ mul128By64To192(FLOAT_PI_HI, FLOAT_PI_LO, q, &term0, &term1, &term2);
+ sub128(aSig1, aSig0, term0, term1, zSig1, zSig0);
+ while ((INT64)(*zSig1) < 0) {
+ --q;
+ add192(*zSig1, *zSig0, term2, 0, FLOAT_PI_HI, FLOAT_PI_LO, zSig1, zSig0, &term2);
+ }
+ *zSig1 = term2;
+ return q;
+}
+
+static int reduce_trig_arg(int expDiff, int &zSign, UINT64 &aSig0, UINT64 &aSig1)
+{
+ UINT64 term0, term1, q = 0;
+
+ if (expDiff < 0) {
+ shift128Right(aSig0, 0, 1, &aSig0, &aSig1);
+ expDiff = 0;
+ }
+ if (expDiff > 0) {
+ q = argument_reduction_kernel(aSig0, expDiff, &aSig0, &aSig1);
+ }
+ else {
+ if (FLOAT_PI_HI <= aSig0) {
+ aSig0 -= FLOAT_PI_HI;
+ q = 1;
+ }
+ }
+
+ shift128Right(FLOAT_PI_HI, FLOAT_PI_LO, 1, &term0, &term1);
+ if (! lt128(aSig0, aSig1, term0, term1))
+ {
+ int lt = lt128(term0, term1, aSig0, aSig1);
+ int eq = eq128(aSig0, aSig1, term0, term1);
+
+ if ((eq && (q & 1)) || lt) {
+ zSign = !zSign;
+ ++q;
+ }
+ if (lt) sub128(FLOAT_PI_HI, FLOAT_PI_LO, aSig0, aSig1, &aSig0, &aSig1);
+ }
+
+ return (int)(q & 3);
+}
+
+#define SIN_ARR_SIZE 11
+#define COS_ARR_SIZE 11
+
+static float128 sin_arr[SIN_ARR_SIZE] =
+{
+ PACK_FLOAT_128(0x3fff000000000000, 0x0000000000000000), /* 1 */
+ PACK_FLOAT_128(0xbffc555555555555, 0x5555555555555555), /* 3 */
+ PACK_FLOAT_128(0x3ff8111111111111, 0x1111111111111111), /* 5 */
+ PACK_FLOAT_128(0xbff2a01a01a01a01, 0xa01a01a01a01a01a), /* 7 */
+ PACK_FLOAT_128(0x3fec71de3a556c73, 0x38faac1c88e50017), /* 9 */
+ PACK_FLOAT_128(0xbfe5ae64567f544e, 0x38fe747e4b837dc7), /* 11 */
+ PACK_FLOAT_128(0x3fde6124613a86d0, 0x97ca38331d23af68), /* 13 */
+ PACK_FLOAT_128(0xbfd6ae7f3e733b81, 0xf11d8656b0ee8cb0), /* 15 */
+ PACK_FLOAT_128(0x3fce952c77030ad4, 0xa6b2605197771b00), /* 17 */
+ PACK_FLOAT_128(0xbfc62f49b4681415, 0x724ca1ec3b7b9675), /* 19 */
+ PACK_FLOAT_128(0x3fbd71b8ef6dcf57, 0x18bef146fcee6e45) /* 21 */
+};
+
+static float128 cos_arr[COS_ARR_SIZE] =
+{
+ PACK_FLOAT_128(0x3fff000000000000, 0x0000000000000000), /* 0 */
+ PACK_FLOAT_128(0xbffe000000000000, 0x0000000000000000), /* 2 */
+ PACK_FLOAT_128(0x3ffa555555555555, 0x5555555555555555), /* 4 */
+ PACK_FLOAT_128(0xbff56c16c16c16c1, 0x6c16c16c16c16c17), /* 6 */
+ PACK_FLOAT_128(0x3fefa01a01a01a01, 0xa01a01a01a01a01a), /* 8 */
+ PACK_FLOAT_128(0xbfe927e4fb7789f5, 0xc72ef016d3ea6679), /* 10 */
+ PACK_FLOAT_128(0x3fe21eed8eff8d89, 0x7b544da987acfe85), /* 12 */
+ PACK_FLOAT_128(0xbfda93974a8c07c9, 0xd20badf145dfa3e5), /* 14 */
+ PACK_FLOAT_128(0x3fd2ae7f3e733b81, 0xf11d8656b0ee8cb0), /* 16 */
+ PACK_FLOAT_128(0xbfca6827863b97d9, 0x77bb004886a2c2ab), /* 18 */
+ PACK_FLOAT_128(0x3fc1e542ba402022, 0x507a9cad2bf8f0bb) /* 20 */
+};
+
+extern float128 OddPoly (float128 x, float128 *arr, unsigned n);
+
+/* 0 <= x <= pi/4 */
+INLINE float128 poly_sin(float128 x)
+{
+ // 3 5 7 9 11 13 15
+ // x x x x x x x
+ // sin (x) ~ x - --- + --- - --- + --- - ---- + ---- - ---- =
+ // 3! 5! 7! 9! 11! 13! 15!
+ //
+ // 2 4 6 8 10 12 14
+ // x x x x x x x
+ // = x * [ 1 - --- + --- - --- + --- - ---- + ---- - ---- ] =
+ // 3! 5! 7! 9! 11! 13! 15!
+ //
+ // 3 3
+ // -- 4k -- 4k+2
+ // p(x) = > C * x > 0 q(x) = > C * x < 0
+ // -- 2k -- 2k+1
+ // k=0 k=0
+ //
+ // 2
+ // sin(x) ~ x * [ p(x) + x * q(x) ]
+ //
+
+ return OddPoly(x, sin_arr, SIN_ARR_SIZE);
+}
+
+extern float128 EvenPoly(float128 x, float128 *arr, unsigned n);
+
+/* 0 <= x <= pi/4 */
+INLINE float128 poly_cos(float128 x)
+{
+ // 2 4 6 8 10 12 14
+ // x x x x x x x
+ // cos (x) ~ 1 - --- + --- - --- + --- - ---- + ---- - ----
+ // 2! 4! 6! 8! 10! 12! 14!
+ //
+ // 3 3
+ // -- 4k -- 4k+2
+ // p(x) = > C * x > 0 q(x) = > C * x < 0
+ // -- 2k -- 2k+1
+ // k=0 k=0
+ //
+ // 2
+ // cos(x) ~ [ p(x) + x * q(x) ]
+ //
+
+ return EvenPoly(x, cos_arr, COS_ARR_SIZE);
+}
+
+INLINE void sincos_invalid(floatx80 *sin_a, floatx80 *cos_a, floatx80 a)
+{
+ if (sin_a) *sin_a = a;
+ if (cos_a) *cos_a = a;
+}
+
+INLINE void sincos_tiny_argument(floatx80 *sin_a, floatx80 *cos_a, floatx80 a)
+{
+ if (sin_a) *sin_a = a;
+ if (cos_a) *cos_a = floatx80_one;
+}
+
+static floatx80 sincos_approximation(int neg, float128 r, UINT64 quotient)
+{
+ if (quotient & 0x1) {
+ r = poly_cos(r);
+ neg = 0;
+ } else {
+ r = poly_sin(r);
+ }
+
+ floatx80 result = float128_to_floatx80(r);
+ if (quotient & 0x2)
+ neg = ! neg;
+
+ if (neg)
+ result = floatx80_chs(result);
+
+ return result;
+}
+
+// =================================================
+// SFFSINCOS Compute sin(x) and cos(x)
+// =================================================
+
+//
+// Uses the following identities:
+// ----------------------------------------------------------
+//
+// sin(-x) = -sin(x)
+// cos(-x) = cos(x)
+//
+// sin(x+y) = sin(x)*cos(y)+cos(x)*sin(y)
+// cos(x+y) = cos(x)*cos(y)-sin(x)*sin(y)
+//
+// sin(x+ pi/2) = cos(x)
+// sin(x+ pi) = -sin(x)
+// sin(x+3pi/2) = -cos(x)
+// sin(x+2pi) = sin(x)
+//
+
+int sf_fsincos(floatx80 a, floatx80 *sin_a, floatx80 *cos_a)
+{
+ UINT64 aSig0, aSig1 = 0;
+ INT32 aExp, zExp, expDiff;
+ int aSign, zSign;
+ int q = 0;
+
+ aSig0 = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ /* invalid argument */
+ if (aExp == 0x7FFF) {
+ if ((UINT64) (aSig0<<1)) {
+ sincos_invalid(sin_a, cos_a, propagateFloatx80NaNOneArg(a));
+ return 0;
+ }
+
+ float_raise(float_flag_invalid);
+ sincos_invalid(sin_a, cos_a, floatx80_default_nan);
+ return 0;
+ }
+
+ if (aExp == 0) {
+ if (aSig0 == 0) {
+ sincos_tiny_argument(sin_a, cos_a, a);
+ return 0;
+ }
+
+// float_raise(float_flag_denormal);
+
+ /* handle pseudo denormals */
+ if (! (aSig0 & U64(0x8000000000000000)))
+ {
+ float_raise(float_flag_inexact);
+ if (sin_a)
+ float_raise(float_flag_underflow);
+ sincos_tiny_argument(sin_a, cos_a, a);
+ return 0;
+ }
+
+ normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0);
+ }
+
+ zSign = aSign;
+ zExp = EXP_BIAS;
+ expDiff = aExp - zExp;
+
+ /* argument is out-of-range */
+ if (expDiff >= 63)
+ return -1;
+
+ float_raise(float_flag_inexact);
+
+ if (expDiff < -1) { // doesn't require reduction
+ if (expDiff <= -68) {
+ a = packFloatx80(aSign, aExp, aSig0);
+ sincos_tiny_argument(sin_a, cos_a, a);
+ return 0;
+ }
+ zExp = aExp;
+ }
+ else {
+ q = reduce_trig_arg(expDiff, zSign, aSig0, aSig1);
+ }
+
+ /* **************************** */
+ /* argument reduction completed */
+ /* **************************** */
+
+ /* using float128 for approximation */
+ float128 r = normalizeRoundAndPackFloat128(0, zExp-0x10, aSig0, aSig1);
+
+ if (aSign) q = -q;
+ if (sin_a) *sin_a = sincos_approximation(zSign, r, q);
+ if (cos_a) *cos_a = sincos_approximation(zSign, r, q+1);
+
+ return 0;
+}
+
+int floatx80_fsin(floatx80 &a)
+{
+ return sf_fsincos(a, &a, 0);
+}
+
+int floatx80_fcos(floatx80 &a)
+{
+ return sf_fsincos(a, 0, &a);
+}
+
+// =================================================
+// FPTAN Compute tan(x)
+// =================================================
+
+//
+// Uses the following identities:
+//
+// 1. ----------------------------------------------------------
+//
+// sin(-x) = -sin(x)
+// cos(-x) = cos(x)
+//
+// sin(x+y) = sin(x)*cos(y)+cos(x)*sin(y)
+// cos(x+y) = cos(x)*cos(y)-sin(x)*sin(y)
+//
+// sin(x+ pi/2) = cos(x)
+// sin(x+ pi) = -sin(x)
+// sin(x+3pi/2) = -cos(x)
+// sin(x+2pi) = sin(x)
+//
+// 2. ----------------------------------------------------------
+//
+// sin(x)
+// tan(x) = ------
+// cos(x)
+//
+
+int floatx80_ftan(floatx80 &a)
+{
+ UINT64 aSig0, aSig1 = 0;
+ INT32 aExp, zExp, expDiff;
+ int aSign, zSign;
+ int q = 0;
+
+ aSig0 = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ /* invalid argument */
+ if (aExp == 0x7FFF) {
+ if ((UINT64) (aSig0<<1))
+ {
+ a = propagateFloatx80NaNOneArg(a);
+ return 0;
+ }
+
+ float_raise(float_flag_invalid);
+ a = floatx80_default_nan;
+ return 0;
+ }
+
+ if (aExp == 0) {
+ if (aSig0 == 0) return 0;
+// float_raise(float_flag_denormal);
+ /* handle pseudo denormals */
+ if (! (aSig0 & U64(0x8000000000000000)))
+ {
+ float_raise(float_flag_inexact | float_flag_underflow);
+ return 0;
+ }
+ normalizeFloatx80Subnormal(aSig0, &aExp, &aSig0);
+ }
+
+ zSign = aSign;
+ zExp = EXP_BIAS;
+ expDiff = aExp - zExp;
+
+ /* argument is out-of-range */
+ if (expDiff >= 63)
+ return -1;
+
+ float_raise(float_flag_inexact);
+
+ if (expDiff < -1) { // doesn't require reduction
+ if (expDiff <= -68) {
+ a = packFloatx80(aSign, aExp, aSig0);
+ return 0;
+ }
+ zExp = aExp;
+ }
+ else {
+ q = reduce_trig_arg(expDiff, zSign, aSig0, aSig1);
+ }
+
+ /* **************************** */
+ /* argument reduction completed */
+ /* **************************** */
+
+ /* using float128 for approximation */
+ float128 r = normalizeRoundAndPackFloat128(0, zExp-0x10, aSig0, aSig1);
+
+ float128 sin_r = poly_sin(r);
+ float128 cos_r = poly_cos(r);
+
+ if (q & 0x1) {
+ r = float128_div(cos_r, sin_r);
+ zSign = ! zSign;
+ } else {
+ r = float128_div(sin_r, cos_r);
+ }
+
+ a = float128_to_floatx80(r);
+ if (zSign)
+ a = floatx80_chs(a);
+
+ return 0;
+}
+
+// 2 3 4 n
+// f(x) ~ C + (C * x) + (C * x) + (C * x) + (C * x) + ... + (C * x)
+// 0 1 2 3 4 n
+//
+// -- 2k -- 2k+1
+// p(x) = > C * x q(x) = > C * x
+// -- 2k -- 2k+1
+//
+// f(x) ~ [ p(x) + x * q(x) ]
+//
+
+float128 EvalPoly(float128 x, float128 *arr, unsigned n)
+{
+ float128 x2 = float128_mul(x, x);
+ unsigned i;
+
+ assert(n > 1);
+
+ float128 r1 = arr[--n];
+ i = n;
+ while(i >= 2) {
+ r1 = float128_mul(r1, x2);
+ i -= 2;
+ r1 = float128_add(r1, arr[i]);
+ }
+ if (i) r1 = float128_mul(r1, x);
+
+ float128 r2 = arr[--n];
+ i = n;
+ while(i >= 2) {
+ r2 = float128_mul(r2, x2);
+ i -= 2;
+ r2 = float128_add(r2, arr[i]);
+ }
+ if (i) r2 = float128_mul(r2, x);
+
+ return float128_add(r1, r2);
+}
+
+// 2 4 6 8 2n
+// f(x) ~ C + (C * x) + (C * x) + (C * x) + (C * x) + ... + (C * x)
+// 0 1 2 3 4 n
+//
+// -- 4k -- 4k+2
+// p(x) = > C * x q(x) = > C * x
+// -- 2k -- 2k+1
+//
+// 2
+// f(x) ~ [ p(x) + x * q(x) ]
+//
+
+float128 EvenPoly(float128 x, float128 *arr, unsigned n)
+{
+ return EvalPoly(float128_mul(x, x), arr, n);
+}
+
+// 3 5 7 9 2n+1
+// f(x) ~ (C * x) + (C * x) + (C * x) + (C * x) + (C * x) + ... + (C * x)
+// 0 1 2 3 4 n
+// 2 4 6 8 2n
+// = x * [ C + (C * x) + (C * x) + (C * x) + (C * x) + ... + (C * x)
+// 0 1 2 3 4 n
+//
+// -- 4k -- 4k+2
+// p(x) = > C * x q(x) = > C * x
+// -- 2k -- 2k+1
+//
+// 2
+// f(x) ~ x * [ p(x) + x * q(x) ]
+//
+
+float128 OddPoly(float128 x, float128 *arr, unsigned n)
+{
+ return float128_mul(x, EvenPoly(x, arr, n));
+}
+
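
EvalPoly(), EvenPoly() and OddPoly() above implement the p(x) + x^2*q(x) split drawn in the ASCII diagrams: the even- and odd-indexed coefficients are each run through a Horner chain in x^2 and then recombined. The following is a double-precision sketch of the same scheme, illustrative only and not part of the library; eval_poly and the shortened coefficient table are hypothetical stand-ins for EvalPoly() and sin_arr[].

    #include <math.h>
    #include <stdio.h>

    /* Sketch only: the EvalPoly() scheme in plain double precision. */
    static double eval_poly(double x, const double *arr, unsigned n)
    {
        double x2 = x * x, r1, r2;
        unsigned i;

        r1 = arr[--n]; i = n;                    /* first parity chain */
        while (i >= 2) { r1 *= x2; i -= 2; r1 += arr[i]; }
        if (i) r1 *= x;

        r2 = arr[--n]; i = n;                    /* second parity chain */
        while (i >= 2) { r2 *= x2; i -= 2; r2 += arr[i]; }
        if (i) r2 *= x;

        return r1 + r2;                          /* p(x) + x * q(x) */
    }

    int main(void)
    {
        /* 1/1!, -1/3!, 1/5!, -1/7!, 1/9!: a shortened version of sin_arr[] */
        const double c[] = { 1.0, -1.0/6, 1.0/120, -1.0/5040, 1.0/362880 };
        double x = 0.5;
        /* OddPoly(x) = x * EvenPoly(x) = x * EvalPoly(x*x, ...) */
        double s = x * eval_poly(x * x, c, 5);
        printf("%.12f %.12f\n", s, sin(x));      /* agree to roughly 1e-11 */
        return 0;
    }
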
diff --git a/src/lib/softfloat/softfloat-macros b/src/lib/softfloat/softfloat-macros
index 2c8f18b1cec..e4a059f3df8 100644
--- a/src/lib/softfloat/softfloat-macros
+++ b/src/lib/softfloat/softfloat-macros
@@ -543,7 +543,7 @@ INLINE void
| unsigned integer is returned.
*----------------------------------------------------------------------------*/
-static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
+INLINE bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
{
bits64 b0, b1;
bits64 rem0, rem1, term0, term1;
@@ -575,7 +575,7 @@ static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
| value.
*----------------------------------------------------------------------------*/
-static bits32 estimateSqrt32( int16 aExp, bits32 a )
+INLINE bits32 estimateSqrt32( int16 aExp, bits32 a )
{
static const bits16 sqrtOddAdjustments[] = {
0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
@@ -718,3 +718,15 @@ INLINE flag ne128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
}
+/*-----------------------------------------------------------------------------
+| Changes the sign of the extended double-precision floating-point value 'a'.
+| The operation is performed according to the IEC/IEEE Standard for Binary
+| Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+
+INLINE floatx80 floatx80_chs(floatx80 reg)
+{
+ reg.high ^= 0x8000;
+ return reg;
+}
+
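
Note that floatx80_chs() takes its operand by value and returns the sign-flipped copy, so the return value has to be assigned; calling it for side effect alone leaves the operand unchanged. A minimal usage sketch (illustrative only):

    /* Sketch only: negate an extended-precision value. */
    floatx80 a = packFloatx80(0, 0x3FFF, U64(0x8000000000000000));  /* +1.0 */
    a = floatx80_chs(a);   /* now -1.0: only the sign bit of a.high flipped */
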
diff --git a/src/lib/softfloat/softfloat-specialize b/src/lib/softfloat/softfloat-specialize
index 7091af35264..9993f3fd2b3 100644
--- a/src/lib/softfloat/softfloat-specialize
+++ b/src/lib/softfloat/softfloat-specialize
@@ -331,6 +331,44 @@ static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b )
}
+#define EXP_BIAS 0x3FFF
+
+/*----------------------------------------------------------------------------
+| Returns the fraction bits of the extended double-precision floating-point
+| value `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE bits64 extractFloatx80Frac( floatx80 a )
+{
+
+ return a.low;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the exponent bits of the extended double-precision floating-point
+| value `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE int32 extractFloatx80Exp( floatx80 a )
+{
+
+ return a.high & 0x7FFF;
+
+}
+
+/*----------------------------------------------------------------------------
+| Returns the sign bit of the extended double-precision floating-point value
+| `a'.
+*----------------------------------------------------------------------------*/
+
+INLINE flag extractFloatx80Sign( floatx80 a )
+{
+
+ return a.high>>15;
+
+}
+
#endif
#ifdef FLOAT128
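
These duplicated helpers encode the floatx80 layout: bit 15 of `high` holds the sign, its low 15 bits hold the biased exponent, and `low` is the full 64-bit significand with an explicit integer bit. A tiny round-trip sketch (illustrative only):

    /* Sketch only: pack +1.0 and read the fields back. */
    floatx80 one = packFloatx80(0, EXP_BIAS, U64(0x8000000000000000));
    /* extractFloatx80Sign(one) -> 0
       extractFloatx80Exp(one)  -> 0x3FFF  (EXP_BIAS)
       extractFloatx80Frac(one) -> U64(0x8000000000000000) */
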
diff --git a/src/lib/softfloat/softfloat.c b/src/lib/softfloat/softfloat.c
index 2bd5a73def6..f607feb27db 100644
--- a/src/lib/softfloat/softfloat.c
+++ b/src/lib/softfloat/softfloat.c
@@ -37,18 +37,12 @@ these four paragraphs for those parts of this code that are retained.
| Floating-point rounding mode, extended double-precision rounding precision,
| and exception flags.
*----------------------------------------------------------------------------*/
-int8 float_rounding_mode = float_round_nearest_even;
int8 float_exception_flags = 0;
#ifdef FLOATX80
int8 floatx80_rounding_precision = 80;
#endif
-/*----------------------------------------------------------------------------
-| Primitive arithmetic functions, including multi-word arithmetic, and
-| division and square root approximations. (Can be specialized to target if
-| desired.)
-*----------------------------------------------------------------------------*/
-#include "softfloat-macros"
+int8 float_rounding_mode = float_round_nearest_even;
/*----------------------------------------------------------------------------
| Functions and definitions to determine: (1) whether tininess for underflow
@@ -489,42 +483,6 @@ static float64
#ifdef FLOATX80
/*----------------------------------------------------------------------------
-| Returns the fraction bits of the extended double-precision floating-point
-| value `a'.
-*----------------------------------------------------------------------------*/
-
-INLINE bits64 extractFloatx80Frac( floatx80 a )
-{
-
- return a.low;
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the exponent bits of the extended double-precision floating-point
-| value `a'.
-*----------------------------------------------------------------------------*/
-
-INLINE int32 extractFloatx80Exp( floatx80 a )
-{
-
- return a.high & 0x7FFF;
-
-}
-
-/*----------------------------------------------------------------------------
-| Returns the sign bit of the extended double-precision floating-point value
-| `a'.
-*----------------------------------------------------------------------------*/
-
-INLINE flag extractFloatx80Sign( floatx80 a )
-{
-
- return a.high>>15;
-
-}
-
-/*----------------------------------------------------------------------------
| Normalizes the subnormal extended double-precision floating-point value
| represented by the denormalized significand `aSig'. The normalized exponent
| and significand are stored at the locations pointed to by `zExpPtr' and
@@ -543,21 +501,6 @@ static void
}
/*----------------------------------------------------------------------------
-| Packs the sign `zSign', exponent `zExp', and significand `zSig' into an
-| extended double-precision floating-point value, returning the result.
-*----------------------------------------------------------------------------*/
-
-INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig )
-{
- floatx80 z;
-
- z.low = zSig;
- z.high = ( ( (bits16) zSign )<<15 ) + zExp;
- return z;
-
-}
-
-/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and extended significand formed by the concatenation of `zSig0' and `zSig1',
| and returns the proper extended double-precision floating-point value
@@ -861,177 +804,6 @@ static void
}
-/*----------------------------------------------------------------------------
-| Packs the sign `zSign', the exponent `zExp', and the significand formed
-| by the concatenation of `zSig0' and `zSig1' into a quadruple-precision
-| floating-point value, returning the result. After being shifted into the
-| proper positions, the three fields `zSign', `zExp', and `zSig0' are simply
-| added together to form the most significant 32 bits of the result. This
-| means that any integer portion of `zSig0' will be added into the exponent.
-| Since a properly normalized significand will have an integer portion equal
-| to 1, the `zExp' input should be 1 less than the desired result exponent
-| whenever `zSig0' and `zSig1' concatenated form a complete, normalized
-| significand.
-*----------------------------------------------------------------------------*/
-
-INLINE float128
- packFloat128( flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 )
-{
- float128 z;
-
- z.low = zSig1;
- z.high = ( ( (bits64) zSign )<<63 ) + ( ( (bits64) zExp )<<48 ) + zSig0;
- return z;
-
-}
-
-/*----------------------------------------------------------------------------
-| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
-| and extended significand formed by the concatenation of `zSig0', `zSig1',
-| and `zSig2', and returns the proper quadruple-precision floating-point value
-| corresponding to the abstract input. Ordinarily, the abstract value is
-| simply rounded and packed into the quadruple-precision format, with the
-| inexact exception raised if the abstract input cannot be represented
-| exactly. However, if the abstract value is too large, the overflow and
-| inexact exceptions are raised and an infinity or maximal finite value is
-| returned. If the abstract value is too small, the input value is rounded to
-| a subnormal number, and the underflow and inexact exceptions are raised if
-| the abstract input cannot be represented exactly as a subnormal quadruple-
-| precision floating-point number.
-| The input significand must be normalized or smaller. If the input
-| significand is not normalized, `zExp' must be 0; in that case, the result
-| returned is a subnormal number, and it must not require rounding. In the
-| usual case that the input significand is normalized, `zExp' must be 1 less
-| than the ``true'' floating-point exponent. The handling of underflow and
-| overflow follows the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
-*----------------------------------------------------------------------------*/
-
-static float128
- roundAndPackFloat128(
- flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1, bits64 zSig2 )
-{
- int8 roundingMode;
- flag roundNearestEven, increment, isTiny;
-
- roundingMode = float_rounding_mode;
- roundNearestEven = ( roundingMode == float_round_nearest_even );
- increment = ( (sbits64) zSig2 < 0 );
- if ( ! roundNearestEven ) {
- if ( roundingMode == float_round_to_zero ) {
- increment = 0;
- }
- else {
- if ( zSign ) {
- increment = ( roundingMode == float_round_down ) && zSig2;
- }
- else {
- increment = ( roundingMode == float_round_up ) && zSig2;
- }
- }
- }
- if ( 0x7FFD <= (bits32) zExp ) {
- if ( ( 0x7FFD < zExp )
- || ( ( zExp == 0x7FFD )
- && eq128(
- LIT64( 0x0001FFFFFFFFFFFF ),
- LIT64( 0xFFFFFFFFFFFFFFFF ),
- zSig0,
- zSig1
- )
- && increment
- )
- ) {
- float_raise( float_flag_overflow | float_flag_inexact );
- if ( ( roundingMode == float_round_to_zero )
- || ( zSign && ( roundingMode == float_round_up ) )
- || ( ! zSign && ( roundingMode == float_round_down ) )
- ) {
- return
- packFloat128(
- zSign,
- 0x7FFE,
- LIT64( 0x0000FFFFFFFFFFFF ),
- LIT64( 0xFFFFFFFFFFFFFFFF )
- );
- }
- return packFloat128( zSign, 0x7FFF, 0, 0 );
- }
- if ( zExp < 0 ) {
- isTiny =
- ( float_detect_tininess == float_tininess_before_rounding )
- || ( zExp < -1 )
- || ! increment
- || lt128(
- zSig0,
- zSig1,
- LIT64( 0x0001FFFFFFFFFFFF ),
- LIT64( 0xFFFFFFFFFFFFFFFF )
- );
- shift128ExtraRightJamming(
- zSig0, zSig1, zSig2, - zExp, &zSig0, &zSig1, &zSig2 );
- zExp = 0;
- if ( isTiny && zSig2 ) float_raise( float_flag_underflow );
- if ( roundNearestEven ) {
- increment = ( (sbits64) zSig2 < 0 );
- }
- else {
- if ( zSign ) {
- increment = ( roundingMode == float_round_down ) && zSig2;
- }
- else {
- increment = ( roundingMode == float_round_up ) && zSig2;
- }
- }
- }
- }
- if ( zSig2 ) float_exception_flags |= float_flag_inexact;
- if ( increment ) {
- add128( zSig0, zSig1, 0, 1, &zSig0, &zSig1 );
- zSig1 &= ~ ( ( zSig2 + zSig2 == 0 ) & roundNearestEven );
- }
- else {
- if ( ( zSig0 | zSig1 ) == 0 ) zExp = 0;
- }
- return packFloat128( zSign, zExp, zSig0, zSig1 );
-
-}
-
-/*----------------------------------------------------------------------------
-| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
-| and significand formed by the concatenation of `zSig0' and `zSig1', and
-| returns the proper quadruple-precision floating-point value corresponding
-| to the abstract input. This routine is just like `roundAndPackFloat128'
-| except that the input significand has fewer bits and does not have to be
-| normalized. In all cases, `zExp' must be 1 less than the ``true'' floating-
-| point exponent.
-*----------------------------------------------------------------------------*/
-
-static float128
- normalizeRoundAndPackFloat128(
- flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 )
-{
- int8 shiftCount;
- bits64 zSig2;
-
- if ( zSig0 == 0 ) {
- zSig0 = zSig1;
- zSig1 = 0;
- zExp -= 64;
- }
- shiftCount = countLeadingZeros64( zSig0 ) - 15;
- if ( 0 <= shiftCount ) {
- zSig2 = 0;
- shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
- }
- else {
- shift128ExtraRightJamming(
- zSig0, zSig1, 0, - shiftCount, &zSig0, &zSig1, &zSig2 );
- }
- zExp -= shiftCount;
- return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 );
-
-}
-
#endif
/*----------------------------------------------------------------------------
diff --git a/src/lib/softfloat/softfloat.h b/src/lib/softfloat/softfloat.h
index cb5ee9458ad..642daa901d6 100644
--- a/src/lib/softfloat/softfloat.h
+++ b/src/lib/softfloat/softfloat.h
@@ -58,6 +58,13 @@ typedef struct {
#endif
/*----------------------------------------------------------------------------
+| Primitive arithmetic functions, including multi-word arithmetic, and
+| division and square root approximations. (Can be specialized to target if
+| desired.)
+*----------------------------------------------------------------------------*/
+#include "softfloat-macros"
+
+/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point underflow tininess-detection mode.
*----------------------------------------------------------------------------*/
extern int8 float_detect_tininess;
@@ -197,6 +204,21 @@ float128 floatx80_to_float128( floatx80 );
#endif
/*----------------------------------------------------------------------------
+| Packs the sign `zSign', exponent `zExp', and significand `zSig' into an
+| extended double-precision floating-point value, returning the result.
+*----------------------------------------------------------------------------*/
+
+INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig )
+{
+ floatx80 z;
+
+ z.low = zSig;
+ z.high = ( ( (bits16) zSign )<<15 ) + zExp;
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision rounding precision. Valid
| values are 32, 64, and 80.
*----------------------------------------------------------------------------*/
@@ -220,6 +242,10 @@ flag floatx80_le_quiet( floatx80, floatx80 );
flag floatx80_lt_quiet( floatx80, floatx80 );
flag floatx80_is_signaling_nan( floatx80 );
+int floatx80_fsin(floatx80 &a);
+int floatx80_fcos(floatx80 &a);
+int floatx80_ftan(floatx80 &a);
+
#endif
#ifdef FLOAT128
@@ -255,5 +281,176 @@ flag float128_le_quiet( float128, float128 );
flag float128_lt_quiet( float128, float128 );
flag float128_is_signaling_nan( float128 );
+/*----------------------------------------------------------------------------
+| Packs the sign `zSign', the exponent `zExp', and the significand formed
+| by the concatenation of `zSig0' and `zSig1' into a quadruple-precision
+| floating-point value, returning the result. After being shifted into the
+| proper positions, the three fields `zSign', `zExp', and `zSig0' are simply
+| added together to form the most significant 32 bits of the result. This
+| means that any integer portion of `zSig0' will be added into the exponent.
+| Since a properly normalized significand will have an integer portion equal
+| to 1, the `zExp' input should be 1 less than the desired result exponent
+| whenever `zSig0' and `zSig1' concatenated form a complete, normalized
+| significand.
+*----------------------------------------------------------------------------*/
+
+INLINE float128
+ packFloat128( flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 )
+{
+ float128 z;
+
+ z.low = zSig1;
+ z.high = ( ( (bits64) zSign )<<63 ) + ( ( (bits64) zExp )<<48 ) + zSig0;
+ return z;
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and extended significand formed by the concatenation of `zSig0', `zSig1',
+| and `zSig2', and returns the proper quadruple-precision floating-point value
+| corresponding to the abstract input. Ordinarily, the abstract value is
+| simply rounded and packed into the quadruple-precision format, with the
+| inexact exception raised if the abstract input cannot be represented
+| exactly. However, if the abstract value is too large, the overflow and
+| inexact exceptions are raised and an infinity or maximal finite value is
+| returned. If the abstract value is too small, the input value is rounded to
+| a subnormal number, and the underflow and inexact exceptions are raised if
+| the abstract input cannot be represented exactly as a subnormal quadruple-
+| precision floating-point number.
+| The input significand must be normalized or smaller. If the input
+| significand is not normalized, `zExp' must be 0; in that case, the result
+| returned is a subnormal number, and it must not require rounding. In the
+| usual case that the input significand is normalized, `zExp' must be 1 less
+| than the ``true'' floating-point exponent. The handling of underflow and
+| overflow follows the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/
+
+INLINE float128
+ roundAndPackFloat128(
+ flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1, bits64 zSig2 )
+{
+ int8 roundingMode;
+ flag roundNearestEven, increment, isTiny;
+
+ roundingMode = float_rounding_mode;
+ roundNearestEven = ( roundingMode == float_round_nearest_even );
+ increment = ( (sbits64) zSig2 < 0 );
+ if ( ! roundNearestEven ) {
+ if ( roundingMode == float_round_to_zero ) {
+ increment = 0;
+ }
+ else {
+ if ( zSign ) {
+ increment = ( roundingMode == float_round_down ) && zSig2;
+ }
+ else {
+ increment = ( roundingMode == float_round_up ) && zSig2;
+ }
+ }
+ }
+ if ( 0x7FFD <= (bits32) zExp ) {
+ if ( ( 0x7FFD < zExp )
+ || ( ( zExp == 0x7FFD )
+ && eq128(
+ LIT64( 0x0001FFFFFFFFFFFF ),
+ LIT64( 0xFFFFFFFFFFFFFFFF ),
+ zSig0,
+ zSig1
+ )
+ && increment
+ )
+ ) {
+ float_raise( float_flag_overflow | float_flag_inexact );
+ if ( ( roundingMode == float_round_to_zero )
+ || ( zSign && ( roundingMode == float_round_up ) )
+ || ( ! zSign && ( roundingMode == float_round_down ) )
+ ) {
+ return
+ packFloat128(
+ zSign,
+ 0x7FFE,
+ LIT64( 0x0000FFFFFFFFFFFF ),
+ LIT64( 0xFFFFFFFFFFFFFFFF )
+ );
+ }
+ return packFloat128( zSign, 0x7FFF, 0, 0 );
+ }
+ if ( zExp < 0 ) {
+ isTiny =
+ ( float_detect_tininess == float_tininess_before_rounding )
+ || ( zExp < -1 )
+ || ! increment
+ || lt128(
+ zSig0,
+ zSig1,
+ LIT64( 0x0001FFFFFFFFFFFF ),
+ LIT64( 0xFFFFFFFFFFFFFFFF )
+ );
+ shift128ExtraRightJamming(
+ zSig0, zSig1, zSig2, - zExp, &zSig0, &zSig1, &zSig2 );
+ zExp = 0;
+ if ( isTiny && zSig2 ) float_raise( float_flag_underflow );
+ if ( roundNearestEven ) {
+ increment = ( (sbits64) zSig2 < 0 );
+ }
+ else {
+ if ( zSign ) {
+ increment = ( roundingMode == float_round_down ) && zSig2;
+ }
+ else {
+ increment = ( roundingMode == float_round_up ) && zSig2;
+ }
+ }
+ }
+ }
+ if ( zSig2 ) float_exception_flags |= float_flag_inexact;
+ if ( increment ) {
+ add128( zSig0, zSig1, 0, 1, &zSig0, &zSig1 );
+ zSig1 &= ~ ( ( zSig2 + zSig2 == 0 ) & roundNearestEven );
+ }
+ else {
+ if ( ( zSig0 | zSig1 ) == 0 ) zExp = 0;
+ }
+ return packFloat128( zSign, zExp, zSig0, zSig1 );
+
+}
+
+/*----------------------------------------------------------------------------
+| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+| and significand formed by the concatenation of `zSig0' and `zSig1', and
+| returns the proper quadruple-precision floating-point value corresponding
+| to the abstract input. This routine is just like `roundAndPackFloat128'
+| except that the input significand has fewer bits and does not have to be
+| normalized. In all cases, `zExp' must be 1 less than the ``true'' floating-
+| point exponent.
+*----------------------------------------------------------------------------*/
+
+INLINE float128
+ normalizeRoundAndPackFloat128(
+ flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 )
+{
+ int8 shiftCount;
+ bits64 zSig2;
+
+ if ( zSig0 == 0 ) {
+ zSig0 = zSig1;
+ zSig1 = 0;
+ zExp -= 64;
+ }
+ shiftCount = countLeadingZeros64( zSig0 ) - 15;
+ if ( 0 <= shiftCount ) {
+ zSig2 = 0;
+ shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
+ }
+ else {
+ shift128ExtraRightJamming(
+ zSig0, zSig1, 0, - shiftCount, &zSig0, &zSig1, &zSig2 );
+ }
+ zExp -= shiftCount;
+ return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 );
+
+}
+
#endif
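
As the packFloat128() comment notes, any integer portion of `zSig0` is added straight into the exponent field, which is why callers pass `zExp` one below the true exponent when the significand carries its explicit integer bit (bit 48 of `zSig0`). A worked example (illustrative only, not part of the library sources):

    /* Sketch only: quadruple-precision +1.0, true exponent 0x3FFF.
       The integer bit in zSig0 carries into the exponent field:
       (0x3FFE << 48) + 0x0001000000000000 == 0x3FFF000000000000.  */
    float128 one = packFloat128(0, 0x3FFE, LIT64(0x0001000000000000), 0);
    /* one.high == LIT64(0x3FFF000000000000), one.low == 0 */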