2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief tarval floating point calculations
10 * @author Mathias Heil
29 * portability stuff (why do we even care about the msvc people with their C89?)
/**
 * Parse a decimal string into a long double.
 *
 * Uses strtold() when a C99/POSIX-2001 environment is available,
 * otherwise falls back to strtod() (losing extended precision).
 */
static long double string_to_long_double(const char *str)
{
#if __STDC_VERSION__ >= 199901L || _POSIX_C_SOURCE >= 200112L
	return strtold(str, NULL);
#else
	return strtod(str, NULL);
#endif
}
/**
 * Portable NaN test.
 *
 * With C99 the isnan() macro from <math.h> is used; the pre-C99 fallback
 * relies on NaN being the only value that compares unequal to itself.
 */
static bool my_isnan(long double val)
{
#if __STDC_VERSION__ >= 199901L
	return isnan(val);
#else
	/* hopefully the compiler does not optimize aggressively (=incorrect) */
	return val != val;
#endif
}
/**
 * Portable infinity test.
 *
 * With C99 the isinf() macro from <math.h> is used; the pre-C99 fallback
 * exploits that inf-inf is NaN while inf itself is not NaN.
 */
static bool my_isinf(long double val)
{
#if __STDC_VERSION__ >= 199901L
	return isinf(val);
#else
	/* hopefully the compiler does not optimize aggressively (=incorrect) */
	return my_isnan(val-val) && !my_isnan(val);
#endif
}
62 /** The number of extra precision rounding bits */
63 #define ROUNDING_BITS 2

/* NOTE(review): the following lines are fragments of endianness-dependent
 * struct/union declarations; the surrounding declaration lines are missing
 * from this extract, so only the visible members are documented. */
67 #ifdef WORDS_BIGENDIAN
73 #ifdef WORDS_BIGENDIAN
80 #ifdef WORDS_BIGENDIAN
85 #ifdef WORDS_BIGENDIAN
/* raw long double member used for bit-level reinterpretation;
 * volatile so the compiler keeps it in memory */
91 volatile long double d;

/* zero one calculation buffer (calc_buffer_size bytes) */
94 #define CLEAR_BUFFER(buffer) memset(buffer, 0, calc_buffer_size)

96 /* our floating point value */
/* format description: exponent/mantissa sizes and explicit-one flag */
98 float_descriptor_t desc;
/* trailing storage: exponent half then mantissa half, value_size bytes each */
101 char value[1]; /* exp[value_size] + mant[value_size] */

/* accessors for the exponent and mantissa halves of fp_value::value */
104 #define _exp(a) &((a)->value[0])
105 #define _mant(a) &((a)->value[value_size])

/* copy the strcalc result buffer into x */
107 #define _save_result(x) memcpy((x), sc_get_buffer(), value_size)
/* shift helpers operating on the full value width (value_size*4 bits) */
108 #define _shift_right(x, y, res) sc_shr((x), (y), value_size*4, 0, (res))
109 #define _shift_left(x, y, res) sc_shl((x), (y), value_size*4, 0, (res))

111 /** A temporary buffer. */
112 static fp_value *calc_buffer = NULL;

114 /** Current rounding mode.*/
115 static fc_rounding_mode_t rounding_mode;

/* buffer/precision sizes, set up in init_fltcalc() */
117 static int calc_buffer_size;
118 static int value_size;
119 static int max_precision;

/* records whether the last operation was exact (1) or had to round (0) */
122 static int fc_exact = 1;
124 /** pack machine-like */
/* Converts an fp_value into its packed machine bit layout
 * (sign|exponent|mantissa) written into 'packed' as an sc value.
 * NaN and infinity are first canonicalized via fc_get_qnan/fc_get_plusinf.
 * NOTE(review): switch case labels and several braces are missing from
 * this extract; documented from the visible statements only. */
125 static void *pack(const fp_value *int_float, void *packed)
129 fp_value *val_buffer;

132 temp = (char*) alloca(value_size);
133 shift_val = (char*) alloca(value_size);

135 switch ((value_class_t)int_float->clss) {
/* NaN: substitute a canonical quiet NaN of the same format */
137 val_buffer = (fp_value*) alloca(calc_buffer_size);
138 fc_get_qnan(&int_float->desc, val_buffer);
139 int_float = val_buffer;
/* infinity: substitute +inf of the same format, then restore the sign */
143 val_buffer = (fp_value*) alloca(calc_buffer_size);
144 fc_get_plusinf(&int_float->desc, val_buffer);
145 val_buffer->sign = int_float->sign;
146 int_float = val_buffer;

152 assert(int_float->desc.explicit_one <= 1);

154 /* pack sign: move it to the left after exponent AND mantissa */
155 sc_val_from_ulong(int_float->sign, temp);
157 pos = int_float->desc.exponent_size + int_float->desc.mantissa_size + int_float->desc.explicit_one;
158 sc_val_from_ulong(pos, NULL);
159 _shift_left(temp, sc_get_buffer(), packed);

161 /* pack exponent: move it to the left after mantissa */
162 pos = int_float->desc.mantissa_size + int_float->desc.explicit_one;
163 sc_val_from_ulong(pos, shift_val);
164 _shift_left(_exp(int_float), shift_val, temp);

166 /* combine sign|exponent */
167 sc_or(temp, packed, packed);

169 /* extract mantissa */
170 /* remove rounding bits */
171 sc_val_from_ulong(ROUNDING_BITS, shift_val);
172 _shift_right(_mant(int_float), shift_val, temp);

174 /* remove leading 1 (or 0 if denormalized) */
175 sc_max_from_bits(pos, 0, shift_val); /* all mantissa bits are 1's */
176 sc_and(temp, shift_val, temp);

178 /* combine sign|exponent|mantissa */
179 sc_or(temp, packed, packed);
185  * Normalize a fp_value.
187  * @return non-zero if result is exact
/* Brings the mantissa's leading 1 to the canonical position, adjusts the
 * exponent, handles subnormals and performs IEEE-style rounding according
 * to the global rounding_mode using the two ROUNDING_BITS plus 'sticky'.
 * NOTE(review): many lines (braces, case labels, some statements) are
 * missing from this extract; comments describe the visible code only. */
189 static int normalize(const fp_value *in_val, fp_value *out_val, int sticky)
193 char lsb, guard, round, round_dir = 0;
194 char *temp = (char*) alloca(value_size);

196 /* save rounding bits at the end */
/* hsb = distance of the highest set mantissa bit from the canonical
 * leading-one position; negative means the mantissa is too far left */
197 hsb = ROUNDING_BITS + in_val->desc.mantissa_size - sc_get_highest_set_bit(_mant(in_val)) - 1;

199 if (in_val != out_val) {
200 out_val->sign = in_val->sign;
201 out_val->desc = in_val->desc;

204 out_val->clss = FC_NORMAL;

206 /* mantissa all zeros, so zero exponent (because of explicit one) */
207 if (hsb == ROUNDING_BITS + in_val->desc.mantissa_size) {
208 sc_val_from_ulong(0, _exp(out_val));

212 /* shift the first 1 into the left of the radix point (i.e. hsb == -1) */
215 sc_val_from_ulong(-hsb-1, temp);

217 _shift_right(_mant(in_val), temp, _mant(out_val));

219 /* remember if some bits were shifted away */
220 if (sc_had_carry()) {

224 sc_add(_exp(in_val), temp, _exp(out_val));
225 } else if (hsb > -1) {
227 sc_val_from_ulong(hsb+1, temp);
229 _shift_left(_mant(in_val), temp, _mant(out_val));

231 sc_sub(_exp(in_val), temp, _exp(out_val));

234 /* check for exponent underflow */
235 if (sc_is_negative(_exp(out_val)) || sc_is_zero(_exp(out_val))) {
236 /* exponent underflow */
237 /* shift the mantissa right to have a zero exponent */
238 sc_val_from_ulong(1, temp);
239 sc_sub(temp, _exp(out_val), NULL);

241 _shift_right(_mant(out_val), sc_get_buffer(), _mant(out_val));
242 if (sc_had_carry()) {

246 /* denormalized means exponent of zero */
247 sc_val_from_ulong(0, _exp(out_val));

249 out_val->clss = FC_SUBNORMAL;

252 /* perform rounding by adding a value that clears the guard bit and the round bit
253  * and either causes a carry to round up or not */
254 /* get the last 3 bits of the value */
255 lsb = sc_sub_bits(_mant(out_val), out_val->desc.mantissa_size + ROUNDING_BITS, 0) & 0x7;
256 guard = (lsb&0x2)>>1;

259 switch (rounding_mode) {
261 /* round to nearest representable value, if in doubt choose the version
263 round_dir = guard && (sticky || round || lsb>>2);
266 /* if positive: round to one if the exact value is bigger, else to zero */
267 round_dir = (!out_val->sign && (guard || round || sticky));
270 /* if negative: round to one if the exact value is bigger, else to zero */
271 round_dir = (out_val->sign && (guard || round || sticky));
274 /* always round to 0 (chopping mode) */

279 if (round_dir == 1) {
280 guard = (round^guard)<<1;
281 lsb = !(round || guard)<<2 | guard | round;
283 lsb = -((guard<<1) | round);

286 /* add the rounded value */
288 sc_val_from_long(lsb, temp);
289 sc_add(_mant(out_val), temp, _mant(out_val));

293 /* could have rounded down to zero */
294 if (sc_is_zero(_mant(out_val)) && (out_val->clss == FC_SUBNORMAL))
295 out_val->clss = FC_ZERO;

297 /* check for rounding overflow */
298 hsb = ROUNDING_BITS + out_val->desc.mantissa_size - sc_get_highest_set_bit(_mant(out_val)) - 1;
299 if ((out_val->clss != FC_SUBNORMAL) && (hsb < -1)) {
300 sc_val_from_ulong(1, temp);
301 _shift_right(_mant(out_val), temp, _mant(out_val));
302 if (exact && sc_had_carry())

304 sc_add(_exp(out_val), temp, _exp(out_val));
305 } else if ((out_val->clss == FC_SUBNORMAL) && (hsb == -1)) {
306 /* overflow caused the mantissa to be normal again,
307  * so adapt the exponent accordingly */
308 sc_val_from_ulong(1, temp);
309 sc_add(_exp(out_val), temp, _exp(out_val));

311 out_val->clss = FC_NORMAL;

313 /* no further rounding is needed, because rounding overflow means
314  * the carry of the original rounding was propagated all the way
315  * up to the bit left of the radix point. This implies the bits
316  * to the right are all zeros (rounding is +1) */

318 /* check for exponent overflow */
319 sc_val_from_ulong((1 << out_val->desc.exponent_size) - 1, temp);
320 if (sc_comp(_exp(out_val), temp) != ir_relation_less) {
321 /* exponent overflow, reaction depends on rounding method:
323  * mode       | sign of value |  result
324  *--------------------------------------------------------------
325  * TO_NEAREST |      +        |   +inf
327  *--------------------------------------------------------------
328  * TO_POSITIVE |     +        |   +inf
329  *            |      -        |   smallest representable value
330  *--------------------------------------------------------------
331  * TO_NEAGTIVE |     +        |   largest representable value
333  *--------------------------------------------------------------
334  * TO_ZERO    |      +        |   largest representable value
335  *            |      -        |   smallest representable value
336  *--------------------------------------------------------------*/
337 if (out_val->sign == 0) {
338 /* value is positive */
339 switch (rounding_mode) {
342 out_val->clss = FC_INF;
347 fc_get_max(&out_val->desc, out_val);
350 /* value is negative */
351 switch (rounding_mode) {
354 out_val->clss = FC_INF;
359 fc_get_min(&out_val->desc, out_val);
/* Shared early-out for binary operations: if either operand is NaN the
 * result is that NaN (copied into 'result' when they differ) and the
 * operation is not exact.
 * NOTE(review): continuation lines of this macro are missing from this
 * extract; no comments are inserted inside the macro body because every
 * macro line must end in a backslash. */
367  * Operations involving NaN's must return NaN.
368  * They are NOT exact.
370 #define handle_NAN(a, b, result) \
372 if (a->clss == FC_NAN) { \
373 if (a != result) memcpy(result, a, calc_buffer_size); \
377 if (b->clss == FC_NAN) { \
378 if (b != result) memcpy(result, b, calc_buffer_size); \
386  * calculate a + b, where a is the value with the bigger exponent
/* Core addition/subtraction: aligns radix points by shifting b's mantissa
 * right by the exponent difference, then adds or subtracts the mantissas
 * depending on the operands' signs, and normalizes the result.
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
388 static void _fadd(const fp_value *a, const fp_value *b, fp_value *result)

398 handle_NAN(a, b, result);

400 /* make sure result has a descriptor */
401 if (result != a && result != b)
402 result->desc = a->desc;

404 /* determine if this is an addition or subtraction */
405 sign = a->sign ^ b->sign;

407 /* produce NaN on inf - inf */
408 if (sign && (a->clss == FC_INF) && (b->clss == FC_INF)) {
410 fc_get_qnan(&a->desc, result);

414 temp = (char*) alloca(value_size);
415 exp_diff = (char*) alloca(value_size);

417 /* get exponent difference */
418 sc_sub(_exp(a), _exp(b), exp_diff);

420 /* initially set sign to be the sign of a, special treatment of subtraction
421  * when exponents are equal is required though.
422  * Also special care about the sign is needed when the mantissas are equal
424 if (sign && sc_val_to_long(exp_diff) == 0) {
425 switch (sc_comp(_mant(a), _mant(b))) {
426 case ir_relation_greater:  /* a > b */
427 res_sign = a->sign;  /* abs(a) is bigger and a is negative */
429 case ir_relation_equal:  /* a == b */
/* exact zero result: -0 only under round-to-negative */
430 res_sign = (rounding_mode == FC_TONEGATIVE);
432 case ir_relation_less:  /* a < b */
433 res_sign = b->sign; /* abs(b) is bigger and b is negative */
436 panic("invalid comparison result");

441 result->sign = res_sign;

443 /* sign has been taken care of, check for special cases */
444 if (a->clss == FC_ZERO || b->clss == FC_INF) {
446 memcpy(result, b, calc_buffer_size);
447 fc_exact = b->clss == FC_NORMAL;
448 result->sign = res_sign;

451 if (b->clss == FC_ZERO || a->clss == FC_INF) {
453 memcpy(result, a, calc_buffer_size);
454 fc_exact = a->clss == FC_NORMAL;
455 result->sign = res_sign;

459 /* shift the smaller value to the right to align the radix point */
460 /* subnormals have their radix point shifted to the right,
461  * take care of this first */
462 if ((b->clss == FC_SUBNORMAL) && (a->clss != FC_SUBNORMAL)) {
463 sc_val_from_ulong(1, temp);
464 sc_sub(exp_diff, temp, exp_diff);

467 _shift_right(_mant(b), exp_diff, temp);
468 sticky = sc_had_carry();

471 if (sticky && sign) {
472 /* if subtracting a little more than the represented value or adding a little
473  * more than the represented value to a negative value this, in addition to the
474  * still set sticky bit, takes account of the 'little more' */
475 char *temp1 = (char*) alloca(calc_buffer_size);
476 sc_val_from_ulong(1, temp1);
477 sc_add(temp, temp1, temp);

/* subtraction: always subtract the smaller mantissa from the bigger one */
481 if (sc_comp(_mant(a), temp) == ir_relation_less)
482 sc_sub(temp, _mant(a), _mant(result));
484 sc_sub(_mant(a), temp, _mant(result));
486 sc_add(_mant(a), temp, _mant(result));

489 /* _normalize expects a 'normal' radix point, adding two subnormals
490  * results in a subnormal radix point -> shifting before normalizing */
491 if ((a->clss == FC_SUBNORMAL) && (b->clss == FC_SUBNORMAL)) {
492 sc_val_from_ulong(1, NULL);
493 _shift_left(_mant(result), sc_get_buffer(), _mant(result));

496 /* resulting exponent is the bigger one */
497 memmove(_exp(result), _exp(a), value_size);

499 fc_exact &= normalize(result, result, sticky);
/* Core multiplication: result sign is XOR of operand signs, exponents are
 * added (minus the bias/excess), mantissas multiplied and realigned.
 * Produces a quiet NaN for 0 * inf.
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
505 static void _fmul(const fp_value *a, const fp_value *b, fp_value *result)

513 handle_NAN(a, b, result);

515 temp = (char*) alloca(value_size);

517 if (result != a && result != b)
518 result->desc = a->desc;

520 result->sign = res_sign = a->sign ^ b->sign;

522 /* produce NaN on 0 * inf */
523 if (a->clss == FC_ZERO) {
524 if (b->clss == FC_INF) {
525 fc_get_qnan(&a->desc, result);
529 memcpy(result, a, calc_buffer_size);
530 result->sign = res_sign;

534 if (b->clss == FC_ZERO) {
535 if (a->clss == FC_INF) {
536 fc_get_qnan(&a->desc, result);
540 memcpy(result, b, calc_buffer_size);
541 result->sign = res_sign;

546 if (a->clss == FC_INF) {
549 memcpy(result, a, calc_buffer_size);
550 result->sign = res_sign;
553 if (b->clss == FC_INF) {
556 memcpy(result, b, calc_buffer_size);
557 result->sign = res_sign;

561 /* exp = exp(a) + exp(b) - excess */
562 sc_add(_exp(a), _exp(b), _exp(result));

564 sc_val_from_ulong((1 << (a->desc.exponent_size - 1)) - 1, temp);
565 sc_sub(_exp(result), temp, _exp(result));

567 /* mixed normal, subnormal values introduce an error of 1, correct it */
568 if ((a->clss == FC_SUBNORMAL) ^ (b->clss == FC_SUBNORMAL)) {
569 sc_val_from_ulong(1, temp);
570 sc_add(_exp(result), temp, _exp(result));

573 sc_mul(_mant(a), _mant(b), _mant(result));

575 /* realign result: after a multiplication the digits right of the radix
576  * point are the sum of the factors' digits after the radix point. As all
577  * values are normalized they both have the same amount of these digits,
578  * which has to be restored by proper shifting
579  * because of the rounding bits */
580 sc_val_from_ulong(ROUNDING_BITS + result->desc.mantissa_size, temp);

582 _shift_right(_mant(result), temp, _mant(result));
583 sticky = sc_had_carry();

586 fc_exact &= normalize(result, result, sticky);
/* Core division: result sign is XOR of operand signs, exponents are
 * subtracted (plus excess - 1), dividend mantissa is pre-shifted left to
 * gain precision. Produces NaN for 0/0 and inf/inf, zero for x/inf and
 * signed infinity for division by zero.
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
592 static void _fdiv(const fp_value *a, const fp_value *b, fp_value *result)
595 char *temp, *dividend;

600 handle_NAN(a, b, result);

602 temp = (char*) alloca(value_size);
603 dividend = (char*) alloca(value_size);

605 if (result != a && result != b)
606 result->desc = a->desc;

608 result->sign = res_sign = a->sign ^ b->sign;

610 /* produce FC_NAN on 0/0 and inf/inf */
611 if (a->clss == FC_ZERO) {
612 if (b->clss == FC_ZERO) {
614 fc_get_qnan(&a->desc, result);
619 memcpy(result, a, calc_buffer_size);
620 result->sign = res_sign;

625 if (b->clss == FC_INF) {
627 if (a->clss == FC_INF) {
629 fc_get_qnan(&a->desc, result);
/* finite / inf = (signed) zero */
632 sc_val_from_ulong(0, NULL);
633 _save_result(_exp(result));
634 _save_result(_mant(result));
635 result->clss = FC_ZERO;

640 if (a->clss == FC_INF) {
644 memcpy(result, a, calc_buffer_size);
645 result->sign = res_sign;

648 if (b->clss == FC_ZERO) {
650 /* division by zero */
652 fc_get_minusinf(&a->desc, result);
654 fc_get_plusinf(&a->desc, result);

658 /* exp = exp(a) - exp(b) + excess - 1*/
659 sc_sub(_exp(a), _exp(b), _exp(result));
660 sc_val_from_ulong((1 << (a->desc.exponent_size - 1)) - 2, temp);
661 sc_add(_exp(result), temp, _exp(result));

663 /* mixed normal, subnormal values introduce an error of 1, correct it */
664 if ((a->clss == FC_SUBNORMAL) ^ (b->clss == FC_SUBNORMAL)) {
665 sc_val_from_ulong(1, temp);
666 sc_add(_exp(result), temp, _exp(result));

669 /* mant(res) = mant(a) / 1/2mant(b) */
670 /* to gain more bits of precision in the result the dividend could be
671  * shifted left, as this operation does not loose bits. This would not
672  * fit into the integer precision, but due to the rounding bits (which
673  * are always zero because the values are all normalized) the divisor
674  * can be shifted right instead to achieve the same result */
675 sc_val_from_ulong(ROUNDING_BITS + result->desc.mantissa_size, temp);

677 _shift_left(_mant(a), temp, dividend);

680 char *divisor = (char*) alloca(calc_buffer_size);
681 sc_val_from_ulong(1, divisor);
682 _shift_right(_mant(b), divisor, divisor);
683 sc_div(dividend, divisor, _mant(result));
684 sticky = sc_had_carry();

688 fc_exact &= normalize(result, result, sticky);
692  * Truncate the fractional part away.
694  * This does not clip to any integer range.
/* Masks away all mantissa bits right of the radix point for the value's
 * (unbiased) exponent; values with exp < 0 become zero, values with
 * exp > mantissa_size are already integral and copied unchanged.
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
696 static void _trunc(const fp_value *a, fp_value *result)
699  * When exponent == 0 all bits left of the radix point
700  * are the integral part of the value. For 15bit exp_size
701  * this would require a left shift of max. 16383 bits which
703  * But it is enough to ensure that no bit right of the radix
704  * point remains set. This restricts the interesting
705  * exponents to the interval [0, mant_size-1].
706  * Outside this interval the truncated value is either 0 or
707  * it does not have fractional parts.

710 int exp_bias, exp_val;

713 /* fixme: can be exact */

716 temp = (char*) alloca(value_size);

719 result->desc = a->desc;
720 result->clss = a->clss;

723 exp_bias = (1 << (a->desc.exponent_size - 1)) - 1;
724 exp_val = sc_val_to_long(_exp(a)) - exp_bias;

/* negative exponent: |value| < 1, truncates to (signed) zero */
727 sc_val_from_ulong(0, NULL);
728 _save_result(_exp(result));
729 _save_result(_mant(result));
730 result->clss = FC_ZERO;

/* exponent beyond the mantissa: no fractional bits exist */
735 if (exp_val > (long)a->desc.mantissa_size) {
737 memcpy(result, a, calc_buffer_size);

742 /* set up a proper mask to delete all bits right of the
743  * radix point if the mantissa had been shifted until exp == 0 */
744 sc_max_from_bits(1 + exp_val, 0, temp);
745 sc_val_from_long(a->desc.mantissa_size - exp_val + 2, NULL);
746 _shift_left(temp, sc_get_buffer(), temp);

748 /* and the mask and return the result */
749 sc_and(_mant(a), temp, _mant(result));

752 memcpy(_exp(result), _exp(a), value_size);
753 result->sign = a->sign;
758  * functions defined in fltcalc.h
/* Returns the shared internal calculation buffer.
 * NOTE(review): the body line is missing from this extract —
 * presumably `return calc_buffer;`; verify against the full source. */
760 const void *fc_get_buffer(void)

/* Returns the size in bytes of a calculation buffer. */
765 int fc_get_buffer_length(void)
767 return calc_buffer_size;
/* Parses a decimal string into an fp_value of the requested format by
 * going through the host long double (80-bit x87 layout: 15-bit exponent,
 * 63-bit mantissa, explicit one) and then casting to the target format.
 * NOTE(review): several lines are missing from this extract (including,
 * presumably, the NUL-termination of 'buffer' — verify against the full
 * source); comments describe the visible code only. */
770 void *fc_val_from_str(const char *str, size_t len,
771 const float_descriptor_t *desc, void *result)

775 /* XXX excuse of an implementation to make things work */

777 fp_value *tmp = (fp_value*) alloca(calc_buffer_size);
778 float_descriptor_t tmp_desc;

/* copy into a writable buffer so string_to_long_double gets a C string */
780 buffer = (char*) alloca(len+1);
781 memcpy(buffer, str, len);

783 val = string_to_long_double(buffer);

785 tmp_desc.exponent_size = 15;
786 tmp_desc.mantissa_size = 63;
787 tmp_desc.explicit_one  = 1;
788 fc_val_from_ieee754(val, &tmp_desc, tmp);

790 return fc_cast(tmp, desc, (fp_value*) result);
/* Converts a host long double into an fp_value of format 'desc' by
 * picking apart the IEEE bit layout (64-bit double or x86 80-bit extended,
 * chosen by sizeof(long double)), then rebuilding exponent and mantissa
 * in the internal representation and normalizing.
 * NOTE(review): braces and several statements are missing from this
 * extract; comments describe the visible code only. */
793 fp_value *fc_val_from_ieee754(long double l, const float_descriptor_t *desc,
797 int bias_res, bias_val, mant_val;

800 uint32_t exponent, mantissa0, mantissa1;
801 size_t long_double_size = sizeof(long double);

/* the target format's exponent bias */
804 bias_res = ((1 << (desc->exponent_size - 1)) - 1);

806 if (long_double_size == 8) {
/* 64-bit IEEE double layout: 1 sign, 11 exponent, 52 mantissa bits */
809 sign = (srcval.val_ld8.high & 0x80000000) != 0;
810 exponent = (srcval.val_ld8.high & 0x7FF00000) >> 20;
811 mantissa0 = srcval.val_ld8.high & 0x000FFFFF;
812 mantissa1 = srcval.val_ld8.low;
814 /* we assume an x86-like 80bit representation of the value... */
815 assert(sizeof(long double)==12 || sizeof(long double)==16);
818 sign = (srcval.val_ld12.high & 0x00008000) != 0;
819 exponent = (srcval.val_ld12.high & 0x00007FFF) ;
820 mantissa0 = srcval.val_ld12.mid;
821 mantissa1 = srcval.val_ld12.low;

825 result = calc_buffer;
826 temp = (char*) alloca(value_size);

828 /* CLEAR the buffer, else some bits might be uninitialized */
829 memset(result, 0, fc_get_buffer_length());

831 result->desc = *desc;
832 result->clss = FC_NORMAL;

835 /* sign and flag suffice to identify NaN or inf, no exponent/mantissa
836  * encoding is needed. the function can return immediately in these cases */
838 result->clss = FC_NAN;
840 } else if (my_isinf(l)) {
841 result->clss = FC_INF;

845 /* build exponent, because input and output exponent and mantissa sizes may differ
846  * this looks more complicated than it is: unbiased input exponent + output bias,
847  * minus the mantissa difference which is added again later when the output float
848  * becomes normalized */
849 sc_val_from_long((exponent - bias_val + bias_res) - (mant_val - desc->mantissa_size), _exp(result));

851 /* build mantissa representation */
853 /* insert the hidden bit */
854 sc_val_from_ulong(1, temp);
855 sc_val_from_ulong(mant_val + ROUNDING_BITS, NULL);
856 _shift_left(temp, sc_get_buffer(), NULL);

859 sc_val_from_ulong(0, NULL);

862 _save_result(_mant(result));

864 /* bits from the upper word */
865 sc_val_from_ulong(mantissa0, temp);
866 sc_val_from_ulong(34, NULL);
867 _shift_left(temp, sc_get_buffer(), temp);
868 sc_or(_mant(result), temp, _mant(result));

870 /* bits from the lower word */
871 sc_val_from_ulong(mantissa1, temp);
872 sc_val_from_ulong(ROUNDING_BITS, NULL);
873 _shift_left(temp, sc_get_buffer(), temp);
874 sc_or(_mant(result), temp, _mant(result));

876 /* _normalize expects the radix point to be normal, so shift mantissa of subnormal
877  * origin one to the left */
879 sc_val_from_ulong(1, NULL);
880 _shift_left(_mant(result), sc_get_buffer(), _mant(result));

883 normalize(result, result, 0);
/* Converts an fp_value back into the host long double by casting it to
 * the host format (double or x86 80-bit extended, by sizeof(long double))
 * and assembling the raw sign/exponent/mantissa words.
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
888 long double fc_val_to_ieee754(const fp_value *val)
891 fp_value *temp = NULL;

893 unsigned byte_offset;

901 float_descriptor_t desc;
902 unsigned mantissa_size;

904 size_t long_double_size = sizeof(long double);

906 if (long_double_size == 8) {
907 desc.exponent_size = 11;
908 desc.mantissa_size = 52;
909 desc.explicit_one  = 0;
911 desc.exponent_size = 15;
912 desc.mantissa_size = 63;
913 desc.explicit_one  = 1;
915 mantissa_size = desc.mantissa_size + desc.explicit_one;

917 temp = (fp_value*) alloca(calc_buffer_size);
918 value = fc_cast(val, &desc, temp);

922 /* @@@ long double exponent is 15bit, so the use of sc_val_to_long should not
923  * lead to wrong results */
924 exponent = sc_val_to_long(_exp(value)) ;

/* drop the internal extra rounding bits before extracting raw bytes */
926 sc_val_from_ulong(ROUNDING_BITS, NULL);
927 _shift_right(_mant(value), sc_get_buffer(), _mant(value));

/* low 4 bytes of the mantissa */
932 for (byte_offset = 0; byte_offset < 4; byte_offset++)
933 mantissa1 |= sc_sub_bits(_mant(value), mantissa_size, byte_offset) << (byte_offset << 3);

/* remaining mantissa bytes */
935 for (; (byte_offset<<3) < desc.mantissa_size; byte_offset++)
936 mantissa0 |= sc_sub_bits(_mant(value), mantissa_size, byte_offset) << ((byte_offset - 4) << 3);

938 if (long_double_size == 8) {
939 mantissa0 &= 0x000FFFFF;  /* get rid of garbage */
940 buildval.val_ld8.high = sign << 31;
941 buildval.val_ld8.high |= exponent << 20;
942 buildval.val_ld8.high |= mantissa0;
943 buildval.val_ld8.low = mantissa1;
945 buildval.val_ld12.high = sign << 15;
946 buildval.val_ld12.high |= exponent;
947 buildval.val_ld12.mid = mantissa0;
948 buildval.val_ld12.low = mantissa1;
/* Casts 'value' into the (possibly different) format 'desc': identical
 * formats are a plain copy, NaN/inf are re-encoded canonically, otherwise
 * the exponent is rebiased for the differing mantissa size and the result
 * renormalized (which performs any necessary rounding).
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
954 fp_value *fc_cast(const fp_value *value, const float_descriptor_t *desc,
958 int exp_offset, val_bias, res_bias;

960 if (result == NULL) result = calc_buffer;
961 temp = (char*) alloca(value_size);

/* same format: a straight copy suffices */
963 if (value->desc.exponent_size == desc->exponent_size &&
964 value->desc.mantissa_size == desc->mantissa_size &&
965 value->desc.explicit_one == desc->explicit_one) {
967 memcpy(result, value, calc_buffer_size);

971 if (value->clss == FC_NAN) {
/* msb of the mantissa distinguishes quiet from signaling NaN */
972 if (sc_get_highest_set_bit(_mant(value)) == value->desc.mantissa_size + 1)
973 return fc_get_qnan(desc, result);
975 return fc_get_snan(desc, result);
977 else if (value->clss == FC_INF) {
978 if (value->sign == 0)
979 return fc_get_plusinf(desc, result);
981 return fc_get_minusinf(desc, result);

984 /* set the descriptor of the new value */
985 result->desc = *desc;
986 result->clss = value->clss;
987 result->sign = value->sign;

989 /* when the mantissa sizes differ normalizing has to shift to align it.
990  * this would change the exponent, which is unwanted. So calculate this
991  * offset and add it */
992 val_bias = (1 << (value->desc.exponent_size - 1)) - 1;
993 res_bias = (1 << (desc->exponent_size - 1)) - 1;

995 exp_offset = (res_bias - val_bias) - (value->desc.mantissa_size - desc->mantissa_size);
996 sc_val_from_long(exp_offset, temp);
997 sc_add(_exp(value), temp, _exp(result));

999 /* _normalize expects normalized radix point */
1000 if (value->clss == FC_SUBNORMAL) {
1001 sc_val_from_ulong(1, NULL);
1002 _shift_left(_mant(value), sc_get_buffer(), _mant(result));
1003 } else if (value != result) {
1004 memcpy(_mant(result), _mant(value), value_size);
1006 memmove(_mant(result), _mant(value), value_size);

1009 normalize(result, result, 0);
/* Builds the largest finite positive value of format 'desc':
 * maximum non-reserved exponent and an all-ones mantissa.
 * NOTE(review): braces and the return statement are missing
 * from this extract. */
1013 fp_value *fc_get_max(const float_descriptor_t *desc, fp_value *result)
1015 if (result == NULL) result = calc_buffer;

1017 result->desc = *desc;
1018 result->clss = FC_NORMAL;

/* all-ones exponent is reserved for inf/NaN, so use one less */
1021 sc_val_from_ulong((1 << desc->exponent_size) - 2, _exp(result));

/* all mantissa bits set, shifted above the extra rounding bits */
1023 sc_max_from_bits(desc->mantissa_size + 1, 0, _mant(result));
1024 sc_val_from_ulong(ROUNDING_BITS, NULL);
1025 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
/* Builds the most negative finite value of format 'desc' as the negation
 * of fc_get_max().
 * NOTE(review): the sign assignment and return are missing from this
 * extract — presumably `result->sign = 1; return result;`; verify
 * against the full source. */
1030 fp_value *fc_get_min(const float_descriptor_t *desc, fp_value *result)
1032 if (result == NULL) result = calc_buffer;

1034 fc_get_max(desc, result);
/* Builds a signaling NaN of format 'desc': all-ones exponent and a
 * non-zero mantissa whose msb is clear.
 * NOTE(review): braces and the return statement are missing from this
 * extract. */
1040 fp_value *fc_get_snan(const float_descriptor_t *desc, fp_value *result)
1042 if (result == NULL) result = calc_buffer;

1044 result->desc = *desc;
1045 result->clss = FC_NAN;

1048 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));

1050 /* signaling NaN has non-zero mantissa with msb not set */
1051 sc_val_from_ulong(1, _mant(result));
/* Builds a quiet NaN of format 'desc': all-ones exponent and a mantissa
 * with (only) the msb set.
 * NOTE(review): braces and the return statement are missing from this
 * extract. */
1056 fp_value *fc_get_qnan(const float_descriptor_t *desc, fp_value *result)
1058 if (result == NULL) result = calc_buffer;

1060 result->desc = *desc;
1061 result->clss = FC_NAN;

1064 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));

1066 /* quiet NaN has the msb of the mantissa set, so shift one there */
1067 sc_val_from_ulong(1, _mant(result));
1068 /* mantissa_size >+< 1 because of two extra rounding bits */
1069 sc_val_from_ulong(desc->mantissa_size + 1, NULL);
1070 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
/* Builds +infinity of format 'desc': all-ones exponent, zero mantissa
 * (with the explicit-one bit set for formats that carry it).
 * NOTE(review): braces and the return statement are missing from this
 * extract. */
1075 fp_value *fc_get_plusinf(const float_descriptor_t *desc, fp_value *result)
1079 if (result == NULL) result = calc_buffer;

1081 result->desc = *desc;
1082 result->clss = FC_INF;

1085 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));

1087 mant = _mant(result);
1088 sc_val_from_ulong(0, mant);
1089 if (desc->explicit_one) {
1090 sc_set_bit_at(mant, result->desc.mantissa_size + ROUNDING_BITS);
/* Builds -infinity of format 'desc' as negated fc_get_plusinf().
 * NOTE(review): the sign assignment and return are missing from this
 * extract — presumably `result->sign = 1; return result;`; verify
 * against the full source. */
1096 fp_value *fc_get_minusinf(const float_descriptor_t *desc, fp_value *result)
1098 if (result == NULL) result = calc_buffer;

1100 fc_get_plusinf(desc, result);
/* Compares two fp_values and returns an ir_relation: unordered when
 * either operand is NaN, equal for +-0 and for two infinities of the same
 * sign, otherwise decided by sign, then exponent, then mantissa
 * (comparison flipped for negative operands via 'mul').
 * NOTE(review): braces and some statements are missing from this
 * extract; comments describe the visible code only. */
1106 ir_relation fc_comp(fp_value const *const val_a, fp_value const *const val_b)
1109  * shortcut: if both values are identical, they are either
1110  * Unordered if NaN or equal
1113 return val_a->clss == FC_NAN ? ir_relation_unordered : ir_relation_equal;

1115 /* unordered if one is a NaN */
1116 if (val_a->clss == FC_NAN || val_b->clss == FC_NAN)
1117 return ir_relation_unordered;

1119 /* zero is equal independent of sign */
1120 if ((val_a->clss == FC_ZERO) && (val_b->clss == FC_ZERO))
1121 return ir_relation_equal;

1123 /* different signs make compare easy */
1124 if (val_a->sign != val_b->sign)
1125 return val_a->sign == 0 ? ir_relation_greater : ir_relation_less;

/* XOR mask that flips greater/less for negative operands */
1127 ir_relation const mul = val_a->sign ? ir_relation_less_greater : ir_relation_false;

1129 /* both infinity means equality */
1130 if ((val_a->clss == FC_INF) && (val_b->clss == FC_INF))
1131 return ir_relation_equal;

1133 /* infinity is bigger than the rest */
1134 if (val_a->clss == FC_INF)
1135 return ir_relation_greater ^ mul;
1136 if (val_b->clss == FC_INF)
1137 return ir_relation_less ^ mul;

1139 /* check first exponent, that mantissa if equal */
1140 ir_relation rel = sc_comp(_exp(val_a), _exp(val_b));
1141 if (rel == ir_relation_equal)
1142 rel = sc_comp(_mant(val_a), _mant(val_b));
1143 if (rel != ir_relation_equal)
/* Simple classification predicates over fp_value::clss.
 * NOTE(review): braces (and for fc_is_negative the body line) are
 * missing from this extract. */

/* Returns non-zero if the value is (+/-) zero. */
1148 int fc_is_zero(const fp_value *a)
1150 return a->clss == FC_ZERO;

/* Returns non-zero if the value's sign bit is set
 * (body missing here — verify against the full source). */
1153 int fc_is_negative(const fp_value *a)

/* Returns non-zero if the value is (+/-) infinity. */
1158 int fc_is_inf(const fp_value *a)
1160 return a->clss == FC_INF;

/* Returns non-zero if the value is a NaN. */
1163 int fc_is_nan(const fp_value *a)
1165 return a->clss == FC_NAN;

/* Returns non-zero if the value is subnormal (denormalized). */
1168 int fc_is_subnormal(const fp_value *a)
1170 return a->clss == FC_SUBNORMAL;
/* Formats a value into 'buf' in one of several bases: decimal/hex float
 * via the host long double printf, or the raw packed bit pattern via
 * pack() and sc_print().
 * NOTE(review): the switch case labels for the base and for the value
 * classes are missing from this extract; comments describe the visible
 * code only. */
1173 char *fc_print(const fp_value *val, char *buf, int buflen, unsigned base)
1176 long double flt_val;

1178 mul_1 = (char*) alloca(calc_buffer_size);

/* decimal output */
1182 switch ((value_class_t)val->clss) {
1184 snprintf(buf, buflen, "%cINF", val->sign ? '-' : '+');
1187 snprintf(buf, buflen, "NaN");
1190 snprintf(buf, buflen, "0.0");
1193 flt_val = fc_val_to_ieee754(val);
1194 /* XXX 30 is arbitrary */
1195 snprintf(buf, buflen, "%.30LE", flt_val);

/* hexadecimal float output */
1200 switch ((value_class_t)val->clss) {
1202 snprintf(buf, buflen, "%cINF", val->sign ? '-' : '+');
1205 snprintf(buf, buflen, "NaN");
1208 snprintf(buf, buflen, "0.0");
1211 flt_val = fc_val_to_ieee754(val);
1212 snprintf(buf, buflen, "%LA", flt_val);

/* raw packed bit pattern */
1218 snprintf(buf, buflen, "%s", sc_print(pack(val, mul_1), value_size*4, SC_HEX, 0));
1219 buf[buflen - 1] = '\0';
/* Extracts one byte of the packed machine representation of 'value'.
 * The packed form is cached in a lazily allocated static buffer.
 * NOTE(review): braces are missing from this extract; whether the cache
 * is re-packed on every call or only when 'value' changes cannot be told
 * from the visible lines. */
1225 unsigned char fc_sub_bits(const fp_value *value, unsigned num_bits, unsigned byte_ofs)
1227 /* this is used to cache the packed version of the value */
1228 static char *packed_value = NULL;

1230 if (packed_value == NULL) packed_value = XMALLOCN(char, value_size);

1233 pack(value, packed_value);

1235 return sc_sub_bits(packed_value, num_bits, byte_ofs);
1238 /* Returns non-zero if the mantissa is zero, i.e. 1.0Exxx */
1239 int fc_zero_mantissa(const fp_value *value)
1241 return sc_get_lowest_set_bit(_mant(value)) == ROUNDING_BITS + value->desc.mantissa_size;
1244 /* Returns the exponent of a value. */
1245 int fc_get_exponent(const fp_value *value)
1247 int exp_bias = (1 << (value->desc.exponent_size - 1)) - 1;
1248 return sc_val_to_long(_exp(value)) - exp_bias;
1251 /* Return non-zero if a given value can be converted lossless into another precision */
/* A conversion is lossless when the (rebiased) exponent fits into the
 * target exponent field (0 and all-ones are reserved) and no mantissa
 * bit below the target's precision is set.
 * NOTE(review): the switch cases for the special value classes and some
 * braces are missing from this extract. */
1252 int fc_can_lossless_conv_to(const fp_value *value, const float_descriptor_t *desc)

1257 /* handle some special cases first */
1258 switch (value->clss) {

1267 /* check if the exponent can be encoded: note, 0 and all ones are reserved for the exponent */
1268 exp_bias = (1 << (desc->exponent_size - 1)) - 1;
1269 v = fc_get_exponent(value) + exp_bias;
1270 if (0 < v && v < (1 << desc->exponent_size) - 1) {
1271 /* exponent can be encoded, now check the mantissa */
1272 v = value->desc.mantissa_size + ROUNDING_BITS - sc_get_lowest_set_bit(_mant(value));
1273 return v <= (int)desc->mantissa_size;
1279 fc_rounding_mode_t fc_set_rounding_mode(fc_rounding_mode_t mode)
1281 if (mode == FC_TONEAREST || mode == FC_TOPOSITIVE || mode == FC_TONEGATIVE || mode == FC_TOZERO)
1282 rounding_mode = mode;
1284 return rounding_mode;
1287 fc_rounding_mode_t fc_get_rounding_mode(void)
1289 return rounding_mode;
/* One-time initialization: sets up strcalc with enough extra bits for
 * rounding/overflow, records the buffer sizes, allocates and clears the
 * shared calculation buffer and defaults the rounding mode.
 * NOTE(review): some braces are missing from this extract. */
1292 void init_fltcalc(int precision)
1294 if (calc_buffer == NULL) {
1295 /* does nothing if already init */
1296 if (precision == 0) precision = FC_DEFAULT_PRECISION;

/* +2 for explicit one and addition overflow, plus the rounding bits */
1298 init_strcalc(precision + 2 + ROUNDING_BITS);

1300 /* needs additionally rounding bits, one bit as explicit 1., and one for
1301  * addition overflow */
1302 max_precision = sc_get_precision() - (2 + ROUNDING_BITS);
1303 if (max_precision < precision)
1304 printf("WARNING: not enough precision available, using %d\n", max_precision);

1306 rounding_mode = FC_TONEAREST;
1307 value_size = sc_get_buffer_length();
/* fp_value has a 1-byte value[1] tail; add room for exp + mant */
1308 calc_buffer_size = sizeof(fp_value) + 2*value_size - 1;

1310 calc_buffer = (fp_value*) xmalloc(calc_buffer_size);
1311 memset(calc_buffer, 0, calc_buffer_size);
1315 void finish_fltcalc (void)
1317 free(calc_buffer); calc_buffer = NULL;
1320 /* definition of interface functions */
1321 fp_value *fc_add(const fp_value *a, const fp_value *b, fp_value *result)
1323 if (result == NULL) result = calc_buffer;
1325 /* make the value with the bigger exponent the first one */
1326 if (sc_comp(_exp(a), _exp(b)) == ir_relation_less)
1327 _fadd(b, a, result);
1329 _fadd(a, b, result);
1334 fp_value *fc_sub(const fp_value *a, const fp_value *b, fp_value *result)
1338 if (result == NULL) result = calc_buffer;
1340 temp = (fp_value*) alloca(calc_buffer_size);
1341 memcpy(temp, b, calc_buffer_size);
1342 temp->sign = !b->sign;
1343 if (sc_comp(_exp(a), _exp(temp)) == ir_relation_less)
1344 _fadd(temp, a, result);
1346 _fadd(a, temp, result);
1351 fp_value *fc_mul(const fp_value *a, const fp_value *b, fp_value *result)
1353 if (result == NULL) result = calc_buffer;
1355 _fmul(a, b, result);
1360 fp_value *fc_div(const fp_value *a, const fp_value *b, fp_value *result)
1362 if (result == NULL) result = calc_buffer;
1364 _fdiv(a, b, result);
1369 fp_value *fc_neg(const fp_value *a, fp_value *result)
1371 if (result == NULL) result = calc_buffer;
1374 memcpy(result, a, calc_buffer_size);
1375 result->sign = !a->sign;
/* Truncates a towards zero into result (or the shared buffer when result
 * is NULL).
 * NOTE(review): the body is missing from this extract — presumably
 * `_trunc(a, result); return result;`; verify against the full source. */
1380 fp_value *fc_int(const fp_value *a, fp_value *result)
1382 if (result == NULL) result = calc_buffer;
/* Rounds a to the nearest integral value — unimplemented, always panics. */
1389 fp_value *fc_rnd(const fp_value *a, fp_value *result)

1394 panic("not yet implemented");
1398  * convert a floating point value into an sc value ...
/* Converts an integral fp_value into an sc integer of dst_mode: shifts
 * the mantissa into integer position, checks for overflow against the
 * mode's bit size and negates for negative signed values.
 * NOTE(review): braces, return statements and some branches are missing
 * from this extract; several overflow cases are explicitly unhandled
 * (see the FIXMEs). */
1400 int fc_flt2int(const fp_value *a, void *result, ir_mode *dst_mode)
1402 if (a->clss == FC_NORMAL) {
1403 int exp_bias = (1 << (a->desc.exponent_size - 1)) - 1;
1404 int exp_val  = sc_val_to_long(_exp(a)) - exp_bias;

/* a negative value cannot go into an unsigned mode */
1409 if (a->sign && !mode_is_signed(dst_mode)) {
1410 /* FIXME: for now we cannot convert this */

1414 tgt_bits = get_mode_size_bits(dst_mode);
1415 if (mode_is_signed(dst_mode))

1418 assert(exp_val >= 0 && "floating point value not integral before fc_flt2int() call");
1419 mantissa_size = a->desc.mantissa_size + ROUNDING_BITS;
1420 shift         = exp_val - mantissa_size;

/* make sure the mantissa (incl. implicit one) fits the shift width */
1422 if (tgt_bits < mantissa_size + 1)
1423 tgt_bits = mantissa_size + 1;
/* positive shift moves the mantissa left, negative right */
1425 sc_shlI(_mant(a), shift, tgt_bits, 0, result);
1427 sc_shrI(_mant(a), -shift, tgt_bits, 0, result);

1430 /* check for overflow */
1431 highest = sc_get_highest_set_bit(result);

1433 if (mode_is_signed(dst_mode)) {
1434 if (highest == sc_get_lowest_set_bit(result)) {
1435 /* need extra test for MIN_INT */
1436 if (highest >= (int) get_mode_size_bits(dst_mode)) {
1437 /* FIXME: handle overflow */
1441 if (highest >= (int) get_mode_size_bits(dst_mode) - 1) {
1442 /* FIXME: handle overflow */
1447 if (highest >= (int) get_mode_size_bits(dst_mode)) {
1448 /* FIXME: handle overflow */

/* apply the sign for negative values */
1454 sc_neg(result, result);

1457 } else if (a->clss == FC_ZERO) {
1464 int fc_is_exact(void)