2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief tarval floating point calculations
24 * @author Mathias Heil
43 * portability stuff (why do we even care about the msvc people with their C89?)
/* Parse a decimal string into a long double.
 * Uses strtold() where C99 or POSIX.1-2001 guarantees it; otherwise falls
 * back to strtod(), silently losing extended precision (e.g. on MSVC/C89).
 * NOTE(review): the '{', '#else' and '#endif' lines are missing from this
 * extraction; the visible fragment is kept byte-identical. */
47 static long double string_to_long_double(const char *str)
49 #if __STDC_VERSION__ >= 199901L || _POSIX_C_SOURCE >= 200112L
50 return strtold(str, NULL);
52 return strtod(str, NULL);
/* NaN test: with C99 this presumably delegates to isnan(); the pre-C99
 * fallback relies on the IEEE property that NaN compares unequal to itself,
 * which breaks under fast-math style optimization. */
56 static bool my_isnan(long double val)
58 #if __STDC_VERSION__ >= 199901L
61 /* hopefully the compiler does not optimize aggressively (=incorrect) */
/* Infinity test: C99 path presumably uses isinf(); the fallback exploits
 * that inf - inf yields NaN while inf itself is not NaN.  Like my_isnan()
 * this is only valid without aggressive FP optimization. */
66 static bool my_isinf(long double val)
68 #if __STDC_VERSION__ >= 199901L
71 /* hopefully the compiler does not optimize aggressively (=incorrect) */
72 return my_isnan(val-val) && !my_isnan(val);
76 /** The number of extra precision rounding bits */
77 #define ROUNDING_BITS 2
/* The four WORDS_BIGENDIAN blocks below belong to endianness-dependent
 * struct/union layouts for the raw 8-byte and 10/12/16-byte long double
 * representations -- their field lines are missing from this extraction. */
81 #ifdef WORDS_BIGENDIAN
87 #ifdef WORDS_BIGENDIAN
94 #ifdef WORDS_BIGENDIAN
99 #ifdef WORDS_BIGENDIAN
/* volatile: defeat excess-precision/constant-folding when punning bits */
105 volatile long double d;
108 #define CLEAR_BUFFER(buffer) memset(buffer, 0, calc_buffer_size)
110 /* our floating point value */
112 float_descriptor_t desc;
115 char value[1]; /* exp[value_size] + mant[value_size] */
/* Accessors into the fp_value payload: first half is the exponent,
 * second half the mantissa, each value_size bytes of sc digits. */
118 #define _exp(a) &((a)->value[0])
119 #define _mant(a) &((a)->value[value_size])
121 #define _save_result(x) memcpy((x), sc_get_buffer(), value_size)
/* value_size*4: sc stores one hex digit per byte, so bit width = bytes*4 */
122 #define _shift_right(x, y, res) sc_shr((x), (y), value_size*4, 0, (res))
123 #define _shift_left(x, y, res) sc_shl((x), (y), value_size*4, 0, (res))
125 /** A temporary calculation buffer (returned when callers pass NULL). */
126 static fp_value *calc_buffer = NULL;
128 /** Current rounding mode.*/
129 static fc_rounding_mode_t rounding_mode;
131 static int calc_buffer_size;
132 static int value_size;
133 static int max_precision;
/** Set to 0 by the arithmetic helpers when the last result was inexact. */
136 static int fc_exact = 1;
138 /** Pack an fp_value into its machine-level bit layout
 * (sign | biased exponent | mantissa, rounding bits stripped) and store
 * the result as an sc value in 'packed'.  NaN and inf are canonicalized
 * via fc_get_qnan()/fc_get_plusinf() first (case labels missing here). */
139 static void *pack(const fp_value *int_float, void *packed)
143 fp_value *val_buffer;
146 temp = (char*) alloca(value_size);
147 shift_val = (char*) alloca(value_size);
149 switch ((value_class_t)int_float->clss) {
151 val_buffer = (fp_value*) alloca(calc_buffer_size);
152 fc_get_qnan(&int_float->desc, val_buffer);
153 int_float = val_buffer;
157 val_buffer = (fp_value*) alloca(calc_buffer_size);
158 fc_get_plusinf(&int_float->desc, val_buffer);
159 val_buffer->sign = int_float->sign;
160 int_float = val_buffer;
166 assert(int_float->desc.explicit_one <= 1);
168 /* pack sign: move it to the left after exponent AND mantissa */
169 sc_val_from_ulong(int_float->sign, temp);
171 pos = int_float->desc.exponent_size + int_float->desc.mantissa_size + int_float->desc.explicit_one;
172 sc_val_from_ulong(pos, NULL);
173 _shift_left(temp, sc_get_buffer(), packed);
175 /* pack exponent: move it to the left after mantissa */
176 pos = int_float->desc.mantissa_size + int_float->desc.explicit_one;
177 sc_val_from_ulong(pos, shift_val);
178 _shift_left(_exp(int_float), shift_val, temp);
180 /* combine sign|exponent */
181 sc_or(temp, packed, packed);
183 /* extract mantissa */
184 /* remove rounding bits */
185 sc_val_from_ulong(ROUNDING_BITS, shift_val);
186 _shift_right(_mant(int_float), shift_val, temp);
188 /* remove leading 1 (or 0 if denormalized) */
189 sc_max_from_bits(pos, 0, shift_val); /* all mantissa bits are 1's */
190 sc_and(temp, shift_val, temp);
192 /* combine sign|exponent|mantissa */
193 sc_or(temp, packed, packed);
199 * Normalize a fp_value: position the leading 1 just left of the radix
 * point, round according to the global rounding_mode using the
 * guard/round/sticky bits, and handle exponent under- and overflow.
201 * @return non-zero if result is exact (no bits were lost to rounding)
203 static int normalize(const fp_value *in_val, fp_value *out_val, int sticky)
/* hsb counts how far the highest set bit is from its normalized position */
207 char lsb, guard, round, round_dir = 0;
208 char *temp = (char*) alloca(value_size);
210 /* save rounding bits at the end */
211 hsb = ROUNDING_BITS + in_val->desc.mantissa_size - sc_get_highest_set_bit(_mant(in_val)) - 1;
213 if (in_val != out_val) {
214 out_val->sign = in_val->sign;
215 out_val->desc = in_val->desc;
218 out_val->clss = FC_NORMAL;
220 /* mantissa all zeros, so zero exponent (because of explicit one) */
221 if (hsb == ROUNDING_BITS + in_val->desc.mantissa_size) {
222 sc_val_from_ulong(0, _exp(out_val));
226 /* shift the first 1 into the left of the radix point (i.e. hsb == -1) */
229 sc_val_from_ulong(-hsb-1, temp);
231 _shift_right(_mant(in_val), temp, _mant(out_val));
233 /* remember if some bits were shifted away */
234 if (sc_had_carry()) {
238 sc_add(_exp(in_val), temp, _exp(out_val));
239 } else if (hsb > -1) {
241 sc_val_from_ulong(hsb+1, temp);
243 _shift_left(_mant(in_val), temp, _mant(out_val));
245 sc_sub(_exp(in_val), temp, _exp(out_val));
248 /* check for exponent underflow */
249 if (sc_is_negative(_exp(out_val)) || sc_is_zero(_exp(out_val))) {
250 /* exponent underflow */
251 /* shift the mantissa right to have a zero exponent */
252 sc_val_from_ulong(1, temp);
253 sc_sub(temp, _exp(out_val), NULL);
255 _shift_right(_mant(out_val), sc_get_buffer(), _mant(out_val));
256 if (sc_had_carry()) {
260 /* denormalized means exponent of zero */
261 sc_val_from_ulong(0, _exp(out_val));
263 out_val->clss = FC_SUBNORMAL;
266 /* perform rounding by adding a value that clears the guard bit and the round bit
267 * and either causes a carry to round up or not */
268 /* get the last 3 bits of the value */
269 lsb = sc_sub_bits(_mant(out_val), out_val->desc.mantissa_size + ROUNDING_BITS, 0) & 0x7;
270 guard = (lsb&0x2)>>1;
273 switch (rounding_mode) {
275 /* round to nearest representable value, if in doubt choose the version
277 round_dir = guard && (sticky || round || lsb>>2);
280 /* if positive: round to one if the exact value is bigger, else to zero */
281 round_dir = (!out_val->sign && (guard || round || sticky));
284 /* if negative: round to one if the exact value is bigger, else to zero */
285 round_dir = (out_val->sign && (guard || round || sticky));
288 /* always round to 0 (chopping mode) */
293 if (round_dir == 1) {
294 guard = (round^guard)<<1;
295 lsb = !(round || guard)<<2 | guard | round;
297 lsb = -((guard<<1) | round);
300 /* add the rounded value */
302 sc_val_from_long(lsb, temp);
303 sc_add(_mant(out_val), temp, _mant(out_val));
307 /* could have rounded down to zero */
308 if (sc_is_zero(_mant(out_val)) && (out_val->clss == FC_SUBNORMAL))
309 out_val->clss = FC_ZERO;
311 /* check for rounding overflow */
312 hsb = ROUNDING_BITS + out_val->desc.mantissa_size - sc_get_highest_set_bit(_mant(out_val)) - 1;
313 if ((out_val->clss != FC_SUBNORMAL) && (hsb < -1)) {
314 sc_val_from_ulong(1, temp);
315 _shift_right(_mant(out_val), temp, _mant(out_val));
316 if (exact && sc_had_carry())
318 sc_add(_exp(out_val), temp, _exp(out_val));
319 } else if ((out_val->clss == FC_SUBNORMAL) && (hsb == -1)) {
320 /* overflow caused the mantissa to be normal again,
321 * so adapt the exponent accordingly */
322 sc_val_from_ulong(1, temp);
323 sc_add(_exp(out_val), temp, _exp(out_val));
325 out_val->clss = FC_NORMAL;
327 /* no further rounding is needed, because rounding overflow means
328 * the carry of the original rounding was propagated all the way
329 * up to the bit left of the radix point. This implies the bits
330 * to the right are all zeros (rounding is +1) */
332 /* check for exponent overflow */
333 sc_val_from_ulong((1 << out_val->desc.exponent_size) - 1, temp);
334 if (sc_comp(_exp(out_val), temp) != -1) {
335 /* exponent overflow, reaction depends on rounding method:
337 * mode | sign of value | result
338 *--------------------------------------------------------------
339 * TO_NEAREST | + | +inf
341 *--------------------------------------------------------------
342 * TO_POSITIVE | + | +inf
343 * | - | smallest representable value
344 *--------------------------------------------------------------
345 * TO_NEGATIVE | + | largest representable value
347 *--------------------------------------------------------------
348 * TO_ZERO | + | largest representable value
349 * | - | smallest representable value
350 *--------------------------------------------------------------*/
351 if (out_val->sign == 0) {
352 /* value is positive */
353 switch (rounding_mode) {
356 out_val->clss = FC_INF;
361 fc_get_max(&out_val->desc, out_val);
364 /* value is negative */
365 switch (rounding_mode) {
368 out_val->clss = FC_INF;
373 fc_get_min(&out_val->desc, out_val);
381 * Operations involving NaN's must return NaN (propagate either operand's
 * NaN to 'result') and immediately leave the calling function.
382 * They are NOT exact.
384 #define handle_NAN(a, b, result) \
386 if (a->clss == FC_NAN) { \
387 if (a != result) memcpy(result, a, calc_buffer_size); \
391 if (b->clss == FC_NAN) { \
392 if (b != result) memcpy(result, b, calc_buffer_size); \
400 * calculate a + b, where a is the value with the bigger exponent
 * (callers fc_add/fc_sub guarantee this ordering).  Handles signed
 * addition/subtraction, all special classes, and sticky-bit tracking.
402 static void _fadd(const fp_value *a, const fp_value *b, fp_value *result)
412 handle_NAN(a, b, result);
414 /* make sure result has a descriptor */
415 if (result != a && result != b)
416 result->desc = a->desc;
418 /* determine if this is an addition or subtraction */
419 sign = a->sign ^ b->sign;
421 /* produce NaN on inf - inf */
422 if (sign && (a->clss == FC_INF) && (b->clss == FC_INF)) {
424 fc_get_qnan(&a->desc, result);
428 temp = (char*) alloca(value_size);
429 exp_diff = (char*) alloca(value_size);
431 /* get exponent difference */
432 sc_sub(_exp(a), _exp(b), exp_diff);
434 /* initially set sign to be the sign of a, special treatment of subtraction
435 * when exponents are equal is required though.
436 * Also special care about the sign is needed when the mantissas are equal
438 if (sign && sc_val_to_long(exp_diff) == 0) {
439 switch (sc_comp(_mant(a), _mant(b))) {
441 res_sign = a->sign; /* abs(a) is bigger and a is negative */
/* x - x yields -0 only in round-toward-negative mode (IEEE 754) */
444 res_sign = (rounding_mode == FC_TONEGATIVE);
447 res_sign = b->sign; /* abs(b) is bigger and b is negative */
450 /* can't be reached */
457 result->sign = res_sign;
459 /* sign has been taken care of, check for special cases */
460 if (a->clss == FC_ZERO || b->clss == FC_INF) {
462 memcpy(result, b, calc_buffer_size);
463 fc_exact = b->clss == FC_NORMAL;
464 result->sign = res_sign;
467 if (b->clss == FC_ZERO || a->clss == FC_INF) {
469 memcpy(result, a, calc_buffer_size);
470 fc_exact = a->clss == FC_NORMAL;
471 result->sign = res_sign;
475 /* shift the smaller value to the right to align the radix point */
476 /* subnormals have their radix point shifted to the right,
477 * take care of this first */
478 if ((b->clss == FC_SUBNORMAL) && (a->clss != FC_SUBNORMAL)) {
479 sc_val_from_ulong(1, temp);
480 sc_sub(exp_diff, temp, exp_diff);
483 _shift_right(_mant(b), exp_diff, temp);
484 sticky = sc_had_carry();
487 if (sticky && sign) {
488 /* if subtracting a little more than the represented value or adding a little
489 * more than the represented value to a negative value this, in addition to the
490 * still set sticky bit, takes account of the 'little more' */
491 char *temp1 = (char*) alloca(calc_buffer_size);
492 sc_val_from_ulong(1, temp1);
493 sc_add(temp, temp1, temp);
/* subtraction: always compute |larger| - |smaller| so mantissa stays >= 0 */
497 if (sc_comp(_mant(a), temp) == -1)
498 sc_sub(temp, _mant(a), _mant(result));
500 sc_sub(_mant(a), temp, _mant(result));
502 sc_add(_mant(a), temp, _mant(result));
505 /* _normalize expects a 'normal' radix point, adding two subnormals
506 * results in a subnormal radix point -> shifting before normalizing */
507 if ((a->clss == FC_SUBNORMAL) && (b->clss == FC_SUBNORMAL)) {
508 sc_val_from_ulong(1, NULL);
509 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
512 /* resulting exponent is the bigger one */
513 memmove(_exp(result), _exp(a), value_size);
515 fc_exact &= normalize(result, result, sticky);
/** Calculate a * b: handles NaN/zero/inf special cases (0*inf -> qNaN),
 * adds exponents (removing one bias), multiplies mantissas and realigns
 * the radix point before normalizing. */
521 static void _fmul(const fp_value *a, const fp_value *b, fp_value *result)
529 handle_NAN(a, b, result);
531 temp = (char*) alloca(value_size);
533 if (result != a && result != b)
534 result->desc = a->desc;
536 result->sign = res_sign = a->sign ^ b->sign;
538 /* produce NaN on 0 * inf */
539 if (a->clss == FC_ZERO) {
540 if (b->clss == FC_INF) {
541 fc_get_qnan(&a->desc, result);
545 memcpy(result, a, calc_buffer_size);
546 result->sign = res_sign;
550 if (b->clss == FC_ZERO) {
551 if (a->clss == FC_INF) {
552 fc_get_qnan(&a->desc, result);
556 memcpy(result, b, calc_buffer_size);
557 result->sign = res_sign;
562 if (a->clss == FC_INF) {
565 memcpy(result, a, calc_buffer_size);
566 result->sign = res_sign;
569 if (b->clss == FC_INF) {
572 memcpy(result, b, calc_buffer_size);
573 result->sign = res_sign;
577 /* exp = exp(a) + exp(b) - excess */
578 sc_add(_exp(a), _exp(b), _exp(result));
580 sc_val_from_ulong((1 << (a->desc.exponent_size - 1)) - 1, temp);
581 sc_sub(_exp(result), temp, _exp(result));
583 /* mixed normal, subnormal values introduce an error of 1, correct it */
584 if ((a->clss == FC_SUBNORMAL) ^ (b->clss == FC_SUBNORMAL)) {
585 sc_val_from_ulong(1, temp);
586 sc_add(_exp(result), temp, _exp(result));
589 sc_mul(_mant(a), _mant(b), _mant(result));
591 /* realign result: after a multiplication the digits right of the radix
592 * point are the sum of the factors' digits after the radix point. As all
593 * values are normalized they both have the same amount of these digits,
594 * which has to be restored by proper shifting
595 * because of the rounding bits */
596 sc_val_from_ulong(ROUNDING_BITS + result->desc.mantissa_size, temp);
598 _shift_right(_mant(result), temp, _mant(result));
599 sticky = sc_had_carry();
602 fc_exact &= normalize(result, result, sticky);
/** Calculate a / b: produces qNaN for 0/0 and inf/inf, signed inf for
 * division by zero, subtracts exponents and divides a widened dividend
 * by a narrowed divisor to keep full precision before normalizing. */
608 static void _fdiv(const fp_value *a, const fp_value *b, fp_value *result)
611 char *temp, *dividend;
616 handle_NAN(a, b, result);
618 temp = (char*) alloca(value_size);
619 dividend = (char*) alloca(value_size);
621 if (result != a && result != b)
622 result->desc = a->desc;
624 result->sign = res_sign = a->sign ^ b->sign;
626 /* produce FC_NAN on 0/0 and inf/inf */
627 if (a->clss == FC_ZERO) {
628 if (b->clss == FC_ZERO) {
630 fc_get_qnan(&a->desc, result);
635 memcpy(result, a, calc_buffer_size);
636 result->sign = res_sign;
641 if (b->clss == FC_INF) {
643 if (a->clss == FC_INF) {
645 fc_get_qnan(&a->desc, result);
/* finite / inf == signed zero */
648 sc_val_from_ulong(0, NULL);
649 _save_result(_exp(result));
650 _save_result(_mant(result));
651 result->clss = FC_ZERO;
656 if (a->clss == FC_INF) {
660 memcpy(result, a, calc_buffer_size);
661 result->sign = res_sign;
664 if (b->clss == FC_ZERO) {
666 /* division by zero */
668 fc_get_minusinf(&a->desc, result);
670 fc_get_plusinf(&a->desc, result);
674 /* exp = exp(a) - exp(b) + excess - 1*/
675 sc_sub(_exp(a), _exp(b), _exp(result));
676 sc_val_from_ulong((1 << (a->desc.exponent_size - 1)) - 2, temp);
677 sc_add(_exp(result), temp, _exp(result));
679 /* mixed normal, subnormal values introduce an error of 1, correct it */
680 if ((a->clss == FC_SUBNORMAL) ^ (b->clss == FC_SUBNORMAL)) {
681 sc_val_from_ulong(1, temp);
682 sc_add(_exp(result), temp, _exp(result));
685 /* mant(res) = mant(a) / 1/2mant(b) */
686 /* to gain more bits of precision in the result the dividend could be
687 * shifted left, as this operation does not lose bits. This would not
688 * fit into the integer precision, but due to the rounding bits (which
689 * are always zero because the values are all normalized) the divisor
690 * can be shifted right instead to achieve the same result */
691 sc_val_from_ulong(ROUNDING_BITS + result->desc.mantissa_size, temp);
693 _shift_left(_mant(a), temp, dividend);
696 char *divisor = (char*) alloca(calc_buffer_size);
697 sc_val_from_ulong(1, divisor);
698 _shift_right(_mant(b), divisor, divisor);
699 sc_div(dividend, divisor, _mant(result));
700 sticky = sc_had_carry();
704 fc_exact &= normalize(result, result, sticky);
708 * Truncate the fractional part away (round toward zero).
710 * This does not clip to any integer range.
712 static void _trunc(const fp_value *a, fp_value *result)
715 * When exponent == 0 all bits left of the radix point
716 * are the integral part of the value. For 15bit exp_size
717 * this would require a left shift of max. 16383 bits which
719 * But it is enough to ensure that no bit right of the radix
720 * point remains set. This restricts the interesting
721 * exponents to the interval [0, mant_size-1].
722 * Outside this interval the truncated value is either 0 or
723 * it does not have fractional parts.
726 int exp_bias, exp_val;
729 /* fixme: can be exact */
732 temp = (char*) alloca(value_size);
735 result->desc = a->desc;
736 result->clss = a->clss;
739 exp_bias = (1 << (a->desc.exponent_size - 1)) - 1;
740 exp_val = sc_val_to_long(_exp(a)) - exp_bias;
/* |a| < 1: truncation yields zero (the guarding 'if' line is missing) */
743 sc_val_from_ulong(0, NULL);
744 _save_result(_exp(result));
745 _save_result(_mant(result));
746 result->clss = FC_ZERO;
751 if (exp_val > (long)a->desc.mantissa_size) {
/* already integral: copy through unchanged */
753 memcpy(result, a, calc_buffer_size);
758 /* set up a proper mask to delete all bits right of the
759 * radix point if the mantissa had been shifted until exp == 0 */
760 sc_max_from_bits(1 + exp_val, 0, temp);
761 sc_val_from_long(a->desc.mantissa_size - exp_val + 2, NULL);
762 _shift_left(temp, sc_get_buffer(), temp);
764 /* and the mask and return the result */
765 sc_and(_mant(a), temp, _mant(result));
768 memcpy(_exp(result), _exp(a), value_size);
769 result->sign = a->sign;
774 * functions defined in fltcalc.h
/** Return the shared internal calculation buffer. */
776 const void *fc_get_buffer(void)
/** Return the size in bytes of an fp_value calculation buffer. */
781 int fc_get_buffer_length(void)
783 return calc_buffer_size;
/** Parse a (non NUL-terminated) decimal string of 'len' chars into an
 * fp_value of the given descriptor.  Goes through the host long double
 * (x87 80-bit descriptor: 15/63/explicit-one) and then fc_cast()s down,
 * so precision is limited to the host's long double. */
786 void *fc_val_from_str(const char *str, size_t len,
787 const float_descriptor_t *desc, void *result)
791 /* XXX excuse of an implementation to make things work */
793 fp_value *tmp = (fp_value*) alloca(calc_buffer_size);
794 float_descriptor_t tmp_desc;
/* NOTE(review): the line NUL-terminating 'buffer' (buffer[len] = '\0')
 * is missing from this extraction -- verify against the full source. */
796 buffer = (char*) alloca(len+1);
797 memcpy(buffer, str, len);
799 val = string_to_long_double(buffer);
801 tmp_desc.exponent_size = 15;
802 tmp_desc.mantissa_size = 63;
803 tmp_desc.explicit_one = 1;
804 fc_val_from_ieee754(val, &tmp_desc, tmp);
806 return fc_cast(tmp, desc, (fp_value*) result);
/** Decode a host long double (8-byte IEEE double or x86 80-bit extended,
 * chosen by sizeof(long double)) into an fp_value with descriptor 'desc'.
 * Bit extraction assumes the layouts declared in the val_ld8/val_ld12
 * unions near the top of this file. */
809 fp_value *fc_val_from_ieee754(long double l, const float_descriptor_t *desc,
813 int bias_res, bias_val, mant_val;
816 uint32_t exponent, mantissa0, mantissa1;
817 size_t long_double_size = sizeof(long double);
820 bias_res = ((1 << (desc->exponent_size - 1)) - 1);
822 if (long_double_size == 8) {
825 sign = (srcval.val_ld8.high & 0x80000000) != 0;
826 exponent = (srcval.val_ld8.high & 0x7FF00000) >> 20;
827 mantissa0 = srcval.val_ld8.high & 0x000FFFFF;
828 mantissa1 = srcval.val_ld8.low;
830 /* we assume an x86-like 80bit representation of the value... */
831 assert(sizeof(long double)==12 || sizeof(long double)==16);
834 sign = (srcval.val_ld12.high & 0x00008000) != 0;
835 exponent = (srcval.val_ld12.high & 0x00007FFF) ;
836 mantissa0 = srcval.val_ld12.mid;
837 mantissa1 = srcval.val_ld12.low;
841 result = calc_buffer;
842 temp = (char*) alloca(value_size);
844 /* CLEAR the buffer, else some bits might be uninitialized */
845 memset(result, 0, fc_get_buffer_length());
847 result->desc = *desc;
848 result->clss = FC_NORMAL;
851 /* sign and flag suffice to identify NaN or inf, no exponent/mantissa
852 * encoding is needed. the function can return immediately in these cases */
854 result->clss = FC_NAN;
856 } else if (my_isinf(l)) {
857 result->clss = FC_INF;
861 /* build exponent, because input and output exponent and mantissa sizes may differ
862 * this looks more complicated than it is: unbiased input exponent + output bias,
863 * minus the mantissa difference which is added again later when the output float
864 * becomes normalized */
865 sc_val_from_long((exponent - bias_val + bias_res) - (mant_val - desc->mantissa_size), _exp(result));
867 /* build mantissa representation */
869 /* insert the hidden bit */
870 sc_val_from_ulong(1, temp);
871 sc_val_from_ulong(mant_val + ROUNDING_BITS, NULL);
872 _shift_left(temp, sc_get_buffer(), NULL);
875 sc_val_from_ulong(0, NULL);
878 _save_result(_mant(result));
880 /* bits from the upper word */
881 sc_val_from_ulong(mantissa0, temp);
/* 34 = 32 bits of the low word + ROUNDING_BITS */
882 sc_val_from_ulong(34, NULL);
883 _shift_left(temp, sc_get_buffer(), temp);
884 sc_or(_mant(result), temp, _mant(result));
886 /* bits from the lower word */
887 sc_val_from_ulong(mantissa1, temp);
888 sc_val_from_ulong(ROUNDING_BITS, NULL);
889 _shift_left(temp, sc_get_buffer(), temp);
890 sc_or(_mant(result), temp, _mant(result));
892 /* _normalize expects the radix point to be normal, so shift mantissa of subnormal
893 * origin one to the left */
895 sc_val_from_ulong(1, NULL);
896 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
899 normalize(result, result, 0);
/** Encode an fp_value back into the host's long double representation:
 * cast to the host descriptor (double or x87 extended), strip the
 * rounding bits, then assemble the raw words byte by byte. */
904 long double fc_val_to_ieee754(const fp_value *val)
907 fp_value *temp = NULL;
909 unsigned byte_offset;
917 float_descriptor_t desc;
918 unsigned mantissa_size;
920 size_t long_double_size = sizeof(long double);
922 if (long_double_size == 8) {
923 desc.exponent_size = 11;
924 desc.mantissa_size = 52;
925 desc.explicit_one = 0;
927 desc.exponent_size = 15;
928 desc.mantissa_size = 63;
929 desc.explicit_one = 1;
931 mantissa_size = desc.mantissa_size + desc.explicit_one;
933 temp = (fp_value*) alloca(calc_buffer_size);
934 value = fc_cast(val, &desc, temp);
938 /* @@@ long double exponent is 15bit, so the use of sc_val_to_long should not
939 * lead to wrong results */
940 exponent = sc_val_to_long(_exp(value)) ;
942 sc_val_from_ulong(ROUNDING_BITS, NULL);
943 _shift_right(_mant(value), sc_get_buffer(), _mant(value));
/* low 4 bytes of the mantissa -> mantissa1, the rest -> mantissa0 */
948 for (byte_offset = 0; byte_offset < 4; byte_offset++)
949 mantissa1 |= sc_sub_bits(_mant(value), mantissa_size, byte_offset) << (byte_offset << 3);
951 for (; (byte_offset<<3) < desc.mantissa_size; byte_offset++)
952 mantissa0 |= sc_sub_bits(_mant(value), mantissa_size, byte_offset) << ((byte_offset - 4) << 3);
954 if (long_double_size == 8) {
955 mantissa0 &= 0x000FFFFF; /* get rid of garbage */
956 buildval.val_ld8.high = sign << 31;
957 buildval.val_ld8.high |= exponent << 20;
958 buildval.val_ld8.high |= mantissa0;
959 buildval.val_ld8.low = mantissa1;
961 buildval.val_ld12.high = sign << 15;
962 buildval.val_ld12.high |= exponent;
963 buildval.val_ld12.mid = mantissa0;
964 buildval.val_ld12.low = mantissa1;
/** Convert 'value' to a different float descriptor.  Identical descriptors
 * are a plain copy; NaN keeps its quiet/signaling kind, inf its sign.
 * Otherwise the exponent is rebiased, the mantissa re-aligned, and the
 * result rounded via normalize(). */
970 fp_value *fc_cast(const fp_value *value, const float_descriptor_t *desc,
974 int exp_offset, val_bias, res_bias;
976 if (result == NULL) result = calc_buffer;
977 temp = (char*) alloca(value_size);
979 if (value->desc.exponent_size == desc->exponent_size &&
980 value->desc.mantissa_size == desc->mantissa_size &&
981 value->desc.explicit_one == desc->explicit_one) {
983 memcpy(result, value, calc_buffer_size);
987 if (value->clss == FC_NAN) {
/* quiet NaN iff the mantissa msb (position mantissa_size+1) is set */
988 if (sc_get_highest_set_bit(_mant(value)) == value->desc.mantissa_size + 1)
989 return fc_get_qnan(desc, result);
991 return fc_get_snan(desc, result);
993 else if (value->clss == FC_INF) {
994 if (value->sign == 0)
995 return fc_get_plusinf(desc, result);
997 return fc_get_minusinf(desc, result);
1000 /* set the descriptor of the new value */
1001 result->desc = *desc;
1002 result->clss = value->clss;
1003 result->sign = value->sign;
1005 /* when the mantissa sizes differ normalizing has to shift to align it.
1006 * this would change the exponent, which is unwanted. So calculate this
1007 * offset and add it */
1008 val_bias = (1 << (value->desc.exponent_size - 1)) - 1;
1009 res_bias = (1 << (desc->exponent_size - 1)) - 1;
1011 exp_offset = (res_bias - val_bias) - (value->desc.mantissa_size - desc->mantissa_size);
1012 sc_val_from_long(exp_offset, temp);
1013 sc_add(_exp(value), temp, _exp(result));
1015 /* _normalize expects normalized radix point */
1016 if (value->clss == FC_SUBNORMAL) {
1017 sc_val_from_ulong(1, NULL);
1018 _shift_left(_mant(value), sc_get_buffer(), _mant(result));
1019 } else if (value != result) {
1020 memcpy(_mant(result), _mant(value), value_size);
1022 memmove(_mant(result), _mant(value), value_size);
1025 normalize(result, result, 0);
/** Build the largest finite value of 'desc': max exponent minus one,
 * all mantissa bits set (shifted past the rounding bits). */
1029 fp_value *fc_get_max(const float_descriptor_t *desc, fp_value *result)
1031 if (result == NULL) result = calc_buffer;
1033 result->desc = *desc;
1034 result->clss = FC_NORMAL;
1037 sc_val_from_ulong((1 << desc->exponent_size) - 2, _exp(result));
1039 sc_max_from_bits(desc->mantissa_size + 1, 0, _mant(result));
1040 sc_val_from_ulong(ROUNDING_BITS, NULL);
1041 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
/** Build the most negative finite value: fc_get_max() with the sign
 * flipped (the sign-setting line is missing from this extraction). */
1046 fp_value *fc_get_min(const float_descriptor_t *desc, fp_value *result)
1048 if (result == NULL) result = calc_buffer;
1050 fc_get_max(desc, result);
/** Build a signaling NaN: all-ones exponent, mantissa == 1 (msb clear). */
1056 fp_value *fc_get_snan(const float_descriptor_t *desc, fp_value *result)
1058 if (result == NULL) result = calc_buffer;
1060 result->desc = *desc;
1061 result->clss = FC_NAN;
1064 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));
1066 /* signaling NaN has non-zero mantissa with msb not set */
1067 sc_val_from_ulong(1, _mant(result));
/** Build a quiet NaN: all-ones exponent, mantissa msb set. */
1072 fp_value *fc_get_qnan(const float_descriptor_t *desc, fp_value *result)
1074 if (result == NULL) result = calc_buffer;
1076 result->desc = *desc;
1077 result->clss = FC_NAN;
1080 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));
1082 /* quiet NaN has the msb of the mantissa set, so shift one there */
1083 sc_val_from_ulong(1, _mant(result));
1084 /* mantissa_size >+< 1 because of two extra rounding bits */
1085 sc_val_from_ulong(desc->mantissa_size + 1, NULL);
1086 _shift_left(_mant(result), sc_get_buffer(), _mant(result));
/** Build +infinity: all-ones exponent, zero mantissa (plus the explicit
 * integer bit when the format has one, e.g. x87 extended). */
1091 fp_value *fc_get_plusinf(const float_descriptor_t *desc, fp_value *result)
1095 if (result == NULL) result = calc_buffer;
1097 result->desc = *desc;
1098 result->clss = FC_INF;
1101 sc_val_from_ulong((1 << desc->exponent_size) - 1, _exp(result));
1103 mant = _mant(result);
1104 sc_val_from_ulong(0, mant);
1105 if (desc->explicit_one) {
1106 sc_set_bit_at(mant, result->desc.mantissa_size + ROUNDING_BITS);
/** Build -infinity: fc_get_plusinf() with the sign flipped (the
 * sign-setting line is missing from this extraction). */
1112 fp_value *fc_get_minusinf(const float_descriptor_t *desc, fp_value *result)
1114 if (result == NULL) result = calc_buffer;
1116 fc_get_plusinf(desc, result);
/** Three-way compare: returns -1 (a < b), 0 (equal), 1 (a > b) or
 * 2 for unordered (at least one NaN).  Signed zeros compare equal. */
1122 int fc_comp(const fp_value *val_a, const fp_value *val_b)
1127 * shortcut: if both values are identical, they are either
1128 * Unordered if NaN or equal
1131 return val_a->clss == FC_NAN ? 2 : 0;
1133 /* unordered if one is a NaN */
1134 if (val_a->clss == FC_NAN || val_b->clss == FC_NAN)
1137 /* zero is equal independent of sign */
1138 if ((val_a->clss == FC_ZERO) && (val_b->clss == FC_ZERO))
1141 /* different signs make compare easy */
1142 if (val_a->sign != val_b->sign)
1143 return (val_a->sign == 0) ? (1) : (-1);
/* both negative: magnitude comparison result must be inverted */
1145 mul = val_a->sign ? -1 : 1;
1147 /* both infinity means equality */
1148 if ((val_a->clss == FC_INF) && (val_b->clss == FC_INF))
1151 /* infinity is bigger than the rest */
1152 if (val_a->clss == FC_INF)
1154 if (val_b->clss == FC_INF)
1157 /* check exponent first, then mantissa if exponents are equal */
1158 switch (sc_comp(_exp(val_a), _exp(val_b))) {
1164 return sc_comp(_mant(val_a), _mant(val_b)) * mul;
/** Class predicates: each simply tests the value's class tag
 * (fc_is_negative presumably tests the sign; its body is missing). */
1170 int fc_is_zero(const fp_value *a)
1172 return a->clss == FC_ZERO;
1175 int fc_is_negative(const fp_value *a)
1180 int fc_is_inf(const fp_value *a)
1182 return a->clss == FC_INF;
1185 int fc_is_nan(const fp_value *a)
1187 return a->clss == FC_NAN;
1190 int fc_is_subnormal(const fp_value *a)
1192 return a->clss == FC_SUBNORMAL;
/** Format 'val' into buf: decimal (%LE) or hex-float (%LA) via the host
 * long double, or the raw packed bit pattern as hex digits.  The two
 * switch blocks presumably correspond to different 'base' modes; the
 * case labels are missing from this extraction. */
1195 char *fc_print(const fp_value *val, char *buf, int buflen, unsigned base)
1198 long double flt_val;
1200 mul_1 = (char*) alloca(calc_buffer_size);
1204 switch ((value_class_t)val->clss) {
1206 snprintf(buf, buflen, "%cINF", val->sign ? '-' : '+');
1209 snprintf(buf, buflen, "NaN");
1212 snprintf(buf, buflen, "0.0");
1215 flt_val = fc_val_to_ieee754(val);
1216 /* XXX 30 is arbitrary */
1217 snprintf(buf, buflen, "%.30LE", flt_val);
1222 switch ((value_class_t)val->clss) {
1224 snprintf(buf, buflen, "%cINF", val->sign ? '-' : '+');
1227 snprintf(buf, buflen, "NaN");
1230 snprintf(buf, buflen, "0.0");
1233 flt_val = fc_val_to_ieee754(val);
1234 snprintf(buf, buflen, "%LA", flt_val);
/* raw mode: print the machine bit pattern of the packed value in hex */
1240 snprintf(buf, buflen, "%s", sc_print(pack(val, mul_1), value_size*4, SC_HEX, 0));
1241 buf[buflen - 1] = '\0';
/** Extract byte 'byte_ofs' of the packed machine representation of
 * 'value'.  Uses a function-local static cache buffer, so this is not
 * thread-safe. */
1247 unsigned char fc_sub_bits(const fp_value *value, unsigned num_bits, unsigned byte_ofs)
1249 /* this is used to cache the packed version of the value */
1250 static char *packed_value = NULL;
1252 if (packed_value == NULL) packed_value = XMALLOCN(char, value_size);
1255 pack(value, packed_value);
1257 return sc_sub_bits(packed_value, num_bits, byte_ofs);
1260 /* Returns non-zero if the mantissa is zero, i.e. 1.0Exxx
 * (only the implicit leading one remains set, just past the rounding bits). */
1261 int fc_zero_mantissa(const fp_value *value)
1263 return sc_get_lowest_set_bit(_mant(value)) == ROUNDING_BITS + value->desc.mantissa_size;
1266 /* Returns the unbiased exponent of a value. */
1267 int fc_get_exponent(const fp_value *value)
1269 int exp_bias = (1 << (value->desc.exponent_size - 1)) - 1;
1270 return sc_val_to_long(_exp(value)) - exp_bias;
1273 /* Return non-zero if a given value can be converted lossless into another precision:
 * the rebiased exponent must fit the target's encodable range and the
 * number of significant mantissa bits must not exceed the target's. */
1274 int fc_can_lossless_conv_to(const fp_value *value, const float_descriptor_t *desc)
1279 /* handle some special cases first */
1280 switch (value->clss) {
1289 /* check if the exponent can be encoded: note, 0 and all ones are reserved for the exponent */
1290 exp_bias = (1 << (desc->exponent_size - 1)) - 1;
1291 v = fc_get_exponent(value) + exp_bias;
1292 if (0 < v && v < (1 << desc->exponent_size) - 1) {
1293 /* exponent can be encoded, now check the mantissa */
1294 v = value->desc.mantissa_size + ROUNDING_BITS - sc_get_lowest_set_bit(_mant(value));
1295 return v <= (int)desc->mantissa_size;
/** Set the global rounding mode; invalid modes are ignored.
 * Returns the mode that is in effect afterwards. */
1301 fc_rounding_mode_t fc_set_rounding_mode(fc_rounding_mode_t mode)
1303 if (mode == FC_TONEAREST || mode == FC_TOPOSITIVE || mode == FC_TONEGATIVE || mode == FC_TOZERO)
1304 rounding_mode = mode;
1306 return rounding_mode;
/** Return the current global rounding mode. */
1309 fc_rounding_mode_t fc_get_rounding_mode(void)
1311 return rounding_mode;
/** One-time module initialization: sets up strcalc with enough extra bits
 * (rounding + explicit one + add-overflow), the default rounding mode and
 * the shared calculation buffer.  Subsequent calls are no-ops. */
1314 void init_fltcalc(int precision)
1316 if (calc_buffer == NULL) {
1317 /* does nothing if already init */
1318 if (precision == 0) precision = FC_DEFAULT_PRECISION;
1320 init_strcalc(precision + 2 + ROUNDING_BITS);
1322 /* needs additionally rounding bits, one bit as explicit 1., and one for
1323 * addition overflow */
1324 max_precision = sc_get_precision() - (2 + ROUNDING_BITS);
1325 if (max_precision < precision)
1326 printf("WARNING: not enough precision available, using %d\n", max_precision);
1328 rounding_mode = FC_TONEAREST;
1329 value_size = sc_get_buffer_length();
/* fp_value has a 1-byte value[] tail; it must hold exp + mant */
1330 calc_buffer_size = sizeof(fp_value) + 2*value_size - 1;
1332 calc_buffer = (fp_value*) xmalloc(calc_buffer_size);
1333 memset(calc_buffer, 0, calc_buffer_size);
/** Release the module's shared buffer; safe to call more than once. */
1337 void finish_fltcalc (void)
1339 free(calc_buffer); calc_buffer = NULL;
1342 /* definition of interface functions */
/** result = a + b; passing result == NULL uses the shared buffer. */
1343 fp_value *fc_add(const fp_value *a, const fp_value *b, fp_value *result)
1345 if (result == NULL) result = calc_buffer;
1347 /* make the value with the bigger exponent the first one */
1348 if (sc_comp(_exp(a), _exp(b)) == -1)
1349 _fadd(b, a, result);
1351 _fadd(a, b, result);
/** result = a - b, implemented as a + (-b) via a sign-flipped copy. */
1356 fp_value *fc_sub(const fp_value *a, const fp_value *b, fp_value *result)
1360 if (result == NULL) result = calc_buffer;
1362 temp = (fp_value*) alloca(calc_buffer_size);
1363 memcpy(temp, b, calc_buffer_size);
1364 temp->sign = !b->sign;
1365 if (sc_comp(_exp(a), _exp(temp)) == -1)
1366 _fadd(temp, a, result);
1368 _fadd(a, temp, result);
/** result = a * b; NULL result uses the shared buffer. */
1373 fp_value *fc_mul(const fp_value *a, const fp_value *b, fp_value *result)
1375 if (result == NULL) result = calc_buffer;
1377 _fmul(a, b, result);
/** result = a / b; NULL result uses the shared buffer. */
1382 fp_value *fc_div(const fp_value *a, const fp_value *b, fp_value *result)
1384 if (result == NULL) result = calc_buffer;
1386 _fdiv(a, b, result);
/** result = -a (copy with flipped sign bit). */
1391 fp_value *fc_neg(const fp_value *a, fp_value *result)
1393 if (result == NULL) result = calc_buffer;
1396 memcpy(result, a, calc_buffer_size);
1397 result->sign = !a->sign;
/** result = trunc(a) -- presumably delegates to _trunc() (body missing). */
1402 fp_value *fc_int(const fp_value *a, fp_value *result)
1404 if (result == NULL) result = calc_buffer;
/** Round to nearest integral value -- not implemented, always panics. */
1411 fp_value *fc_rnd(const fp_value *a, fp_value *result)
1416 panic("not yet implemented");
1420 * convert a floating point value into an sc value: shift the mantissa to
 * its integral position and negate for negative inputs.  The value must
 * already be integral (see the assert).  Several overflow cases are only
 * flagged with FIXMEs, not handled.
1422 int fc_flt2int(const fp_value *a, void *result, ir_mode *dst_mode)
1424 if (a->clss == FC_NORMAL) {
1425 int exp_bias = (1 << (a->desc.exponent_size - 1)) - 1;
1426 int exp_val = sc_val_to_long(_exp(a)) - exp_bias;
1431 if (a->sign && !mode_is_signed(dst_mode)) {
1432 /* FIXME: for now we cannot convert this */
1436 tgt_bits = get_mode_size_bits(dst_mode);
1437 if (mode_is_signed(dst_mode))
1440 assert(exp_val >= 0 && "floating point value not integral before fc_flt2int() call");
1441 mantissa_size = a->desc.mantissa_size + ROUNDING_BITS;
1442 shift = exp_val - mantissa_size;
/* widen the working bit width so the shifted mantissa fits */
1444 if (tgt_bits < mantissa_size + 1)
1445 tgt_bits = mantissa_size + 1;
1447 sc_shlI(_mant(a), shift, tgt_bits, 0, result);
1449 sc_shrI(_mant(a), -shift, tgt_bits, 0, result);
1452 /* check for overflow */
1453 highest = sc_get_highest_set_bit(result);
1455 if (mode_is_signed(dst_mode)) {
1456 if (highest == sc_get_lowest_set_bit(result)) {
1457 /* need extra test for MIN_INT */
1458 if (highest >= (int) get_mode_size_bits(dst_mode)) {
1459 /* FIXME: handle overflow */
1463 if (highest >= (int) get_mode_size_bits(dst_mode) - 1) {
1464 /* FIXME: handle overflow */
1469 if (highest >= (int) get_mode_size_bits(dst_mode)) {
1470 /* FIXME: handle overflow */
1476 sc_neg(result, result);
1479 } else if (a->clss == FC_ZERO) {
1486 int fc_is_exact(void)