/* sreal.c (revision 302408)  */
/* Simple data type for positive real numbers for the GNU compiler.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   The value of an sreal is
	x = sig * 2 ^ exp
   where
	sig = significand
	  (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
	exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and wider)
   machines, otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the significand bits are used (in normalized sreals), so that
   we do not run into overflow, for example when c->sig = a->sig * b->sig.
   The precision is therefore 32 bits on both 64-bit and 32-bit machines.

   Invariant: the numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
	 SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
	-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is clamped to the upper bounds of
   these conditions.

   If the number is zero or would be too small, it meets the following
   conditions:
	sig == 0 && exp == -SREAL_MAX_EXP
*/
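
/* Illustrative example of the representation (assuming the single-part layout
   with SREAL_PART_BITS == 32, SREAL_MIN_SIG == 2^31 and
   SREAL_MAX_SIG == 2^32 - 1 from sreal.h):

	0.75 = 3 * 2^-2

   is stored, after normalization, as sig == 3 << 30 and exp == -32, because
   (3 * 2^30) * 2^-32 == 3/4 and 3 << 30 lies within
   [SREAL_MIN_SIG, SREAL_MAX_SIG].  */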

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "sreal.h"

static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);

/* Print the content of struct sreal.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
	   HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
	   x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}

/* Copy the sreal number.  */

static inline void
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = a->sig_lo;
  r->sig_hi = a->sig_hi;
#else
  r->sig = a->sig;
#endif
  r->exp = a->exp;
}

/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */
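
/* For instance, an illustrative example of the rounding described above:
   shifting a significand of 11 (binary 1011) right by S == 2 first adds
   1 << 1, giving 13 (binary 1101), and then shifts, giving 3; together with
   exp += 2 the value 11 * 2^exp becomes 3 * 2^(exp + 2) == 12 * 2^exp,
   i.e. the value is rounded to the nearest representable one.  */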

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* The exponent should never become so large, because shift_right is used
     only by sreal_add and sreal_sub, and thus the number cannot be shifted
     out of the exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_hi++;
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	}
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}

/* Normalize *X.  */
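
/* Illustrative example (assuming SREAL_PART_BITS == 32 and therefore
   SREAL_MIN_SIG == 2^31, as defined in sreal.h): normalizing sig == 5,
   exp == 0 shifts the significand left 29 times, giving sig == 5 << 29 and
   exp == -29, so the represented value 5 * 2^0 is unchanged.  */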

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      if (x->sig_hi == 0)
	{
	  /* Move the lower part of the significand to the higher part.  */
	  x->sig_hi = x->sig_lo;
	  x->sig_lo = 0;
	  x->exp -= SREAL_PART_BITS;
	}
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
	{
	  x->sig_hi <<= 1;
	  x->exp--;
	  shift++;
	}
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig_hi = 0;
	  x->sig_lo = 0;
	}
      else if (shift)
	{
	  mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
	  x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
	  x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
	}
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
	{
	  tmp >>= 1;
	  shift++;
	}
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
		    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	  x->sig_hi++;
	  if (x->sig_hi > SREAL_MAX_SIG)
	    {
	      /* x->sig_hi was SREAL_MAX_SIG before the increment,
		 so now the last bit is zero.  */
	      x->sig_hi >>= 1;
	      x->sig_lo >>= 1;
	      x->exp++;
	    }
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig_hi = SREAL_MAX_SIG;
	  x->sig_lo = SREAL_MAX_SIG;
	}
    }
#else
  if (x->sig == 0)
    {
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      do
	{
	  x->sig <<= 1;
	  x->exp--;
	}
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig = 0;
	}
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      do
	{
	  last_bit = x->sig & 1;
	  x->sig >>= 1;
	  x->exp++;
	}
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
	{
	  x->sig >>= 1;
	  x->exp++;
	}

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig = SREAL_MAX_SIG;
	}
    }
#endif
}

/* Set *R to SIG * 2 ^ EXP.  Return R.  */

sreal *
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
  r->sig_lo = 0;
  r->sig_hi = sig;
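  /* SIG has been placed in the high part of the significand, whose weight is
     2^SREAL_PART_BITS, i.e. 2^16 here (see dump_sreal above), so compensate
     in the exponent.  */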
  r->exp = exp - 16;
#else
  r->sig = sig;
  r->exp = exp;
#endif
  normalize (r);
  return r;
}

/* Return the integer value of *R, truncated toward zero.  Values too large
   to represent are saturated to MAX_HOST_WIDE_INT.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}

/* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */
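
/* Comparing the exponents first is valid because both operands are
   normalized: their significands lie in [SREAL_MIN_SIG, SREAL_MAX_SIG]
   (or the number is zero with exp == -SREAL_MAX_EXP), so a larger exponent
   always means a larger value.  */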

int
sreal_compare (sreal *a, sreal *b)
{
  if (a->exp > b->exp)
    return 1;
  if (a->exp < b->exp)
    return -1;
#if SREAL_PART_BITS < 32
  if (a->sig_hi > b->sig_hi)
    return 1;
  if (a->sig_hi < b->sig_hi)
    return -1;
  if (a->sig_lo > b->sig_lo)
    return 1;
  if (a->sig_lo < b->sig_lo)
    return -1;
#else
  if (a->sig > b->sig)
    return 1;
  if (a->sig < b->sig)
    return -1;
#endif
  return 0;
}

/* *R = *A + *B.  Return R.  */
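
/* Illustrative sketch of the algorithm below: to add a = sig_a * 2^8 and
   b = sig_b * 2^5, the smaller operand's significand is shifted right by
   dexp == 3 (with rounding), both operands then share exponent 8, and the
   significands are added.  When dexp > SREAL_BITS, *B is too small to affect
   *A at this precision, so *A is returned unchanged.  */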

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}

/* *R = *A - *B.  Return R.  *A must not be smaller than *B; this is asserted
   below.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}

/* *R = *A * *B.  Return R.  */
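
/* For example, (sig_a * 2^exp_a) * (sig_b * 2^exp_b)
     == (sig_a * sig_b) * 2^(exp_a + exp_b),
   so the single-part branch below simply multiplies the significands, adds
   the exponents and lets normalize rescale the result.  In the two-part
   branch the low SREAL_PART_BITS bits of the double-width significand product
   are discarded, which is why SREAL_PART_BITS is added to the exponent.  An
   operand whose significand is below SREAL_MIN_SIG can only be zero (by the
   normalization invariant above), so the result is then set to zero
   directly.  */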

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
	{
	  sreal *swap;
	  swap = a;
	  a = b;
	  b = swap;
	}

      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}

/* *R = *A / *B.  Return R.  */
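
/* Sketch of the computation below: in the single-part branch the quotient is
   computed as
     ((sig_a << SREAL_PART_BITS) / sig_b) * 2^(exp_a - exp_b - SREAL_PART_BITS),
   where pre-shifting sig_a keeps enough significant bits in the integer
   quotient.  The two-part branch computes the quotient in
   SREAL_PART_BITS / 2 sized steps against the top bits of *B's significand.  */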

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Dividing by the whole two-part significand is awkward to write, so we
	 divide by its top 3/4 bits instead (tmp2 below, rounded); the extra
	 SREAL_PART_BITS / 2 subtracted from the exponent compensates for the
	 dropped low bits of the divisor.  */

      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
	      + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
	tmp2++;

      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}

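/* A minimal usage sketch of the interface defined above.  It is guarded by
   SREAL_USAGE_EXAMPLE, a hypothetical macro that nothing defines, so it is
   not compiled into GCC; it only illustrates the calling convention of
   sreal_init, sreal_add, sreal_div, dump_sreal and sreal_to_int.  */
#ifdef SREAL_USAGE_EXAMPLE
static void
sreal_usage_example (void)
{
  sreal a, b, sum, ratio;

  sreal_init (&a, 100, 0);	/* a = 100 * 2^0 */
  sreal_init (&b, 3, 0);	/* b = 3 * 2^0 */

  sreal_add (&sum, &a, &b);	/* sum == 103 */
  sreal_div (&ratio, &a, &b);	/* ratio ~= 33.33 */

  fprintf (stderr, "a + b = ");
  dump_sreal (stderr, &sum);
  fprintf (stderr, ", truncated to %ld\n", (long) sreal_to_int (&sum));

  fprintf (stderr, "a / b = ");
  dump_sreal (stderr, &ratio);
  /* sreal_to_int truncates toward zero, so this prints 33.  */
  fprintf (stderr, ", truncated to %ld\n", (long) sreal_to_int (&ratio));
}
#endif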