1#include "FEATURE/uwin"
2
3#if !_UWIN || (_lib__copysign||_lib_copysign) && _lib_logb && (_lib__finite||_lib_finite) && (_lib_drem||_lib_remainder) && _lib_sqrt && _lib_ilogb && (_lib__scalb||_lib_scalb)
4
/* the native library already provides every function this file would
 * supply; emit a dummy symbol so the object file is not empty */
void _STUB_support(){}
6
7#else
8
9/*
10 * Copyright (c) 1985, 1993
11 *	The Regents of the University of California.  All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38#ifndef lint
39static char sccsid[] = "@(#)support.c	8.1 (Berkeley) 6/4/93";
40#endif /* not lint */
41
42/*
43 * Some IEEE standard 754 recommended functions and remainder and sqrt for
44 * supporting the C elementary functions.
45 ******************************************************************************
46 * WARNING:
47 *      These codes are developed (in double) to support the C elementary
48 * functions temporarily. They are not universal, and some of them are very
 * slow (in particular, drem and sqrt are extremely inefficient). Each
50 * computer system should have its implementation of these functions using
51 * its own assembler.
52 ******************************************************************************
53 *
54 * IEEE 754 required operations:
55 *     drem(x,p)
56 *              returns  x REM y  =  x - [x/y]*y , where [x/y] is the integer
57 *              nearest x/y; in half way case, choose the even one.
58 *     sqrt(x)
59 *              returns the square root of x correctly rounded according to
 *		the rounding mode.
61 *
62 * IEEE 754 recommended functions:
63 * (a) copysign(x,y)
64 *              returns x with the sign of y.
65 * (b) scalb(x,N)
66 *              returns  x * (2**N), for integer values N.
67 * (c) logb(x)
68 *              returns the unbiased exponent of x, a signed integer in
69 *              double precision, except that logb(0) is -INF, logb(INF)
70 *              is +INF, and logb(NAN) is that NAN.
71 * (d) finite(x)
72 *              returns the value TRUE if -INF < x < +INF and returns
73 *              FALSE otherwise.
74 *
75 *
76 * CODED IN C BY K.C. NG, 11/25/84;
77 * REVISED BY K.C. NG on 1/22/85, 2/13/85, 3/24/85.
78 */
79
80#include "mathimpl.h"
81
/*
 * Format-dependent constants for direct manipulation of the 16-bit word
 * that holds a double's sign and exponent:
 *   msign - mask selecting everything but the sign bit
 *   mexp  - mask selecting the exponent field within that word
 *   prep1 - working precision plus guard bits, used for prescaling
 *   gap   - shift distance from bit 0 of the word to the exponent field
 *   bias  - exponent bias of the format
 *   novf  - near-largest finite value (overflow trigger)
 *   nunf  - near-smallest normal value (underflow trigger)
 * NOTE(review): the routines below address the FIRST 16-bit word of a
 * double (the LAST on national); that word carries the exponent only
 * where the most significant word comes first -- confirm before porting
 * to little-endian targets.
 */
#if defined(vax)||defined(tahoe)      /* VAX D format */
#include <errno.h>
    static const unsigned short msign=0x7fff , mexp =0x7f80 ;
    static const short  prep1=57, gap=7, bias=129           ;
    static const double novf=1.7E38, nunf=3.0E-39, zero=0.0 ;
#else	/* defined(vax)||defined(tahoe) */
    static const unsigned short msign=0x7fff, mexp =0x7ff0  ;
    static const short prep1=54, gap=4, bias=1023           ;
    static const double novf=1.7E308, nunf=3.0E-308,zero=0.0;
#endif	/* defined(vax)||defined(tahoe) */
92
93#if !_lib__scalb || !_lib_scalb
94
/*
 * _scalb(x,N) - return x * 2**N for integer N by adding N directly to
 * the binary exponent field of x, without computing the power.
 * Overflow returns novf+novf (+INF on IEEE hardware); underflow returns
 * nunf*nunf (zero/subnormal).  Subnormal inputs are normalized first by
 * multiplying with 2**prep1, then rescaled recursively.
 * NOTE(review): N is declared double but is used as an integral count;
 * also see the endianness caveat on the constants above.
 */
extern double _scalb(x,N)
double x; double N;
{
        int k;

#ifdef national
        /* national 32k: the sign/exponent word is the last of the four */
        unsigned short *px=(unsigned short *) &x + 3;
#else	/* national */
        unsigned short *px=(unsigned short *) &x;
#endif	/* national */

        if( x == zero )  return(x);	/* scaling zero is a no-op */

#if defined(vax)||defined(tahoe)
        if( (k= *px & mexp ) != ~msign ) {	/* not a reserved operand */
            if (N < -260)
		return(nunf*nunf);	/* certain underflow */
	    else if (N > 260) {
		return(copysign(infnan(ERANGE),x));	/* certain overflow */
	    }
#else	/* defined(vax)||defined(tahoe) */
        if( (k= *px & mexp ) != mexp ) {	/* x is finite */
            /* |N| beyond 2100 can never come back into range: force it */
            if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);
            if( k == 0 ) {	/* subnormal x: normalize, then rescale */
                 x *= scalb(1.0,prep1);  N -= prep1; return(scalb(x,N));}
#endif	/* defined(vax)||defined(tahoe) */

            /* add N to the biased exponent and range-check the result */
            if((k = (k>>gap)+ N) > 0 )
                if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);
                else x=novf+novf;               /* overflow */
            else
                if( k > -prep1 )
                                        /* gradual underflow */
                    {*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}
                else
                return(nunf*nunf);
            }
        return(x);	/* INF/NaN (or reserved operand) falls through unchanged */
}
134
135#endif
136
137#if !_lib_scalb
138
/*
 * scalb(x,N) - public entry point for x * 2**N; the work is done by the
 * local _scalb() implementation above.
 */
extern double scalb(x,N)
double x; double N;
{
	double scaled;

	scaled = _scalb(x, N);
	return(scaled);
}
144
145#endif
146
147#if !_lib__copysign
148
/*
 * _copysign(x,y) - return x with its sign replaced by the sign of y,
 * by copying y's sign bit into x's sign/exponent word.
 */
extern double _copysign(x,y)
double x,y;
{
#ifdef national
        /* national 32k: the sign/exponent word is the last of the four */
        unsigned short  *px=(unsigned short *) &x+3,
                        *py=(unsigned short *) &y+3;
#else	/* national */
        unsigned short  *px=(unsigned short *) &x,
                        *py=(unsigned short *) &y;
#endif	/* national */

#if defined(vax)||defined(tahoe)
        if ( (*px & mexp) == 0 ) return(x);	/* zero/reserved: leave untouched */
#endif	/* defined(vax)||defined(tahoe) */

        /* keep x's exponent and fraction, take y's sign bit */
        *px = ( *px & msign ) | ( *py & ~msign );
        return(x);
}
167
168#endif
169
170#if !_lib_copysign
171
/*
 * copysign(x,y) - public entry point; the sign transfer itself is done
 * by the local _copysign() implementation above.
 */
extern double copysign(x,y)
double x,y;
{
	double signed_x;

	signed_x = _copysign(x, y);
	return(signed_x);
}
177
178#endif
179
180#if !_lib_logb
181
/*
 * logb(x) - return the unbiased binary exponent of x as a double.
 * IEEE special cases handled below: logb(0) is -INF (raises divide by
 * zero), logb(+-INF) is +INF, logb(NaN) is that NaN, and subnormals
 * report the minimum exponent -1022.
 */
extern double logb(x)
double x;
{

#ifdef national
        short *px=(short *) &x+3, k;
#else	/* national */
        short *px=(short *) &x, k;
#endif	/* national */

#if defined(vax)||defined(tahoe)
        /* D format has no infinities or NaNs: just extract and unbias */
        return (int)(((*px&mexp)>>gap)-bias);
#else	/* defined(vax)||defined(tahoe) */
        if( (k= *px & mexp ) != mexp )		/* x is finite */
            if ( k != 0 )
                return ( (k>>gap) - bias );	/* normal: unbias the exponent */
            else if( x != zero)
                return ( -1022.0 );		/* subnormal */
            else
                return(-(1.0/zero));		/* logb(0) = -INF */
        else if(x != x)
            return(x);				/* NaN propagates */
        else
            {*px &= msign; return(x);}		/* logb(+-INF) = +INF */
#endif	/* defined(vax)||defined(tahoe) */
}
208
209#endif
210
211#if !_lib__finite
212
/*
 * _finite(x) - return 1 (TRUE) if -INF < x < +INF, 0 (FALSE) otherwise.
 * On VAX/Tahoe every representable value is finite.
 */
extern int _finite(x)
double x;
{
#if defined(vax)||defined(tahoe)
        return(1);
#else	/* defined(vax)||defined(tahoe) */
#ifdef national
        /* an all-ones exponent field marks INF or NaN */
        return( (*((short *) &x+3 ) & mexp ) != mexp );
#else	/* national */
        return( (*((short *) &x ) & mexp ) != mexp );
#endif	/* national */
#endif	/* defined(vax)||defined(tahoe) */
}
226
227#endif
228
229#if !_lib_finite
230
/*
 * finite(x) - public entry point; the classification is done by the
 * local _finite() implementation above.
 */
extern int finite(x)
double x;
{
	int is_finite;

	is_finite = _finite(x);
	return(is_finite);
}
236
237#endif
238
239#if !_lib_drem
240
/*
 * drem(x,p) - IEEE remainder:  x REM p = x - [x/p]*p, where [x/p] is
 * the integer nearest x/p (ties to even).  The reduction works on |x|,
 * repeatedly subtracting exponent-aligned copies of 2p until |x| <= 2p,
 * then makes a final half-interval adjustment and restores x's sign.
 * drem(x,0) and drem(+-INF,p) are invalid (NaN on IEEE, EDOM reserved
 * operand on VAX); drem(NaN,p)/drem(x,NaN) return the NaN;
 * drem(x, +-INF) returns x.
 */
extern double drem(x,p)
double x,p;
{
#if _lib_remainder
	return remainder(x,p);	/* library already provides the operation */
#else
        short sign;
        double hp,dp,tmp;
        unsigned short  k;
#ifdef national
        /* national 32k: the sign/exponent word is the last of the four */
        unsigned short
              *px=(unsigned short *) &x  +3,
              *pp=(unsigned short *) &p  +3,
              *pd=(unsigned short *) &dp +3,
              *pt=(unsigned short *) &tmp+3;
#else	/* national */
        unsigned short
              *px=(unsigned short *) &x  ,
              *pp=(unsigned short *) &p  ,
              *pd=(unsigned short *) &dp ,
              *pt=(unsigned short *) &tmp;
#endif	/* national */

        *pp &= msign ;	/* p := |p|; the result ignores p's sign */

#if defined(vax)||defined(tahoe)
        if( ( *px & mexp ) == ~msign )	/* is x a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
        if( ( *px & mexp ) == mexp )	/* x is INF or NaN */
#endif	/* defined(vax)||defined(tahoe) */
		return  (x-p)-(x-p);	/* create nan if x is inf */
	if (p == zero) {		/* drem(x,0) is invalid */
#if defined(vax)||defined(tahoe)
		return(infnan(EDOM));
#else	/* defined(vax)||defined(tahoe) */
		return zero/zero;
#endif	/* defined(vax)||defined(tahoe) */
	}

#if defined(vax)||defined(tahoe)
        if( ( *pp & mexp ) == ~msign )	/* is p a reserved operand? */
#else	/* defined(vax)||defined(tahoe) */
        if( ( *pp & mexp ) == mexp )	/* p is INF or NaN */
#endif	/* defined(vax)||defined(tahoe) */
		{ if (p != p) return p; else return x;}

        else  if ( ((*pp & mexp)>>gap) <= 1 )
                /* subnormal p, or almost subnormal p */
            { double b; b=scalb(1.0,(int)prep1);
              p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}
        else  if ( p >= novf/2)	/* huge p: halve both to avoid overflow */
            { p /= 2 ; x /= 2; return(drem(x,p)*2);}
        else
            {
                dp=p+p; hp=p/2;		/* dp = 2p, hp = p/2 */
                sign= *px & ~msign ;	/* remember x's sign bit */
                *px &= msign       ;	/* work on |x| */
                while ( x > dp )
                    {
                        /* raise dp's exponent to x's so each pass strips
                         * as much of x as a single subtraction can */
                        k=(*px & mexp) - (*pd & mexp) ;
                        tmp = dp ;
                        *pt += k ;

#if defined(vax)||defined(tahoe)
                        if( x < tmp ) *pt -= 128 ;	/* overshot: back off one exponent step */
#else	/* defined(vax)||defined(tahoe) */
                        if( x < tmp ) *pt -= 16 ;
#endif	/* defined(vax)||defined(tahoe) */

                        x -= tmp ;
                    }
                if ( x > hp )	/* final adjustment into [-p/2, p/2] */
                    { x -= p ;  if ( x >= hp ) x -= p ; }

#if defined(vax)||defined(tahoe)
		if (x)	/* VAX: never attach a sign to a true zero */
#endif	/* defined(vax)||defined(tahoe) */
			*px ^= sign;	/* restore the sign of x */
                return( x);

            }
#endif
}
324
325#endif
326
327#if !_lib_remainder
328
/*
 * remainder(x,p) - ISO C name for the IEEE remainder; identical to the
 * local drem() by construction.
 */
extern double remainder(x,p)
double x,p;
{
	double rem;

	rem = drem(x, p);
	return(rem);
}
334
335#endif
336
337#if !_lib_sqrt
338
/*
 * sqrt(x) - square root of x correctly rounded according to the current
 * rounding mode.  After the special cases, x is scaled into [1,4) with
 * an even exponent, the root is generated one bit at a time into q
 * (restoring style), and the last bit is fixed up by observing how
 * 1.0+r rounds -- which reveals the active rounding direction without
 * reading any mode register.
 */
extern double sqrt(x)
double x;
{
        double q,s,b,r;
        double t;
	double const zero=0.0;	/* local constant; shadows the file-scope zero */
        int m,n,i;
#if defined(vax)||defined(tahoe)
        int k=54;	/* result bits to generate (VAX D format) */
#else	/* defined(vax)||defined(tahoe) */
        int k=51;	/* result bits to generate (IEEE double) */
#endif	/* defined(vax)||defined(tahoe) */

    /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
        if(x!=x||x==zero) return(x);

    /* sqrt(negative) is invalid */
        if(x<zero) {
#if defined(vax)||defined(tahoe)
		return (infnan(EDOM));	/* NaN */
#else	/* defined(vax)||defined(tahoe) */
		return(zero/zero);
#endif	/* defined(vax)||defined(tahoe) */
	}

    /* sqrt(INF) is INF */
        if(!finite(x)) return(x);

    /* scale x to [1,4) */
        n=logb(x);
        x=scalb(x,-n);
        if((m=logb(x))!=0) x=scalb(x,-m);       /* subnormal number */
        m += n;
        n = m/2;
        if((n+n)!=m) {x *= 2; m -=1; n=m/2;}	/* force an even exponent so n=m/2 exactly */

    /* generate sqrt(x) bit by bit (accumulating in q) */
            q=1.0; s=4.0; x -= 1.0; r=1;
            for(i=1;i<=k;i++) {
                t=s+1; x *= 4; r /= 2;
                if(t<=x) {
                    s=t+t+2, x -= t; q += r;}	/* next bit is 1 */
                else
                    s *= 2;			/* next bit is 0 */
                }

    /* generate the last bit and determine the final rounding */
            r/=2; x *= 4;
            if(x==zero) goto end; 100+r; /* trigger inexact flag */
            if(s<x) {
                q+=r; x -=s; s += 2; s *= 2; x *= 4;
                t = (x-s)-5;
                b=1.0+3*r/4; if(b==1.0) goto end; /* b==1 : Round-to-zero */
                b=1.0+r/4;   if(b>1.0) t=1;	/* b>1 : Round-to-(+INF) */
                if(t>=0) q+=r; }	      /* else: Round-to-nearest */
            else {
                s *= 2; x *= 4;
                t = (x-s)-1;
                b=1.0+3*r/4; if(b==1.0) goto end;
                b=1.0+r/4;   if(b>1.0) t=1;
                if(t>=0) q+=r; }

end:        return(scalb(q,n));	/* undo the initial scaling */
}
403
404#endif
405
406#if 0
407/* DREM(X,Y)
408 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
409 * DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)
410 * INTENDED FOR ASSEMBLY LANGUAGE
411 * CODED IN C BY K.C. NG, 3/23/85, 4/8/85.
412 *
413 * Warning: this code should not get compiled in unless ALL of
414 * the following machine-dependent routines are supplied.
415 *
416 * Required machine dependent functions (not on a VAX):
417 *     swapINX(i): save inexact flag and reset it to "i"
418 *     swapENI(e): save inexact enable and reset it to "e"
419 */
420
/*
 * Reference drem(x,y), compiled out (#if 0): intended as a model for an
 * assembly implementation.  It cannot be enabled until the machine-
 * dependent swapINX()/swapENI() routines described in the comment above
 * are supplied.  Method: chop y to 26 significant bits (y1) so that
 * n*y1 is exact, reduce x by n*y = n*y1 + n*(y-y1), then make the final
 * ties-to-even adjustment against y/2.
 */
extern double drem(x,y)
double x,y;
{

#ifdef national		/* order of words in floating point number */
	static const n0=3,n1=2,n2=1,n3=0;
#else /* VAX, SUN, ZILOG, TAHOE */
	static const n0=0,n1=1,n2=2,n3=3;
#endif

    	static const unsigned short mexp =0x7ff0, m25 =0x0190, m57 =0x0390;
	static const double zero=0.0;
	double hy,y1,t,t1;
	short k;
	long n;
	int i,e;
	unsigned short xexp,yexp, *px  =(unsigned short *) &x  ,
	      		nx,nf,	  *py  =(unsigned short *) &y  ,
	      		sign,	  *pt  =(unsigned short *) &t  ,
	      			  *pt1 =(unsigned short *) &t1 ;

	xexp = px[n0] & mexp ;	/* exponent of x */
	yexp = py[n0] & mexp ;	/* exponent of y */
	sign = px[n0] &0x8000;	/* sign of x     */

/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */
	if(x!=x) return(x); if(y!=y) return(y);	     /* x or y is NaN */
	if( xexp == mexp )   return(zero/zero);      /* x is INF */
	if(y==zero) return(y/y);

/* save the inexact flag and inexact enable in i and e respectively
 * and reset them to zero
 */
	i=swapINX(0);	e=swapENI(0);

/* subnormal number */
	nx=0;
	if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}

/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */
	if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}

	nf=nx;
	py[n0] &= 0x7fff;	/* work on |y| and |x| */
	px[n0] &= 0x7fff;

/* mask off the least significant 27 bits of y */
	t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;

/* LOOP: argument reduction on x whenever x > y */
loop:
	while ( x > y )
	{
	    t=y;
	    t1=y1;
	    xexp=px[n0]&mexp;	  /* exponent of x */
	    k=xexp-yexp-m25;
	    if(k>0) 	/* if x/y >= 2**26, scale up y so that x/y < 2**26 */
		{pt[n0]+=k;pt1[n0]+=k;}
	    n=x/t; x=(x-n*t1)-n*(t-t1);
	}
    /* end while (x > y) */

	if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}	/* undo y's prescale and finish */

/* final adjustment */

	hy=y/2.0;
	if(x>hy||((x==hy)&&n%2==1)) x-=y;	/* round quotient to nearest, ties to even */
	px[n0] ^= sign;				/* restore the sign of x */
	if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}

/* restore inexact flag and inexact enable */
	swapINX(i); swapENI(e);

	return(x);
}
498#endif
499
500#if 0
501/* SQRT
502 * RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT
503 * FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE
504 * CODED IN C BY K.C. NG, 3/22/85.
505 *
506 * Warning: this code should not get compiled in unless ALL of
507 * the following machine-dependent routines are supplied.
508 *
509 * Required machine dependent functions:
510 *     swapINX(i)  ...return the status of INEXACT flag and reset it to "i"
511 *     swapRM(r)   ...return the current Rounding Mode and reset it to "r"
512 *     swapENI(e)  ...return the status of inexact enable and reset it to "e"
513 *     addc(t)     ...perform t=t+1 regarding t as a 64 bit unsigned integer
514 *     subc(t)     ...perform t=t-1 regarding t as a 64 bit unsigned integer
515 */
516
/* magic correction table for newsqrt()'s initial approximation, indexed
 * by 5 bits of the halved high word of the estimate */
static const unsigned long table[] = {
0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,
58733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,
21581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };
521
522extern double newsqrt(x)
523double x;
524{
525        double y,z,t,addc(),subc()
526	double const b54=134217728.*134217728.; /* b54=2**54 */
527        long mx,scalx;
528	long const mexp=0x7ff00000;
529        int i,j,r,e,swapINX(),swapRM(),swapENI();
530        unsigned long *py=(unsigned long *) &y   ,
531                      *pt=(unsigned long *) &t   ,
532                      *px=(unsigned long *) &x   ;
533#ifdef national         /* ordering of word in a floating point number */
534        const int n0=1, n1=0;
535#else
536        const int n0=0, n1=1;
537#endif
538/* Rounding Mode:  RN ...round-to-nearest
539 *                 RZ ...round-towards 0
540 *                 RP ...round-towards +INF
541 *		   RM ...round-towards -INF
542 */
543        const int RN=0,RZ=1,RP=2,RM=3;
544				/* machine dependent: work on a Zilog Z8070
545                                 * and a National 32081 & 16081
546                                 */
547
548/* exceptions */
549	if(x!=x||x==0.0) return(x);  /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
550	if(x<0) return((x-x)/(x-x)); /* sqrt(negative) is invalid */
551        if((mx=px[n0]&mexp)==mexp) return(x);  /* sqrt(+INF) is +INF */
552
553/* save, reset, initialize */
554        e=swapENI(0);   /* ...save and reset the inexact enable */
555        i=swapINX(0);   /* ...save INEXACT flag */
556        r=swapRM(RN);   /* ...save and reset the Rounding Mode to RN */
557        scalx=0;
558
559/* subnormal number, scale up x to x*2**54 */
560        if(mx==0) {x *= b54 ; scalx-=0x01b00000;}
561
562/* scale x to avoid intermediate over/underflow:
563 * if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */
564        if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}
565        if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}
566
567/* magic initial approximation to almost 8 sig. bits */
568        py[n0]=(px[n0]>>1)+0x1ff80000;
569        py[n0]=py[n0]-table[(py[n0]>>15)&31];
570
571/* Heron's rule once with correction to improve y to almost 18 sig. bits */
572        t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;
573
574/* triple to almost 56 sig. bits; now y approx. sqrt(x) to within 1 ulp */
575        t=y*y; z=t;  pt[n0]+=0x00100000; t+=z; z=(x-z)*y;
576        t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t;
577
578/* twiddle last bit to force y correctly rounded */
579        swapRM(RZ);     /* ...set Rounding Mode to round-toward-zero */
580        swapINX(0);     /* ...clear INEXACT flag */
581        swapENI(e);     /* ...restore inexact enable status */
582        t=x/y;          /* ...chopped quotient, possibly inexact */
583        j=swapINX(i);   /* ...read and restore inexact flag */
584        if(j==0) { if(t==y) goto end; else t=subc(t); }  /* ...t=t-ulp */
585        b54+0.1;        /* ..trigger inexact flag, sqrt(x) is inexact */
586        if(r==RN) t=addc(t);            /* ...t=t+ulp */
587        else if(r==RP) { t=addc(t);y=addc(y);}/* ...t=t+ulp;y=y+ulp; */
588        y=y+t;                          /* ...chopped sum */
589        py[n0]=py[n0]-0x00100000;       /* ...correctly rounded sqrt(x) */
590end:    py[n0]=py[n0]+scalx;            /* ...scale back y */
591        swapRM(r);                      /* ...restore Rounding Mode */
592        return(y);
593}
594#endif
595
596#if !_lib_ilogb
597
598extern int ilogb(double x)
599{
600	return((int)logb(x));
601}
602
603#endif
604
605#endif
606