/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)muldi3.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "quad.h"

/*
 * Multiply two quads.
 *
 * Our algorithm is based on the following.  Split incoming quad values
 * u and v (where u,v >= 0) into
 *
 *	u = 2^n u1  +  u0	(n = number of bits in `u_long', usu. 32)
 *
 * and
 *
 *	v = 2^n v1  +  v0
 *
 * Then
 *
 *	uv = 2^2n u1 v1  +  2^n u1 v0  +  2^n v1 u0  +  u0 v0
 *	   = 2^2n u1 v1  +     2^n (u1 v0 + v1 u0)   +  u0 v0
 *
 * Now add 2^n u1 v1 to the first term and subtract it from the middle,
 * and add 2^n u0 v0 to the last term and subtract it from the middle.
 * This gives:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +
 *	         (2^n)    (u1 v0 - u1 v1 + u0 v1 - u0 v0)  +
 *	       (2^n + 1)  (u0 v0)
 *
 * Factoring the middle a bit gives us:
 *
 *	uv = (2^2n + 2^n) (u1 v1)  +			[u1v1 = high]
 *		 (2^n)    (u1 - u0) (v0 - v1)  +	[(u1-u0)... = mid]
 *	       (2^n + 1)  (u0 v0)			[u0v0 = low]
 *
 * The terms (u1 v1), (u1 - u0) (v0 - v1), and (u0 v0) can all be done
 * in just half the precision of the original.  (Note that either or both
 * of (u1 - u0) or (v0 - v1) may be negative.)
 *
 * This algorithm is from Knuth vol. 2 (2nd ed), section 4.3.3, p. 278.
 *
 * Since C does not give us a `long * long = quad' operator, we split
 * our input quads into two longs, then split the two longs into two
 * shorts.  We can then calculate `short * short = long' in native
 * arithmetic.
 *
 * Our product should, strictly speaking, be a `long quad', with 128
 * bits, but we are going to discard the upper 64.  In other words,
 * we are not interested in uv, but rather in (uv mod 2^2n).  This
 * makes some of the terms above vanish, and we get:
 *
 *	(2^n)(high) + (2^n)(mid) + (2^n + 1)(low)
 *
 * or
 *
 *	(2^n)(high + mid + low) + low
 *
 * Furthermore, `high' and `mid' can be computed mod 2^n, as any factor
 * of 2^n in either one will also vanish.  Only `low' need be computed
 * mod 2^2n, and only because of the final term above.
 */
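/*
 * A tiny worked example of the identity above (not part of the
 * original derivation, purely illustrative).  Take n = 4, so each
 * "half" is one hex digit, and multiply u = 0x27 by v = 0x35:
 *
 *	u1 = 2, u0 = 7;  v1 = 3, v0 = 5
 *	high = u1 v1 = 6
 *	mid  = (u1 - u0) (v0 - v1) = (-5)(2) = -10
 *	low  = u0 v0 = 35 = 0x23
 *
 * Full product: (2^8 + 2^4)(6) + (2^4)(-10) + (2^4 + 1)(35)
 *	= 1632 - 160 + 595 = 2067 = 0x813 = 0x27 * 0x35.
 *
 * Keeping only the low 2n = 8 bits via the reduced form:
 *	(2^4)(6 - 10 + 35) + 35 = 531 = 19 (mod 256) = 0x13,
 * which is indeed 0x813 mod 0x100.
 */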
static quad_t __lmulq(u_long, u_long);

quad_t
__muldi3(quad_t a, quad_t b)
{
	union uu u, v, low, prod;
	u_long high, mid, udiff, vdiff;
	int negall, negmid;
#define	u1	u.ul[H]
#define	u0	u.ul[L]
#define	v1	v.ul[H]
#define	v0	v.ul[L]

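	/*
	 * (The union uu and the H/L indices come from quad.h: ul[H]
	 * is the more significant long of the quad and ul[L] the less
	 * significant one, whichever byte order the machine uses.)
	 */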
	/*
	 * Get u and v such that u, v >= 0.  When this is finished,
	 * u1, u0, v1, and v0 will be directly accessible through the
	 * longword fields.
	 */
	if (a >= 0)
		u.q = a, negall = 0;
	else
		u.q = -a, negall = 1;
	if (b >= 0)
		v.q = b;
	else
		v.q = -b, negall ^= 1;

	if (u1 == 0 && v1 == 0) {
		/*
		 * An (I hope) important optimization occurs when u1 and v1
		 * are both 0.  This should be common since most numbers
		 * are small.  Here the product is just u0*v0.
		 */
		prod.q = __lmulq(u0, v0);
	} else {
		/*
		 * Compute the three intermediate products, remembering
		 * whether the middle term is negative.  We can discard
		 * any upper bits in high and mid, so we can use native
		 * u_long * u_long => u_long arithmetic.
		 */
		low.q = __lmulq(u0, v0);

		if (u1 >= u0)
			negmid = 0, udiff = u1 - u0;
		else
			negmid = 1, udiff = u0 - u1;
		if (v0 >= v1)
			vdiff = v0 - v1;
		else
			vdiff = v1 - v0, negmid ^= 1;
		mid = udiff * vdiff;

		high = u1 * v1;

		/*
		 * Assemble the final product.
		 */
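		/*
		 * (From (2^n)(high + mid + low) + low: the 2^n * low
		 * term contributes low.ul[L] to the high word, its own
		 * upper half having fallen off the top, and the final
		 * "+ low" contributes low.ul[H] there as well.)
		 */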
		prod.ul[H] = high + (negmid ? -mid : mid) + low.ul[L] +
		    low.ul[H];
		prod.ul[L] = low.ul[L];
	}
	return (negall ? -prod.q : prod.q);
#undef u1
#undef u0
#undef v1
#undef v0
}

/*
 * Multiply two 2N-bit longs to produce a 4N-bit quad, where N is half
 * the number of bits in a long (whatever that is---the code below
 * does not care as long as quad.h does its part of the bargain---but
 * typically N==16).
 *
 * We use the same algorithm from Knuth, but this time the modulo refinement
 * does not apply.  On the other hand, since N is half the size of a long,
 * we can get away with native multiplication---none of our input terms
 * exceeds (ULONG_MAX >> 1).
 *
 * Note that, for u_long l, the quad-precision result
 *
 *	l << N
 *
 * splits into high and low longs as HHALF(l) and LHUP(l) respectively.
 */
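/*
 * For instance (purely illustrative; this assumes 32-bit longs, so
 * N == 16, and the usual quad.h meanings: HHALF(l) is l's top half,
 * LHALF(l) its bottom half, and LHUP(l) is l shifted up by N):
 *
 *	l = 0x12345678:  l << 16 = 0x0000123456780000 as a quad,
 *	whose high long is 0x00001234 = HHALF(l) and whose low long
 *	is 0x56780000 = LHUP(l).
 */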
static quad_t
__lmulq(u_long u, u_long v)
{
	u_long u1, u0, v1, v0, udiff, vdiff, high, mid, low;
	u_long prodh, prodl, was;
	union uu prod;
	int neg;

	u1 = HHALF(u);
	u0 = LHALF(u);
	v1 = HHALF(v);
	v0 = LHALF(v);

	low = u0 * v0;

	/* This is the same small-number optimization as before. */
	if (u1 == 0 && v1 == 0)
		return (low);

	if (u1 >= u0)
		udiff = u1 - u0, neg = 0;
	else
		udiff = u0 - u1, neg = 1;
	if (v0 >= v1)
		vdiff = v0 - v1;
	else
		vdiff = v1 - v0, neg ^= 1;
	mid = udiff * vdiff;

	high = u1 * v1;

	/* prod = (high << 2N) + (high << N); */
	prodh = high + HHALF(high);
	prodl = LHUP(high);

	/* if (neg) prod -= mid << N; else prod += mid << N; */
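	/*
	 * (The `was' comparisons detect carry and borrow out of the
	 * low long: unsigned arithmetic wraps, so an add carried out
	 * exactly when the sum is smaller than the old value, and a
	 * subtract borrowed exactly when the difference is larger.)
	 */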
	if (neg) {
		was = prodl;
		prodl -= LHUP(mid);
		prodh -= HHALF(mid) + (prodl > was);
	} else {
		was = prodl;
		prodl += LHUP(mid);
		prodh += HHALF(mid) + (prodl < was);
	}

	/* prod += low << N */
	was = prodl;
	prodl += LHUP(low);
	prodh += HHALF(low) + (prodl < was);
	/* ... + low; */
	if ((prodl += low) < low)
		prodh++;

	/* return 4N-bit product */
	prod.ul[H] = prodh;
	prod.ul[L] = prodl;
	return (prod.q);
}
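
#if 0
/*
 * Minimal sanity check, not part of the library; the harness below is
 * hypothetical scaffolding.  On a compiler with a native 64-bit
 * multiply, the hardware product and __muldi3 must agree, since both
 * are taken mod 2^2n.  (On a target without one, `a * b' would itself
 * compile into a call to __muldi3, making the check vacuous; signed
 * overflow in the reference product is assumed to wrap.)
 */
#include <stdio.h>

int
main(void)
{
	quad_t t[] = { 0, 1, -1, 7, -13, 0x7fffffffLL, 0x100000000LL,
	    0x123456789abcdefLL };
	int i, j, n = sizeof(t) / sizeof(t[0]);

	for (i = 0; i < n; i++)
		for (j = 0; j < n; j++)
			if (__muldi3(t[i], t[j]) != t[i] * t[j])
				printf("mismatch: %d %d\n", i, j);
	return (0);
}
#endif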