/*	$NetBSD: fpu_arith.h,v 1.5 2005/12/11 12:19:05 christos Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * @(#)fpu_arith.h 8.1 (Berkeley) 6/11/93 41 */ 42 43/* 44 * Extended-precision arithmetic. 45 * 46 * We hold the notion of a `carry register', which may or may not be a 47 * machine carry bit or register. On the SPARC, it is just the machine's 48 * carry bit. 49 * 50 * In the worst case, you can compute the carry from x+y as 51 * (unsigned)(x + y) < (unsigned)x 52 * and from x+y+c as 53 * ((unsigned)(x + y + c) <= (unsigned)x && (y|c) != 0) 54 * for example. 55 */ 56 57/* set up for extended-precision arithemtic */ 58#define FPU_DECL_CARRY 59 60/* 61 * We have three kinds of add: 62 * add with carry: r = x + y + c 63 * add (ignoring current carry) and set carry: c'r = x + y + 0 64 * add with carry and set carry: c'r = x + y + c 65 * The macros use `C' for `use carry' and `S' for `set carry'. 66 * Note that the state of the carry is undefined after ADDC and SUBC, 67 * so if all you have for these is `add with carry and set carry', 68 * that is OK. 69 * 70 * The same goes for subtract, except that we compute x - y - c. 71 * 72 * Finally, we have a way to get the carry into a `regular' variable, 73 * or set it from a value. SET_CARRY turns 0 into no-carry, nonzero 74 * into carry; GET_CARRY sets its argument to 0 or 1. 
75 */ 76#define FPU_ADDC(r, x, y) \ 77 __asm volatile("addx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 78#define FPU_ADDS(r, x, y) \ 79 __asm volatile("addcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 80#define FPU_ADDCS(r, x, y) \ 81 __asm volatile("addxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 82#define FPU_SUBC(r, x, y) \ 83 __asm volatile("subx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 84#define FPU_SUBS(r, x, y) \ 85 __asm volatile("subcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 86#define FPU_SUBCS(r, x, y) \ 87 __asm volatile("subxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y)) 88 89#define FPU_GET_CARRY(r) __asm volatile("addx %%g0,%%g0,%0" : "=r"(r)) 90#define FPU_SET_CARRY(v) __asm volatile("addcc %0,-1,%%g0" : : "r"(v)) 91 92#define FPU_SHL1_BY_ADD /* shift left 1 faster by ADDC than (a<<1)|(b>>31) */ 93