/*
 * mul.S: This routine was taken from glibc-1.09 and is covered
 * by the GNU Library General Public License Version 2.
 */

/*
 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
 */

/*
 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.
 */

        .globl  .mul
        .globl  _Mul
.mul:
_Mul:   /* needed for export */
        mov     %o0, %y         ! multiplier -> Y
        andncc  %o0, 0xfff, %g0 ! test bits 12..31
        be      Lmul_shortway   ! if zero, can do it the short way
         andcc  %g0, %g0, %o4   ! zero the partial product and clear N and V

        /*
         * Long multiply. 32 steps, followed by a final shift step.
         */
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %o1, %o4   ! 13
        mulscc  %o4, %o1, %o4   ! 14
        mulscc  %o4, %o1, %o4   ! 15
        mulscc  %o4, %o1, %o4   ! 16
        mulscc  %o4, %o1, %o4   ! 17
        mulscc  %o4, %o1, %o4   ! 18
        mulscc  %o4, %o1, %o4   ! 19
        mulscc  %o4, %o1, %o4   ! 20
        mulscc  %o4, %o1, %o4   ! 21
        mulscc  %o4, %o1, %o4   ! 22
        mulscc  %o4, %o1, %o4   ! 23
        mulscc  %o4, %o1, %o4   ! 24
        mulscc  %o4, %o1, %o4   ! 25
        mulscc  %o4, %o1, %o4   ! 26
        mulscc  %o4, %o1, %o4   ! 27
        mulscc  %o4, %o1, %o4   ! 28
        mulscc  %o4, %o1, %o4   ! 29
        mulscc  %o4, %o1, %o4   ! 30
        mulscc  %o4, %o1, %o4   ! 31
        mulscc  %o4, %o1, %o4   ! 32
        mulscc  %o4, %g0, %o4   ! final shift

        ! If %o0 was negative, the result is
        !       (%o0 * %o1) + (%o1 << 32)
        ! We fix that here.

        /* Faster code adapted from tege@sics.se's code for umul.S. */
        sra     %o0, 31, %o2    ! make mask from sign bit
        and     %o1, %o2, %o2   ! %o2 = 0 or %o1, depending on sign of %o0
        rd      %y, %o0         ! get lower half of product
        retl
         sub    %o4, %o2, %o1   ! subtract compensation
                                !  and put upper half in place

Lmul_shortway:
        /*
         * Short multiply. 12 steps, followed by a final shift step.
         * The resulting bits are off by 12 and (32-12) = 20 bit positions,
         * but there is no problem with %o0 being negative (unlike above).
         */
        mulscc  %o4, %o1, %o4   ! 1
        mulscc  %o4, %o1, %o4   ! 2
        mulscc  %o4, %o1, %o4   ! 3
        mulscc  %o4, %o1, %o4   ! 4
        mulscc  %o4, %o1, %o4   ! 5
        mulscc  %o4, %o1, %o4   ! 6
        mulscc  %o4, %o1, %o4   ! 7
        mulscc  %o4, %o1, %o4   ! 8
        mulscc  %o4, %o1, %o4   ! 9
        mulscc  %o4, %o1, %o4   ! 10
        mulscc  %o4, %o1, %o4   ! 11
        mulscc  %o4, %o1, %o4   ! 12
        mulscc  %o4, %g0, %o4   ! final shift

        /*
         * %o4 has 20 of the bits that should be in the low part of the
         * result; %y has the bottom 12 (as %y's top 12). That is:
         *
         *            %o4                %y
         *      +----------------+----------------+
         *      | -12- |   -20-  | -12- |   -20-  |
         *      +------(---------+------)---------+
         *        --hi-- ----low-part----
         *
         * The upper 12 bits of %o4 should be sign-extended to form the
         * high part of the product (i.e., highpart = %o4 >> 20).
         */

        rd      %y, %o5
        sll     %o4, 12, %o0    ! shift middle bits left 12
        srl     %o5, 20, %o5    ! shift low bits right 20, zero fill at left
        or      %o5, %o0, %o0   ! construct low part of result
        retl
         sra    %o4, 20, %o1    ! ... and extract high part of result

        .globl  .mul_patch
.mul_patch:
        smul    %o0, %o1, %o0
        retl
         rd     %y, %o1
        nop
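
/*
 * .mul_patch above is a drop-in replacement for the head of .mul that
 * uses the hardware smul instruction; it is exported so that, on
 * processors implementing the Version 8 integer multiply, the slower
 * mulscc sequence can presumably be patched over at startup.
 */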
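
/*
 * A sketch of the arithmetic behind the long-way compensation (the
 * sra/and/sub sequence): the mulscc steps consume the multiplier out
 * of %y as an unsigned quantity, so a negative %o0 participates as
 * %o0 + 2^32 and the loop produces
 *
 *      (%o0 + 2^32) * %o1 = (%o0 * %o1) + (%o1 << 32)
 *
 * i.e., the raw result is high by exactly %o1 in its upper word. In C
 * terms (the function name and variables are illustrative only):
 *
 *      int fix_high(int o0, int o1, int raw_high)
 *      {
 *              int mask = o0 >> 31;            // all ones iff o0 < 0
 *              return raw_high - (o1 & mask);  // cancel the excess %o1
 *      }
 */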
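
/*
 * The short-way reassembly, as a C sketch of the sll/srl/or/sra
 * sequence (o4 and y stand for the register contents after the final
 * shift; the function name is illustrative only):
 *
 *      void unpack(int o4, unsigned int y, unsigned int *low, int *high)
 *      {
 *              *low  = ((unsigned int) o4 << 12) | (y >> 20);
 *              *high = o4 >> 20;       // arithmetic shift, like sra
 *      }
 */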
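
/*
 * Overall, a minimal C model of the value .mul computes (assuming a
 * 64-bit long long; the function name is illustrative, not an
 * exported symbol):
 *
 *      long long mul(int a, int b)     // a in %o0, b in %o1
 *      {
 *              return (long long) a * (long long) b;
 *              // low word comes back in %o0, high word in %o1
 *      }
 */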