Lines matching refs: vsd

69 #define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except)
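The wrapper at line 69 exists so that non-debug builds simply drop the operation-name argument before calling __vfp_single_normaliseround. A minimal sketch of the same pattern, using hypothetical names rather than the kernel's:

#include <stdio.h>

/* Sketch only: the DEBUG variant keeps and prints the operation name, the
 * release variant discards it before calling the real implementation, in the
 * spirit of the vfp_single_normaliseround wrapper above. */
static unsigned int normalise_impl(int sd, unsigned int exceptions)
{
	(void)sd;
	return exceptions;
}

#ifdef DEBUG
#define normalise(sd, exceptions, func) \
	(printf("%s: d%d\n", (func), (sd)), normalise_impl((sd), (exceptions)))
#else
#define normalise(sd, exceptions, func) normalise_impl((sd), (exceptions))
#endif

int main(void)
{
	return (int)normalise(3, 0u, "fadd");
}
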
215 vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
247 *vsd = *nan;
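Lines 215 and 247 belong to the NaN propagation helper, which picks one operand NaN and copies it into the destination. A hedged sketch of the usual IEEE 754 rule it implements, over the standard single layout (8-bit exponent, 23-bit fraction, quiet bit = fraction bit 22); struct and function names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct sp { uint32_t sign, exponent, significand; };

#define SP_QUIET_BIT (1u << 22)

static int is_nan(const struct sp *s)
{
	return s->exponent == 255 && s->significand != 0;
}

static int is_snan(const struct sp *s)
{
	return is_nan(s) && !(s->significand & SP_QUIET_BIT);
}

/* A signalling NaN raises "invalid" and is preferred as the payload source;
 * whichever NaN is chosen, the copy written to *d is quietened.
 * Returns nonzero when the invalid-operation exception should be signalled. */
static int propagate_nan(struct sp *d, const struct sp *n, const struct sp *m)
{
	const struct sp *nan = m;

	if (is_snan(n))
		nan = n;
	else if (is_snan(m))
		nan = m;
	else if (is_nan(n))
		nan = n;

	*d = *nan;
	d->significand |= SP_QUIET_BIT;

	return is_snan(n) || is_snan(m);
}

int main(void)
{
	struct sp snan = { 0, 255, 0x000001 };	/* signalling NaN */
	struct sp two  = { 0, 128, 0x000000 };	/* 2.0f           */
	struct sp d;

	printf("invalid = %d, result significand = %#x\n",
	       propagate_nan(&d, &two, &snan), (unsigned int)d.significand);
	return 0;
}
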
318 struct vfp_single vsm, vsd;
324 struct vfp_single *vsp = &vsd;
364 vsd.sign = 0;
365 vsd.exponent = ((vsm.exponent - 127) >> 1) + 127;
366 vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2;
368 vfp_single_dump("sqrt estimate", &vsd);
373 if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) {
374 if (vsd.significand < 2) {
375 vsd.significand = 0xffffffff;
380 term = (u64)vsd.significand * vsd.significand;
386 vsd.significand -= 1;
387 rem += ((u64)vsd.significand << 1) | 1;
389 vsd.significand |= rem != 0;
392 vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
394 return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fsqrt");
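Lines 364-394 are the core of fsqrt: the result exponent is half the unbiased input exponent re-biased (line 365), an initial significand guess comes from vfp_estimate_sqrt_significand() (line 366), and a remainder loop then walks the guess down and ORs in a sticky bit before the final jamming shift. A hedged, self-contained sketch of the correction step on plain integers; the libm-based guess stands in for the kernel's estimate routine, and all names are mine:

#include <stdint.h>
#include <stdio.h>
#include <math.h>

/* Given M = m << 32 and an estimate q that may be slightly too large, walk q
 * down until q*q <= M, reporting whether the square root was inexact. */
static uint32_t sqrt_correct(uint64_t M, uint32_t q, int *inexact)
{
	int64_t rem = (int64_t)(M - (uint64_t)q * q);

	while (rem < 0) {
		q -= 1;
		rem += ((uint64_t)q << 1) | 1;	/* (q+1)^2 - q^2 = 2q + 1 */
	}
	*inexact = rem != 0;
	return q;
}

int main(void)
{
	uint32_t m = 0x60000000u;		/* a left-aligned 1.xx significand */
	uint64_t M = (uint64_t)m << 32;
	int inexact;
	uint32_t q = sqrt_correct(M, (uint32_t)(sqrt((double)M) + 2.0), &inexact);

	printf("root = %#x, inexact = %d\n", (unsigned int)q, inexact);
	return 0;
}

In the listed code the inexact flag is instead ORed into bit 0 of the significand as a sticky bit (line 389), and the result is then halved with a jamming right shift (line 392) so no information is lost before rounding.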
729 vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
764 return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
766 *vsd = *vsp;
771 vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn,
799 return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr);
806 *vsd = *vsn;
820 vsd->sign = vfp_sign_negate(vsd->sign);
823 vsd->sign = (fpscr & FPSCR_RMODE_MASK) ==
829 vsd->significand = m_sig;
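Lines 820-829 are the sign handling in vfp_single_add: if the magnitude subtraction goes negative the result sign flips (line 820), and if the operands cancel exactly the zero result is +0 in every rounding mode except round-toward-minus-infinity, where it is -0 (line 823). A hedged host-FPU illustration of that IEEE 754 rule (this exercises the native FPU via <fenv.h>, not the emulation code; the volatile keeps the compiler from folding the addition at a fixed rounding mode):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	volatile float x = 1.5f;

	fesetround(FE_TONEAREST);
	printf("round-to-nearest:  x + (-x) = %c0\n",
	       signbit(x + -x) ? '-' : '+');

	fesetround(FE_DOWNWARD);
	printf("round-toward -inf: x + (-x) = %c0\n",
	       signbit(x + -x) ? '-' : '+');

	return 0;
}
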
835 vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_single *vsm, u32 fpscr)
852 vsd->sign = vsn->sign ^ vsm->sign;
859 return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
861 *vsd = vfp_single_default_qnan;
864 vsd->exponent = vsn->exponent;
865 vsd->significand = 0;
874 vsd->exponent = 0;
875 vsd->significand = 0;
884 vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2;
885 vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand);
887 vfp_single_dump("VSD", vsd);
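Lines 884-885 combine the operands in vfp_single_multiply: the biased exponents add with one bias removed (the extra +2 positions the product where the shared normalise/round code expects it), and the 64-bit significand product is narrowed to 32 bits with "jamming", i.e. any discarded low bit keeps the result's low bit set so rounding still sees it. A hedged sketch of that narrowing and the exponent arithmetic, with my own names and illustrative left-aligned significands:

#include <stdint.h>
#include <stdio.h>

/* Take the high 32 bits of a 64-bit product and OR in a sticky bit if any of
 * the discarded low 32 bits were set, in the spirit of vfp_hi64to32jamming(). */
static uint32_t hi64to32_jamming(uint64_t v)
{
	return (uint32_t)(v >> 32) | ((uint32_t)v != 0);
}

int main(void)
{
	uint32_t n_sig = 0xc0000000u;		/* example left-aligned significands */
	uint32_t m_sig = 0xa0000000u;
	int n_exp = 127 + 1, m_exp = 127 + 2;	/* biased exponents */

	uint32_t d_sig = hi64to32_jamming((uint64_t)n_sig * m_sig);
	int d_exp = n_exp + m_exp - 127 + 2;	/* re-bias, +2 for the normalise code */

	printf("product significand = %#x, biased exponent = %d\n",
	       (unsigned int)d_sig, d_exp);
	return 0;
}
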
897 struct vfp_single vsd, vsp, vsn, vsm;
923 exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
925 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, func);
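Lines 897-925 show the shared multiply-accumulate path: the product goes into a temporary, a pair of negate flags optionally flips the product sign and the accumulator sign, the add follows, and a single normalise/round at the end is handed the exceptions from both stages ORed together. A hedged sketch of how two such flags select the four variants; the flag values and the mapping to the fmac/fnmac/fmsc/fnmsc mnemonics shown in the comments are my reading of the VFP instructions, not taken from the listed lines:

#include <stdio.h>

#define NEG_MULTIPLY	(1u << 0)	/* negate the product n * m */
#define NEG_SUBTRACT	(1u << 1)	/* negate the accumulator d */

static float mac(float d, float n, float m, unsigned int negate)
{
	float prod = n * m;

	if (negate & NEG_MULTIPLY)
		prod = -prod;
	if (negate & NEG_SUBTRACT)
		d = -d;
	return d + prod;
}

int main(void)
{
	float d = 10.0f, n = 3.0f, m = 4.0f;

	printf("fmac  %g\n", mac(d, n, m, 0));				/*  d + n*m */
	printf("fnmac %g\n", mac(d, n, m, NEG_MULTIPLY));		/*  d - n*m */
	printf("fmsc  %g\n", mac(d, n, m, NEG_SUBTRACT));		/* -d + n*m */
	printf("fnmsc %g\n", mac(d, n, m, NEG_MULTIPLY | NEG_SUBTRACT));/* -d - n*m */
	return 0;
}
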
969 struct vfp_single vsd, vsn, vsm;
983 exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
984 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fmul");
992 struct vfp_single vsd, vsn, vsm;
1006 exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
1007 vsd.sign = vfp_sign_negate(vsd.sign);
1008 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fnmul");
1016 struct vfp_single vsd, vsn, vsm;
1033 exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
1035 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fadd");
1054 struct vfp_single vsd, vsn, vsm;
1064 vsd.sign = vsn.sign ^ vsm.sign;
1114 vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1;
1118 vsd.exponent++;
1123 vsd.significand = significand;
1125 if ((vsd.significand & 0x3f) == 0)
1126 vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
1128 return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fdiv");
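Lines 1114-1128 assemble the fdiv result: the biased exponents subtract with the bias re-added, minus one because the quotient of two 1.xx significands may start just below 1 and is renormalised by bumping the exponent (line 1118); and when the low quotient bits look suspiciously exact, a back-multiplication decides whether a sticky bit must be ORed in (lines 1125-1126). A hedged sketch of that exactness test; the plain 64-bit division stands in for the kernel's quotient estimate, names are mine, and the demo assumes n < m so the quotient fits in 32 bits:

#include <stdint.h>
#include <stdio.h>

/* q estimates (n << 32) / m.  If q * m reproduces n << 32 exactly the
 * division was exact; otherwise OR in a sticky bit so rounding still knows
 * bits were lost.  The cheap (q & 0x3f) filter avoids the multiply when the
 * low quotient bits already carry rounding information. */
static uint32_t divide_sig(uint32_t n, uint32_t m)
{
	uint64_t num = (uint64_t)n << 32;
	uint32_t q = (uint32_t)(num / m);

	if ((q & 0x3f) == 0)
		q |= ((uint64_t)m * q != num);	/* back-multiply exactness test */
	return q;
}

int main(void)
{
	/* n/m = 0.75 exactly: no sticky bit expected. */
	printf("%#x\n", (unsigned int)divide_sig(0x60000000u, 0x80000000u));
	/* n/m = 2/3: inexact, but the low bits are nonzero anyway, so the
	 * (q & 0x3f) == 0 filter skips the back-multiply. */
	printf("%#x\n", (unsigned int)divide_sig(0x40000000u, 0x60000000u));
	return 0;
}
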
1131 exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
1133 vfp_put_float(vfp_single_pack(&vsd), sd);
1137 exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
1141 vsd.exponent = 0;
1142 vsd.significand = 0;
1148 vsd.exponent = 255;
1149 vsd.significand = 0;
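Lines 1141-1149 are the fdiv special-case exits: exponent 0 with significand 0 packs a zero result, while exponent 255 with significand 0 packs an infinity, reached from the divide-by-zero path with FPSCR_DZC raised. A hedged host-FPU illustration of the corresponding IEEE behaviour (native FPU via <fenv.h>, not the emulation code; the volatiles keep the division from being folded away at compile time):

#include <fenv.h>
#include <stdio.h>

int main(void)
{
	volatile float n = 1.0f, z = 0.0f;
	float q;

	feclearexcept(FE_ALL_EXCEPT);
	q = n / z;				/* finite / zero -> +infinity */

	printf("result = %f, divide-by-zero raised = %d\n",
	       q, fetestexcept(FE_DIVBYZERO) != 0);
	return 0;
}
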