Lines Matching refs:vsd in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/arch/arm/vfp/

69 #define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except)
215 vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn,
247 *vsd = *nan;
318 struct vfp_single vsm, vsd;
324 struct vfp_single *vsp = &vsd;
364 vsd.sign = 0;
365 vsd.exponent = ((vsm.exponent - 127) >> 1) + 127;
366 vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2;
368 vfp_single_dump("sqrt estimate", &vsd);
373 if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) {
374 if (vsd.significand < 2) {
375 vsd.significand = 0xffffffff;
380 term = (u64)vsd.significand * vsd.significand;
386 vsd.significand -= 1;
387 rem += ((u64)vsd.significand << 1) | 1;
389 vsd.significand |= rem != 0;
392 vsd.significand = vfp_shiftright32jamming(vsd.significand, 1);
394 return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fsqrt");
729 vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn,
764 return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
766 *vsd = *vsp;
771 vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn,
799 return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr);
806 *vsd = *vsn;
820 vsd->sign = vfp_sign_negate(vsd->sign);
823 vsd->sign = (fpscr & FPSCR_RMODE_MASK) ==
829 vsd->significand = m_sig;
835 vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_single *vsm, u32 fpscr)
852 vsd->sign = vsn->sign ^ vsm->sign;
859 return vfp_propagate_nan(vsd, vsn, vsm, fpscr);
861 *vsd = vfp_single_default_qnan;
864 vsd->exponent = vsn->exponent;
865 vsd->significand = 0;
874 vsd->exponent = 0;
875 vsd->significand = 0;
884 vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2;
885 vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand);
887 vfp_single_dump("VSD", vsd);
897 struct vfp_single vsd, vsp, vsn, vsm;
921 exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
923 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, func);
967 struct vfp_single vsd, vsn, vsm;
981 exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
982 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fmul");
990 struct vfp_single vsd, vsn, vsm;
1004 exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
1005 vsd.sign = vfp_sign_negate(vsd.sign);
1006 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fnmul");
1014 struct vfp_single vsd, vsn, vsm;
1031 exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
1033 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fadd");
1052 struct vfp_single vsd, vsn, vsm;
1062 vsd.sign = vsn.sign ^ vsm.sign;
1112 vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1;
1116 vsd.exponent++;
1121 vsd.significand = significand;
1123 if ((vsd.significand & 0x3f) == 0)
1124 vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
1126 return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fdiv");
1129 exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr);
1131 vfp_put_float(vfp_single_pack(&vsd), sd);
1135 exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr);
1139 vsd.exponent = 0;
1140 vsd.significand = 0;
1146 vsd.exponent = 255;
1147 vsd.significand = 0;
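
The vsd, vsn and vsm variables in the matches above are destination and operand values held in the VFP code's unpacked single-precision form; the listing itself shows the three fields involved: sign, exponent and significand. The sketch below is a user-space illustration only, not the kernel's definition (struct sp_parts and unpack() are hypothetical names): it splits an IEEE-754 single into the same three parts so the field roles in the listing are easier to follow.

/*
 * Illustrative sketch only -- not kernel code.  It mirrors the three
 * fields used by vsd/vsn/vsm above; the kernel's own unpacked layout
 * lives in vfp.h of the same tree.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sp_parts {             /* hypothetical stand-in, not struct vfp_single */
	uint16_t sign;        /* 1 for negative values */
	int16_t  exponent;    /* biased exponent, bias 127 */
	uint32_t significand; /* 23 stored fraction bits, implicit leading 1 */
};

static struct sp_parts unpack(float f)
{
	uint32_t bits;
	struct sp_parts p;

	memcpy(&bits, &f, sizeof(bits));
	p.sign        = bits >> 31;
	p.exponent    = (bits >> 23) & 0xff;
	p.significand = bits & 0x7fffff;
	return p;
}

int main(void)
{
	struct sp_parts p = unpack(-6.5f);   /* -6.5 = -1.625 * 2^2 */

	printf("sign=%u exponent=%d (unbiased %d) significand=0x%06x\n",
	       (unsigned)p.sign, p.exponent, p.exponent - 127,
	       (unsigned)p.significand);
	return 0;
}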
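
Lines 365, 884 and 1112 above compute the result exponent for fsqrt, multiply and fdiv directly in biased form (bias 127): roughly E(sqrt a) = ((E(a) - 127) >> 1) + 127, E(a*b) = E(a) + E(b) - 127 and E(a/b) = E(a) - E(b) + 127, with the extra "+ 2" and "- 1" constants apparently compensating for where the significand sits internally before vfp_single_normaliseround() settles the final value. The self-contained check below (assuming IEEE-754 single encoding; biased_exp() is a hypothetical helper, not a kernel function) verifies the three identities for operands whose significand result needs no renormalisation.

/*
 * Quick user-space check of the biased-exponent identities used at
 * lines 365, 884 and 1112 above (bias 127 for IEEE-754 single).
 * Operands are chosen so the significand result stays in [1, 2) and
 * no renormalisation bump is needed; in the kernel any leftover
 * adjustment is done by vfp_single_normaliseround().
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int biased_exp(float f)               /* 8-bit biased exponent field */
{
	uint32_t bits;

	memcpy(&bits, &f, sizeof(bits));
	return (bits >> 23) & 0xff;
}

int main(void)
{
	float a = 12.0f, b = 1.25f;          /* significands 1.5 and 1.25 */

	printf("mul : %d == %d\n", biased_exp(a * b),
	       biased_exp(a) + biased_exp(b) - 127);
	printf("div : %d == %d\n", biased_exp(a / b),
	       biased_exp(a) - biased_exp(b) + 127);
	printf("sqrt: %d == %d\n", biased_exp(sqrtf(a)),
	       ((biased_exp(a) - 127) >> 1) + 127);
	return 0;
}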
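
Lines 215 and 247 above show vfp_propagate_nan() copying the chosen NaN into *vsd, and lines 859-861 show the multiply path either propagating a NaN operand or, for the invalid combination of infinity and zero, falling through to vfp_single_default_qnan. A minimal host-side reminder of the same IEEE-754 behaviour, purely illustrative:

/*
 * Illustrative only: host-FPU equivalent of the exceptional paths
 * above -- NaN operands propagate, and the invalid operation
 * 0 * inf yields a quiet NaN (cf. vfp_single_default_qnan, line 861).
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	float nan_in = nanf("");            /* quiet NaN operand */

	printf("nan + 1.0  -> isnan = %d\n", isnan(nan_in + 1.0f));
	printf("0.0 * inf  -> isnan = %d\n", isnan(0.0f * INFINITY));
	return 0;
}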