Matches found only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/arm/vfp/

Lines Matching refs:vdd

207 vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn,
239 *vdd = *nan;
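
The two matches above are in vfp_propagate_nan(), which chooses which operand's NaN to copy into the destination (line 239). A minimal sketch of the general IEEE 754 style of that choice, assuming a layout where the significand's top bit is the quiet-NaN flag; the names and the operand-selection order are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    struct sketch_double {
        int sign;                  /* 0 positive, 1 negative */
        int exponent;              /* biased; 2047 means NaN or infinity */
        uint64_t significand;
    };

    /* A NaN whose quiet bit (here: the significand MSB) is clear signals. */
    static int is_signalling_nan(const struct sketch_double *d)
    {
        return d->exponent == 2047 && d->significand != 0 &&
               !(d->significand >> 63);
    }

    static int is_nan(const struct sketch_double *d)
    {
        return d->exponent == 2047 && d->significand != 0;
    }

    /* Copy one operand's NaN into the destination, quietened; report whether
     * an invalid-operation exception should be raised. */
    static int propagate_nan(struct sketch_double *dst,
                             const struct sketch_double *n,
                             const struct sketch_double *m)
    {
        int invalid = is_signalling_nan(n) || is_signalling_nan(m);
        const struct sketch_double *nan = is_nan(n) ? n : m;

        *dst = *nan;
        dst->significand |= 1ULL << 63;    /* force a quiet NaN */
        return invalid;
    }

    int main(void)
    {
        struct sketch_double snan = { 0, 2047, 1 };          /* signalling NaN */
        struct sketch_double one  = { 0, 1023, 1ULL << 62 };
        struct sketch_double out;

        printf("invalid=%d quiet=%d\n", propagate_nan(&out, &one, &snan),
               (int)(out.significand >> 63));
        return 0;
    }
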
270 struct vfp_double vdm, vdd;
276 struct vfp_double *vdp = &vdd;
316 vdd.sign = 0;
317 vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023;
318 vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31;
320 vfp_double_dump("sqrt estimate1", &vdd);
323 vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand);
325 vfp_double_dump("sqrt estimate2", &vdd);
330 if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) {
331 if (vdd.significand < 2) {
332 vdd.significand = ~0ULL;
336 mul64to128(&termh, &terml, vdd.significand, vdd.significand);
339 vdd.significand -= 1;
340 shift64left(&termh, &terml, vdd.significand);
344 vdd.significand |= (remh | reml) != 0;
347 vdd.significand = vfp_shiftright64jamming(vdd.significand, 1);
349 return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt");
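
Lines 270 to 349 are the body of the double-precision square root: the unbiased exponent is halved and re-biased (line 317), an initial significand estimate is refined with an estimated division (lines 318 to 325), and the low bits are verified and a sticky bit set before normalising and rounding (lines 330 to 349). A minimal sketch of the same estimate-and-refine idea in plain C, using frexp/ldexp and Newton-Raphson steps instead of the kernel's fixed-point helpers:

    #include <stdio.h>
    #include <math.h>

    /* One Newton-Raphson step for sqrt(x): s' = (s + x/s) / 2. */
    static double refine(double x, double s)
    {
        return 0.5 * (s + x / s);
    }

    static double sketch_sqrt(double x)
    {
        int exp;
        double frac;

        if (x <= 0.0)
            return x == 0.0 ? 0.0 : NAN;   /* negative input is invalid */

        frac = frexp(x, &exp);             /* x = frac * 2^exp, frac in [0.5, 1) */
        if (exp & 1) {                     /* keep the exponent even so  */
            frac *= 2.0;                   /* halving it stays exact     */
            exp -= 1;
        }

        /* Crude estimate: sqrt(frac) ~ (1 + frac) / 2, scaled by 2^(exp/2). */
        double s = ldexp(0.5 * (1.0 + frac), exp / 2);

        for (int i = 0; i < 4; i++)        /* quadratic convergence */
            s = refine(x, s);
        return s;
    }

    int main(void)
    {
        printf("%.17g %.17g\n", sketch_sqrt(2.0), sqrt(2.0));
        return 0;
    }
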
683 vfp_double_fadd_nonnumber(struct vfp_double *vdd, struct vfp_double *vdn,
718 return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
720 *vdd = *vdp;
725 vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn,
754 return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr);
761 *vdd = *vdn;
775 vdd->sign = vfp_sign_negate(vdd->sign);
778 vdd->sign = (fpscr & FPSCR_RMODE_MASK) ==
784 vdd->significand = m_sig;
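
Lines 683 to 784 cover vfp_double_fadd_nonnumber(), which handles infinities and NaNs, and vfp_double_add(), which aligns the significands by exponent, adds or subtracts the magnitudes depending on the signs, and fixes up the result sign, including the rounding-mode-dependent sign of an exact zero (lines 775 to 778). A minimal sketch of that magnitude arithmetic, assuming the first operand already has the larger exponent and omitting sticky-bit jamming, carry handling, and the exact-zero rounding-mode rule; the names are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    struct sketch_double {
        int sign;               /* 0 positive, 1 negative */
        int exponent;           /* biased */
        uint64_t significand;
    };

    /* Add or subtract magnitudes; assumes n->exponent >= m->exponent. */
    static void sketch_add(struct sketch_double *d,
                           const struct sketch_double *n,
                           const struct sketch_double *m)
    {
        int shift = n->exponent - m->exponent;
        uint64_t m_sig = shift < 64 ? m->significand >> shift : 0;

        *d = *n;
        if (n->sign == m->sign) {
            d->significand += m_sig;        /* same sign: magnitudes add */
        } else if (n->significand >= m_sig) {
            d->significand -= m_sig;        /* larger magnitude keeps its sign */
        } else {
            d->significand = m_sig - n->significand;
            d->sign = m->sign;              /* result takes m's sign */
        }
    }

    int main(void)
    {
        struct sketch_double a = { 0, 1030, 3ULL << 61 };
        struct sketch_double b = { 1, 1028, 3ULL << 61 };
        struct sketch_double r;

        sketch_add(&r, &a, &b);
        printf("sign %d exp %d sig %#llx\n", r.sign, r.exponent,
               (unsigned long long)r.significand);
        return 0;
    }
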
790 vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn,
808 vdd->sign = vdn->sign ^ vdm->sign;
815 return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
817 *vdd = vfp_double_default_qnan;
820 vdd->exponent = vdn->exponent;
821 vdd->significand = 0;
830 vdd->exponent = 0;
831 vdd->significand = 0;
840 vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2;
841 vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand);
843 vfp_double_dump("VDD", vdd);
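
Lines 790 to 843 are vfp_double_multiply(): the result sign is the XOR of the operand signs (line 808), the biased exponents add with a bias correction (line 840), and the significand becomes the high 64 bits of a 64x64-bit product (line 841). A minimal sketch of that high-half multiply, split into 32-bit partial products so it stays portable, with any discarded low bits folded into the LSB as a sticky bit for later rounding; the function name is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* High 64 bits of n * m via 32-bit partial products; any nonzero low bits
     * are ORed into the LSB so rounding can see the product was inexact. */
    static uint64_t sketch_hi64_multiply64(uint64_t n, uint64_t m)
    {
        uint64_t nl = (uint32_t)n, nh = n >> 32;
        uint64_t ml = (uint32_t)m, mh = m >> 32;

        uint64_t lo   = nl * ml;
        uint64_t mid1 = nh * ml;
        uint64_t mid2 = nl * mh;
        uint64_t hi   = nh * mh;

        /* Carry out of the low 64 bits of the 128-bit product. */
        uint64_t cross = (lo >> 32) + (uint32_t)mid1 + (uint32_t)mid2;
        uint64_t rh = hi + (mid1 >> 32) + (mid2 >> 32) + (cross >> 32);
        uint64_t rl = (cross << 32) | (uint32_t)lo;

        return rh | (rl != 0);
    }

    int main(void)
    {
        /* 2^63 * 2^63 = 2^126, whose high 64 bits are 2^62. */
        printf("%#llx\n",
               (unsigned long long)sketch_hi64_multiply64(1ULL << 63, 1ULL << 63));
        return 0;
    }
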
853 struct vfp_double vdd, vdp, vdn, vdm;
872 exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
874 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func);
918 struct vfp_double vdd, vdn, vdm;
929 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
930 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul");
938 struct vfp_double vdd, vdn, vdm;
949 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
950 vdd.sign = vfp_sign_negate(vdd.sign);
952 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul");
960 struct vfp_double vdd, vdn, vdm;
971 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
973 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd");
981 struct vfp_double vdd, vdn, vdm;
997 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
999 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub");
1007 struct vfp_double vdd, vdn, vdm;
1014 vdd.sign = vdn.sign ^ vdm.sign;
1064 vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1;
1068 vdd.exponent++;
1070 vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand);
1071 if ((vdd.significand & 0x1ff) <= 2) {
1073 mul64to128(&termh, &terml, vdm.significand, vdd.significand);
1076 vdd.significand -= 1;
1079 vdd.significand |= (reml != 0);
1081 return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv");
1084 exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
1086 vfp_put_double(vfp_double_pack(&vdd), dd);
1090 exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
1094 vdd.exponent = 0;
1095 vdd.significand = 0;
1101 vdd.exponent = 2047;
1102 vdd.significand = 0;
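
Lines 1007 to 1102 are vfp_double_fdiv(): the sign and exponent are formed from the operands (lines 1014 and 1064 to 1068), the quotient significand is estimated (line 1070), and when the low bits suggest the estimate may be off (line 1071) it is multiplied back against the divisor and stepped down until the remainder is non-negative, with any leftover remainder folded into a sticky bit (lines 1073 to 1079); the remaining matches are the special-case exits for NaNs, invalid 0/0 or inf/inf combinations, and zero or infinite operands. A minimal sketch of the estimate-then-correct step, assuming a compiler that provides __int128 in place of the kernel's 128-bit helpers; the deliberate over-estimate in main() just exercises the correction loop:

    #include <stdint.h>
    #include <stdio.h>

    /* Correct a quotient estimate that may be slightly too large: multiply it
     * back against the divisor, step it down while the remainder is negative,
     * then fold any nonzero remainder into the LSB as a sticky bit. */
    static uint64_t divide_and_correct(unsigned __int128 n, uint64_t m,
                                       uint64_t estimate)
    {
        __int128 rem = (__int128)n - (__int128)estimate * m;

        while (rem < 0) {          /* over-estimate: back off one ULP at a time */
            estimate -= 1;
            rem += m;
        }
        return estimate | (rem != 0);
    }

    int main(void)
    {
        unsigned __int128 n = (unsigned __int128)100 << 32;
        uint64_t m = 7;
        uint64_t exact = (uint64_t)(n / m);

        /* Start two too high so the correction loop has work to do. */
        printf("%llu %llu\n",
               (unsigned long long)divide_and_correct(n, m, exact + 2),
               (unsigned long long)exact);
        return 0;
    }
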