Lines Matching defs:fpscr

70 u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
129 rmode = fpscr & FPSCR_RMODE_MASK;
208 struct vfp_double *vdm, u32 fpscr)
218 if (fpscr & FPSCR_DEFAULT_NAN)
250 static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr)
256 static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr)
262 static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr)
268 static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr)
279 ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr);
349 return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt");
358 static u32 vfp_compare(int dd, int signal_on_qnan, int dm, u32 fpscr)
419 static u32 vfp_double_fcmp(int dd, int unused, int dm, u32 fpscr)
421 return vfp_compare(dd, 0, dm, fpscr);
424 static u32 vfp_double_fcmpe(int dd, int unused, int dm, u32 fpscr)
426 return vfp_compare(dd, 1, dm, fpscr);
429 static u32 vfp_double_fcmpz(int dd, int unused, int dm, u32 fpscr)
431 return vfp_compare(dd, 0, VFP_REG_ZERO, fpscr);
434 static u32 vfp_double_fcmpez(int dd, int unused, int dm, u32 fpscr)
436 return vfp_compare(dd, 1, VFP_REG_ZERO, fpscr);
439 static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr)
475 return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts");
482 static u32 vfp_double_fuito(int dd, int unused, int dm, u32 fpscr)
491 return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito");
494 static u32 vfp_double_fsito(int dd, int unused, int dm, u32 fpscr)
503 return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito");
506 static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr)
510 int rmode = fpscr & FPSCR_RMODE_MASK;
580 static u32 vfp_double_ftouiz(int sd, int unused, int dm, u32 fpscr)
585 static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr)
589 int rmode = fpscr & FPSCR_RMODE_MASK;
655 static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr)
684 struct vfp_double *vdm, u32 fpscr)
718 return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
726 struct vfp_double *vdm, u32 fpscr)
754 return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr);
778 vdd->sign = (fpscr & FPSCR_RMODE_MASK) ==
791 struct vfp_double *vdm, u32 fpscr)
815 return vfp_propagate_nan(vdd, vdn, vdm, fpscr);
851 vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, char *func)
864 exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr);
874 exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr);
876 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func);
886 static u32 vfp_double_fmac(int dd, int dn, int dm, u32 fpscr)
888 return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, 0, "fmac");
894 static u32 vfp_double_fnmac(int dd, int dn, int dm, u32 fpscr)
896 return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
902 static u32 vfp_double_fmsc(int dd, int dn, int dm, u32 fpscr)
904 return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
910 static u32 vfp_double_fnmsc(int dd, int dn, int dm, u32 fpscr)
912 return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
918 static u32 vfp_double_fmul(int dd, int dn, int dm, u32 fpscr)
931 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
932 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul");
938 static u32 vfp_double_fnmul(int dd, int dn, int dm, u32 fpscr)
951 exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr);
954 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul");
960 static u32 vfp_double_fadd(int dd, int dn, int dm, u32 fpscr)
973 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
975 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd");
981 static u32 vfp_double_fsub(int dd, int dn, int dm, u32 fpscr)
999 exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr);
1001 return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub");
1007 static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
1083 return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv");
1086 exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr);
1092 exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr);
1127 u32 vfp_double_cpdo(u32 inst, u32 fpscr)
1137 vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK));
1165 veclen = fpscr & FPSCR_LENGTH_MASK;
1187 except = fop->fn(dest, dn, dm, fpscr);
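
Several of the matched lines extract control fields from the fpscr value threaded through these helpers (apparently the kernel's software VFP double-precision emulation): the rounding mode via fpscr & FPSCR_RMODE_MASK (source lines 129, 510, 589), the short-vector stride (source line 1137), and the short-vector length (source line 1165). The following is a minimal, self-contained sketch of how those masks decode, assuming the architectural FPSCR layout (RMode in bits [23:22], Stride in bits [21:20], Len in bits [18:16]); the mask names mirror the ones in the listing, but the standalone definitions and the example value below are illustrative, not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative FPSCR field definitions, assuming the architectural
 * layout (RMode bits [23:22], Stride bits [21:20], Len bits [18:16]).
 * The kernel keeps its own definitions in its VFP headers.
 */
#define FPSCR_RMODE_BIT     22
#define FPSCR_RMODE_MASK    (3u << FPSCR_RMODE_BIT)
#define FPSCR_STRIDE_BIT    20
#define FPSCR_STRIDE_MASK   (3u << FPSCR_STRIDE_BIT)
#define FPSCR_LENGTH_BIT    16
#define FPSCR_LENGTH_MASK   (7u << FPSCR_LENGTH_BIT)

int main(void)
{
	/*
	 * Hypothetical FPSCR value: RMode = 0b11 (round toward zero),
	 * Stride = 0b11 (register stride 2), Len = 3 (vectors of 4 registers).
	 */
	uint32_t fpscr = (3u << FPSCR_RMODE_BIT) |
			 (3u << FPSCR_STRIDE_BIT) |
			 (3u << FPSCR_LENGTH_BIT);

	/* Rounding mode, kept mask-aligned as in the listing (lines 129, 510, 589). */
	uint32_t rmode = fpscr & FPSCR_RMODE_MASK;

	/*
	 * Stride decodes to 1 or 2: only the all-ones encoding means a
	 * stride of 2, which is the comparison seen on source line 1137.
	 */
	unsigned int vecstride =
		1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);

	/* Len = 0 means a scalar operation; Len = n means a short vector of n+1 registers. */
	unsigned int veclen = (fpscr & FPSCR_LENGTH_MASK) >> FPSCR_LENGTH_BIT;

	printf("rmode field=%u vecstride=%u vector length=%u\n",
	       rmode >> FPSCR_RMODE_BIT, vecstride, veclen + 1);
	return 0;
}

Note that the listing's own line 1165 keeps veclen in its mask-aligned (unshifted) form rather than shifting it down as this sketch does; the shift here is only to make the decoded value easy to print.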