Lines Matching defs:xl

47 #define xl r0
53 #define xl r1
97 teqeq xl, yl
98 orrnes ip, r4, xl
105 @ corresponding arg in xh-xl, and positive exponent difference in r5.
111 eor yl, xl, yl
113 eor xl, yl, xl
115 eor yl, xl, yl
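
The three eor hits at lines 111-115 are the classic three-XOR swap: they exchange xl and yl without needing a scratch register (the matching xh/yh halves are presumably swapped the same way on the neighbouring, non-matching lines). A minimal C sketch of the trick, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* Three-XOR swap: exchanges two words without a temporary.
     * It only works for distinct storage; if a and b alias, the
     * first XOR zeroes both. */
    static void xor_swap(uint32_t *a, uint32_t *b)
    {
        *a ^= *b;   /* a = a ^ b                    */
        *b ^= *a;   /* b = b ^ (a ^ b) = old a      */
        *a ^= *b;   /* a = (a ^ b) ^ old a = old b  */
    }

    int main(void)
    {
        uint32_t xl = 0x12345678, yl = 0x9abcdef0;
        xor_swap(&xl, &yl);
        printf("%08x %08x\n", xl, yl);   /* prints 9abcdef0 12345678 */
        return 0;
    }
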
119 @ already in xh-xl. We need up to 54 bits to handle proper rounding
130 rsbs xl, xl, #0
149 @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits in ip.
153 adds xl, xl, yl, lsr r5
155 adds xl, xl, yh, lsl lr
163 adds xl, xl, yh, asr r5
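
The hits at lines 149-165 are the alignment step described by the comment at line 149: the smaller operand's mantissa (yh-yl) is shifted right by the exponent difference held in r5, the bits shifted out are kept (in ip) so rounding can still see them, and the aligned value is added into xh-xl. A hedged C rendering of that idea, using a 64-bit stand-in for each register pair and made-up names:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift the smaller mantissa y right by the exponent difference,
     * remember the bits that fall off (the role played by ip in the
     * listing), then add the aligned value to the larger mantissa x. */
    static uint64_t align_and_add(uint64_t x, uint64_t y, unsigned diff,
                                  uint64_t *leftover)
    {
        if (diff == 0) {
            *leftover = 0;
        } else if (diff < 64) {
            *leftover = y << (64 - diff);   /* discarded low bits, top-aligned */
            y >>= diff;
        } else {
            *leftover = (y != 0);           /* everything becomes sticky */
            y = 0;
        }
        return x + y;
    }

    int main(void)
    {
        uint64_t leftover;
        uint64_t sum = align_and_add(0x0010000000000000ULL,
                                     0x0010000000000005ULL,
                                     3, &leftover);
        printf("sum=%016llx leftover=%016llx\n",
               (unsigned long long)sum, (unsigned long long)leftover);
        /* sum=0012000000000000 leftover=a000000000000000 */
        return 0;
    }
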
166 @ We now have a result in xh-xl-ip.
167 @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
171 rscs xl, xl, #0
183 movs xl, xl, rrx
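
The comments at lines 166-167 say the result in xh-xl-ip is replaced by its absolute value, with the sign parked in r5. The rsbs/rscs instructions seen at lines 130 and 171 are the usual multi-word negation: subtract each word from zero and let the borrow ripple upward. A small C stand-in for a two-word value, assuming that reading of the code (negate64 is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    /* Two's-complement negation of a 64-bit value split into two words. */
    static void negate64(uint32_t *hi, uint32_t *lo)
    {
        uint32_t l = 0u - *lo;                 /* rsbs lo, lo, #0               */
        uint32_t borrow = (*lo != 0);          /* low word borrowed?            */
        uint32_t h = 0u - *hi - borrow;        /* rscs hi, hi, #0               */
        *lo = l;
        *hi = h;
    }

    int main(void)
    {
        uint32_t hi = 0x00000001, lo = 0x00000000;   /* value 0x1_00000000 */
        negate64(&hi, &lo);
        printf("%08x %08x\n", hi, lo);               /* ffffffff 00000000 */
        return 0;
    }
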
192 @ Our result is now properly aligned into xh-xl, remaining bits in ip.
194 @ LSB of xl = 0.
198 moveqs ip, xl, lsr #1
199 adcs xl, xl, #0
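
Lines 198-199, together with the comment at line 194 ("LSB of xl = 0"), look like round-to-nearest-even: the leftover bits in ip are turned into a single carry (the compare against the halfway value does not mention xl, so it is not listed), and in the exact-tie case moveqs substitutes the LSB of xl for the carry so the result rounds to an even value. A C sketch under that interpretation:

    #include <stdint.h>
    #include <stdio.h>

    /* leftover holds the bits below the mantissa; its MSB is the guard bit. */
    static uint64_t round_mantissa(uint64_t mant, uint32_t leftover)
    {
        unsigned carry;

        if (leftover > 0x80000000u)        /* more than half: round up   */
            carry = 1;
        else if (leftover < 0x80000000u)   /* less than half: round down */
            carry = 0;
        else                               /* exactly half: round to even */
            carry = (unsigned)(mant & 1);

        return mant + carry;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)round_mantissa(5, 0x80000000u)); /* 6 */
        printf("%llu\n", (unsigned long long)round_mantissa(4, 0x80000000u)); /* 4 */
        printf("%llu\n", (unsigned long long)round_mantissa(4, 0x80000001u)); /* 5 */
        return 0;
    }
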
207 adcs xl, xl, xl
221 moveq xh, xl
222 moveq xl, #0
241 moveq xh, xl
242 moveq xl, #0
259 mov xl, xh, lsl ip
268 orrle xh, xh, xl, lsr ip
269 movle xl, xl, lsl r2
288 mov xl, xl, lsr r4
289 orr xl, xl, xh, lsl r2
294 @ a register switch from xh to xl.
297 mov xl, xl, lsr r2
298 orr xl, xl, xh, lsl r4
303 @ from xh to xl.
304 2: mov xl, xh, lsr r4
325 teqeq xl, yl
329 orrs ip, r4, xl
331 moveq xl, yl
338 movne xl, #0
344 movs xl, xl, lsl #1
357 mov xl, #0
361 @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
362 @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
365 @ otherwise return xh-xl (which is INF or -INF)
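
The comments at lines 361-365 spell out the special-value rules for the addition path. A hedged C paraphrase of those rules, plus the NaN cases that the non-matching lines in between presumably cover, assuming raw IEEE-754 double bit patterns and that at least one operand already has the all-ones exponent (function names and the quiet-NaN constant are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static int is_inf_or_nan(uint64_t a)
    {
        return (a & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
    }

    static int is_nan(uint64_t a)
    {
        return is_inf_or_nan(a) && (a & 0x000fffffffffffffULL) != 0;
    }

    static uint64_t add_special(uint64_t x, uint64_t y)
    {
        if (!is_inf_or_nan(x))
            return y;                          /* y is the Inf/NaN          */
        if (!is_inf_or_nan(y))
            return x;                          /* x is the Inf/NaN          */
        if (is_nan(x) || is_nan(y))
            return 0x7ff8000000000000ULL;      /* a quiet NaN               */
        if ((x ^ y) >> 63)
            return 0x7ff8000000000000ULL;      /* Inf + -Inf -> NaN         */
        return x;                              /* Inf + Inf of the same sign */
    }

    int main(void)
    {
        uint64_t inf = 0x7ff0000000000000ULL, ninf = 0xfff0000000000000ULL;
        printf("%016llx\n", (unsigned long long)add_special(inf, ninf)); /* NaN */
        printf("%016llx\n", (unsigned long long)add_special(inf, inf));  /* Inf */
        return 0;
    }
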
369 movne xl, yl
372 movne yl, xl
373 orrs r4, xl, xh, lsl #12
394 .ifnc xl, r0
395 mov xl, r0
414 .ifnc xl, r0
415 mov xl, r0
429 mov xl, r2, lsl #28 @ retrieve remaining bits
502 mov xl, ip
517 mov ip, xl, lsl r3
518 mov xl, xl, lsr r2
519 orr xl, xl, xh, lsl r3
567 orrs r5, xl, xh, lsl #12
580 mov r7, xl, lsr #16
584 bic xl, xl, r7, lsl #16
588 mul ip, xl, yl
589 mul fp, xl, r8
596 mul fp, xl, sl
616 mul fp, xl, yh
636 umull ip, lr, xl, yl
640 umlal lr, r5, xl, yh
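
The umull/umlal hits around lines 636-640 compute the wide product of the two mantissas held in xh-xl and yh-yl; the mul-based block at lines 580-616 appears to do the same thing one level down, from 16-bit halves, for cores without umull. A plain C stand-in (not the library's code) that builds the 128-bit product from four 32x32->64 partial products:

    #include <stdint.h>
    #include <stdio.h>

    /* 128-bit product of xh:xl and yh:yl, returned as hi:lo. */
    static void mul64x64(uint32_t xh, uint32_t xl, uint32_t yh, uint32_t yl,
                         uint64_t *hi, uint64_t *lo)
    {
        uint64_t ll = (uint64_t)xl * yl;
        uint64_t lh = (uint64_t)xl * yh;
        uint64_t hl = (uint64_t)xh * yl;
        uint64_t hh = (uint64_t)xh * yh;

        /* Middle column: upper half of ll plus the low halves of the
         * cross products; small enough not to overflow 64 bits. */
        uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;

        *lo = (mid << 32) | (uint32_t)ll;
        *hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
    }

    int main(void)
    {
        uint64_t hi, lo;
        mul64x64(0x00140000, 0x00000000, 0x00140000, 0x00000000, &hi, &lo);
        printf("%016llx %016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        /* 0000019000000000 0000000000000000 */
        return 0;
    }
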
663 mov xl, r5, lsl #11
664 orr xl, xl, lr, lsr #21
674 moveqs lr, xl, lsr #1
675 adcs xl, xl, #0
683 orr xl, xl, yl
701 movle xl, #0
715 mov r3, xl, lsl r5
716 mov xl, xl, lsr r4
717 orr xl, xl, xh, lsl r5
720 adds xl, xl, r3, lsr #31
723 biceq xl, xl, r3, lsr #31
727 @ a register switch from xh to xl. Then round.
730 mov r3, xl, lsl r4
731 mov xl, xl, lsr r5
732 orr xl, xl, xh, lsl r4
734 adds xl, xl, r3, lsr #31
737 biceq xl, xl, r3, lsr #31
741 @ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
743 orr lr, lr, xl, lsl r5
744 mov r3, xl, lsr r4
746 mov xl, xh, lsr r4
748 bic xl, xl, xh, lsr r4
749 add xl, xl, r3, lsr #31
751 biceq xl, xl, r3, lsr #31
760 1: movs xl, xl, lsl #1
785 orrs r6, xl, xh, lsl #1
793 mov xl, #0
797 orrs r6, xl, xh, lsl #1
798 moveq xl, yl
804 orrs r6, xl, xh, lsl #12
809 movne xl, yl
822 mov xl, #0
865 orr r5, r5, xl, lsr #24
866 mov r6, xl, lsl #8
886 mov xl, #0x00100000
894 orrcs xl, xl, ip
901 orrcs xl, xl, ip, lsr #1
908 orrcs xl, xl, ip, lsr #2
915 orrcs xl, xl, ip, lsr #3
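
The orrcs xl, xl, ip / ip, lsr #1 / ip, lsr #2 / ip, lsr #3 pattern at lines 894-915 reads like an unrolled shift-and-subtract division that retires several quotient bits per round, with the trial compares and conditional subtractions sitting on the non-matching lines in between. A one-bit-per-step C sketch of the same idea, not the unrolled assembly:

    #include <stdint.h>
    #include <stdio.h>

    /* Produce `bits` quotient bits of num/den, most significant first. */
    static uint64_t div_mantissa(uint64_t num, uint64_t den, int bits)
    {
        uint64_t quotient = 0;

        for (int i = 0; i < bits; i++) {
            quotient <<= 1;
            if (num >= den) {          /* trial subtraction succeeds... */
                num -= den;
                quotient |= 1;         /* ...so this quotient bit is 1  */
            }
            num <<= 1;                 /* move to the next bit position */
        }
        return quotient;
    }

    int main(void)
    {
        /* 3/2 of implicit-one mantissas: expect binary 1.1, i.e. 0xc0 here. */
        uint64_t q = div_mantissa(3ULL << 52, 2ULL << 52, 8);
        printf("%llx\n", (unsigned long long)q);   /* c0 */
        return 0;
    }
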
932 orr xh, xh, xl
933 mov xl, #0
939 orreq xh, xh, xl
940 moveq xl, #0
950 moveqs ip, xl, lsr #1
951 adcs xl, xl, #0
983 orrs r4, xl, xh, lsl #12
987 mov xl, yl
994 mov xl, yl
998 orrs r6, xl, xh, lsl #1
1002 orrs r4, xl, xh, lsl #1
1043 2: orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
1046 teqeq xl, yl @ and xl == yl
1058 cmpeq xl, yl
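
The comments at lines 1043 and 1046 describe the equality test in the comparison path: two doubles compare equal either when both are zero of either sign (the lsl #1 discards the sign bit) or when their words match exactly; the cmpeq at line 1058 then continues an ordered compare. A hedged C version of just the equality part, assuming NaNs were already filtered out as the surrounding code appears to do:

    #include <stdint.h>
    #include <stdio.h>

    /* Raw bit-pattern equality with +0.0 and -0.0 treated as equal. */
    static int double_bits_equal(uint64_t x, uint64_t y)
    {
        int x_is_zero = (x << 1) == 0;     /* ignore the sign bit */
        int y_is_zero = (y << 1) == 0;

        if (x_is_zero && y_is_zero)
            return 1;
        return x == y;
    }

    int main(void)
    {
        printf("%d\n", double_bits_equal(0x0000000000000000ULL,
                                         0x8000000000000000ULL));  /* 1: +0 == -0 */
        printf("%d\n", double_bits_equal(0x3ff0000000000000ULL,
                                         0xbff0000000000000ULL));  /* 0: 1.0 != -1.0 */
        return 0;
    }
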
1070 orrs ip, xl, xh, lsl #12
1177 orrs ip, xl, xh, lsl #12
1212 orr r3, r3, xl, lsr #21
1221 2: orrs xl, xl, xh, lsl #12
1253 orr r3, r3, xl, lsr #21
1260 2: orrs xl, xl, xh, lsl #12
1287 mov r2, xl, lsl #3
1288 orr xl, ip, xl, lsr #29
1290 adc r0, xl, r3, lsl #2
1308 movs r3, xl, lsl ip
1309 mov xl, xl, lsr r2
1310 orrne xl, xl, #1 @ fold r3 for rounding considerations.
1313 orr xl, xl, r3, lsl ip
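
Line 1310's comment, "fold r3 for rounding considerations", is the sticky-bit idiom: when a right shift discards bits, OR a 1 into the result if any discarded bit was set, so a later round-to-nearest step still knows the value was inexact. A small C sketch of that folding (shift_right_sticky is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    /* Shift v right by n (0 <= n < 32) and fold the discarded bits
     * into the LSB of the result as a sticky bit. */
    static uint32_t shift_right_sticky(uint32_t v, unsigned n)
    {
        uint32_t lost = n ? (v << (32 - n)) : 0;   /* bits that fall off */
        uint32_t r = v >> n;
        return lost ? (r | 1) : r;
    }

    int main(void)
    {
        printf("%08x\n", shift_right_sticky(0x80000100u, 12));  /* 00080001 */
        printf("%08x\n", shift_right_sticky(0x80000000u, 12));  /* 00080000 */
        return 0;
    }
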
1321 orrs r3, xl, xh, lsl #12