/* ieee754-sf.S single-precision floating point support for ARM

   Copyright (C) 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Nicolas Pitre (nico@cam.org)

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2, or (at your option) any
   later version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file into combinations with other programs,
   and to distribute those combinations without any restriction coming
   from the use of this file.  (The General Public License restrictions
   do apply in other respects; for example, they cover modification of
   the file, and distribution when not linked into a combined
   executable.)

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/*
 * Notes:
 *
 * The goal of this code is to be as fast as possible.  This is
 * not meant to be easy to understand for the casual reader.
 *
 * Only the default rounding mode is supported, for best performance.
 * Exceptions aren't supported yet, but they could be added quite easily
 * if necessary without impacting performance.
 */
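
/* For orientation, a rough C sketch of the IEEE 754 single-precision
   layout manipulated throughout this file (these helper names are
   illustrative only and do not exist in the sources):

	#include <stdint.h>

	static inline uint32_t sf_sign(uint32_t u) { return u & 0x80000000; }
	static inline uint32_t sf_exp(uint32_t u)  { return (u >> 23) & 0xff; }
	static inline uint32_t sf_mant(uint32_t u) { return u & 0x007fffff; }

   An exponent field of 0 denotes zero or a denormal; 0xff denotes INF
   (mantissa == 0) or NAN (mantissa != 0).  Otherwise the significand
   is 0x00800000 | sf_mant(u), i.e. there is a hidden leading 1.  */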

#ifdef L_negsf2

ARM_FUNC_START negsf2
ARM_FUNC_ALIAS aeabi_fneg negsf2

	eor	r0, r0, #0x80000000	@ flip sign bit
	RET

	FUNC_END aeabi_fneg
	FUNC_END negsf2

#endif
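
/* Negation is just the sign-bit flip performed above; that is correct
   for zeroes, denormals, INF and NAN alike.  A hedged C equivalent
   (the function name is illustrative):

	#include <stdint.h>

	uint32_t negsf2_model(uint32_t a)
	{
		return a ^ 0x80000000u;	// flip bit 31, leave the rest alone
	}
 */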

#ifdef L_addsubsf3

ARM_FUNC_START aeabi_frsub

	eor	r0, r0, #0x80000000	@ flip sign bit of first arg
	b	1f

ARM_FUNC_START subsf3
ARM_FUNC_ALIAS aeabi_fsub subsf3

	eor	r1, r1, #0x80000000	@ flip sign bit of second arg
#if defined(__INTERWORKING_STUBS__)
	b	1f			@ Skip Thumb-code prologue
#endif

ARM_FUNC_START addsf3
ARM_FUNC_ALIAS aeabi_fadd addsf3

1:	@ Look for zeroes, equal values, INF, or NAN.
	movs	r2, r0, lsl #1
	movnes	r3, r1, lsl #1
	teqne	r2, r3
	mvnnes	ip, r2, asr #24
	mvnnes	ip, r3, asr #24
	beq	LSYM(Lad_s)

	@ Compute exponent difference.  Make largest exponent in r2,
	@ corresponding arg in r0, and positive exponent difference in r3.
	mov	r2, r2, lsr #24
	rsbs	r3, r2, r3, lsr #24
	addgt	r2, r2, r3
	eorgt	r1, r0, r1
	eorgt	r0, r1, r0
	eorgt	r1, r0, r1
	rsblt	r3, r3, #0

	@ If exponent difference is too large, return largest argument
	@ already in r0.  We need up to 25 bits to handle proper rounding
	@ of 0x1p25 - 1.1.
	cmp	r3, #25
	RETc(hi)

	@ Convert mantissa to signed integer.
	tst	r0, #0x80000000
	orr	r0, r0, #0x00800000
	bic	r0, r0, #0xff000000
	rsbne	r0, r0, #0
	tst	r1, #0x80000000
	orr	r1, r1, #0x00800000
	bic	r1, r1, #0xff000000
	rsbne	r1, r1, #0

	@ If exponent == difference, one or both args were denormalized.
	@ Since this is not a common case, rescale them out of line.
	teq	r2, r3
	beq	LSYM(Lad_d)
LSYM(Lad_x):

	@ Compensate for the exponent overlapping the mantissa MSB added later
	sub	r2, r2, #1

	@ Shift and add second arg to first arg in r0.
	@ Keep leftover bits in r1.
	adds	r0, r0, r1, asr r3
	rsb	r3, r3, #32
	mov	r1, r1, lsl r3

	@ Keep absolute value in r0-r1, sign in r3 (the N flag was set above)
	and	r3, r0, #0x80000000
	bpl	LSYM(Lad_p)
	rsbs	r1, r1, #0
	rsc	r0, r0, #0
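
/* The Lad_x sequence above adds the aligned significands while keeping
   every discarded bit of the smaller one, left-justified in r1, for
   the later rounding decision.  A hedged C fragment of that step
   (names illustrative; diff is 0..25 here):

	#include <stdint.h>

	int32_t align_and_add(int32_t m_large, int32_t m_small,
			      int diff, uint32_t *rest)
	{
		// The hardware shifter makes the diff == 0 case
		// fall out for free in the assembly version.
		*rest = diff ? (uint32_t)m_small << (32 - diff) : 0;
		return m_large + (m_small >> diff);	// arithmetic shift
	}
 */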

	@ Determine how to normalize the result.
LSYM(Lad_p):
	cmp	r0, #0x00800000
	bcc	LSYM(Lad_a)
	cmp	r0, #0x01000000
	bcc	LSYM(Lad_e)

	@ Result needs to be shifted right.
	movs	r0, r0, lsr #1
	mov	r1, r1, rrx
	add	r2, r2, #1

	@ Make sure we did not bust our exponent.
	cmp	r2, #254
	bhs	LSYM(Lad_o)

	@ Our result is now properly aligned into r0, remaining bits in r1.
	@ Pack final result together.
	@ Round with MSB of r1. If halfway between two numbers, round towards
	@ LSB of r0 = 0.
LSYM(Lad_e):
	cmp	r1, #0x80000000
	adc	r0, r0, r2, lsl #23
	biceq	r0, r0, #1
	orr	r0, r0, r3
	RET
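
/* The four instructions above implement round-to-nearest-even in two
   steps: the carry out of the compare rounds up whenever the leftover
   bits are at or above the halfway point, and the LSB is cleared again
   when they were exactly halfway.  A hedged C rendition (names
   illustrative; exp is pre-decremented as in the code above):

	#include <stdint.h>

	uint32_t round_pack(uint32_t mant, uint32_t rest, uint32_t exp,
			    uint32_t sign)
	{
		uint32_t r = mant + (exp << 23) + (rest >= 0x80000000u);
		if (rest == 0x80000000u)	// exact tie: force even
			r &= ~1u;
		return r | sign;
	}
 */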

	@ Result must be shifted left and exponent adjusted.
LSYM(Lad_a):
	movs	r1, r1, lsl #1
	adc	r0, r0, r0
	tst	r0, #0x00800000
	sub	r2, r2, #1
	bne	LSYM(Lad_e)

	@ No rounding necessary since r1 will always be 0 at this point.
LSYM(Lad_l):

#if __ARM_ARCH__ < 5

	movs	ip, r0, lsr #12
	moveq	r0, r0, lsl #12
	subeq	r2, r2, #12
	tst	r0, #0x00ff0000
	moveq	r0, r0, lsl #8
	subeq	r2, r2, #8
	tst	r0, #0x00f00000
	moveq	r0, r0, lsl #4
	subeq	r2, r2, #4
	tst	r0, #0x00c00000
	moveq	r0, r0, lsl #2
	subeq	r2, r2, #2
	cmp	r0, #0x00800000
	movcc	r0, r0, lsl #1
	sbcs	r2, r2, #0

#else

	clz	ip, r0
	sub	ip, ip, #8
	subs	r2, r2, ip
	mov	r0, r0, lsl ip

#endif
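
/* Pre-ARMv5 cores have no clz, so the first variant above normalizes
   with a branchless binary search over the shift amounts 12/8/4/2/1.
   A hedged C model of the same idea, for a value that is expected to
   end up with bit 23 set (names illustrative):

	#include <stdint.h>

	void normalize(uint32_t *mant, int *exp)
	{
		uint32_t m = *mant;
		int e = *exp;

		if ((m >> 12) == 0)	   { m <<= 12; e -= 12; }
		if ((m & 0x00ff0000) == 0) { m <<= 8;  e -= 8; }
		if ((m & 0x00f00000) == 0) { m <<= 4;  e -= 4; }
		if ((m & 0x00c00000) == 0) { m <<= 2;  e -= 2; }
		if (m < 0x00800000)	   { m <<= 1;  e -= 1; }

		*mant = m;
		*exp = e;
	}
 */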

	@ Final result with sign
	@ If exponent negative, denormalize result.
	addge	r0, r0, r2, lsl #23
	rsblt	r2, r2, #0
	orrge	r0, r0, r3
	orrlt	r0, r3, r0, lsr r2
	RET

	@ Fixup and adjust bit position for denormalized arguments.
	@ Note that r2 must not remain equal to 0.
LSYM(Lad_d):
	teq	r2, #0
	eor	r1, r1, #0x00800000
	eoreq	r0, r0, #0x00800000
	addeq	r2, r2, #1
	subne	r3, r3, #1
	b	LSYM(Lad_x)

LSYM(Lad_s):
	mov	r3, r1, lsl #1

	mvns	ip, r2, asr #24
	mvnnes	ip, r3, asr #24
	beq	LSYM(Lad_i)

	teq	r2, r3
	beq	1f

	@ Result is x + 0.0 = x or 0.0 + y = y.
	teq	r2, #0
	moveq	r0, r1
	RET

1:	teq	r0, r1

	@ Result is x - x = 0.
	movne	r0, #0
	RETc(ne)

	@ Result is x + x = 2x.
	tst	r2, #0xff000000
	bne	2f
	movs	r0, r0, lsl #1
	orrcs	r0, r0, #0x80000000
	RET
2:	adds	r2, r2, #(2 << 24)
	addcc	r0, r0, #(1 << 23)
	RETc(cc)
	and	r3, r0, #0x80000000

	@ Overflow: return INF.
LSYM(Lad_o):
	orr	r0, r3, #0x7f000000
	orr	r0, r0, #0x00800000
	RET

	@ At least one of r0/r1 is INF/NAN.
	@   if r0 != INF/NAN: return r1 (which is INF/NAN)
	@   if r1 != INF/NAN: return r0 (which is INF/NAN)
	@   if r0 or r1 is NAN: return NAN
	@   if opposite sign: return NAN
	@   otherwise return r0 (which is INF or -INF)
LSYM(Lad_i):
	mvns	r2, r2, asr #24
	movne	r0, r1
	mvneqs	r3, r3, asr #24
	movne	r1, r0
	movs	r2, r0, lsl #9
	moveqs	r3, r1, lsl #9
	teqeq	r0, r1
	orrne	r0, r0, #0x00400000	@ quiet NAN
	RET

	FUNC_END aeabi_frsub
	FUNC_END aeabi_fadd
	FUNC_END addsf3
	FUNC_END aeabi_fsub
	FUNC_END subsf3

ARM_FUNC_START floatunsisf
ARM_FUNC_ALIAS aeabi_ui2f floatunsisf

	mov	r3, #0
	b	1f

ARM_FUNC_START floatsisf
ARM_FUNC_ALIAS aeabi_i2f floatsisf

	ands	r3, r0, #0x80000000
	rsbmi	r0, r0, #0

1:	movs	ip, r0
	RETc(eq)

	@ Add initial exponent to sign
	orr	r3, r3, #((127 + 23) << 23)

	.ifnc	ah, r0
	mov	ah, r0
	.endif
	mov	al, #0
	b	2f

	FUNC_END aeabi_i2f
	FUNC_END floatsisf
	FUNC_END aeabi_ui2f
	FUNC_END floatunsisf
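
/* A hedged, self-contained C model of the signed conversion above
   (the asm shares its normalization and rounding tail with the
   64-bit path at label 2 below; all names are illustrative):

	#include <stdint.h>

	uint32_t floatsisf_model(int32_t i)
	{
		if (i == 0)
			return 0;
		uint32_t sign = 0, m = (uint32_t)i;
		if (i < 0) { sign = 0x80000000u; m = 0u - m; }

		int exp = 127 + 23;	// value == m * 2^(exp - 150)
		uint32_t rest = 0;	// round bit in MSB, sticky below
		while (m >= 0x01000000u) {
			rest = (rest >> 1) | (rest & 1) | (m << 31);
			m >>= 1;
			exp++;
		}
		while (m < 0x00800000u) {
			m <<= 1;
			exp--;
		}

		uint32_t r = sign | ((uint32_t)exp << 23) | (m & 0x007fffff);
		r += (rest > 0x80000000u)
		  || (rest == 0x80000000u && (r & 1));	// to nearest even
		return r;
	}
 */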

ARM_FUNC_START floatundisf
ARM_FUNC_ALIAS aeabi_ul2f floatundisf

	orrs	r2, r0, r1
#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
	mvfeqs	f0, #0.0
#endif
	RETc(eq)

	mov	r3, #0
	b	1f

ARM_FUNC_START floatdisf
ARM_FUNC_ALIAS aeabi_l2f floatdisf

	orrs	r2, r0, r1
#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
	mvfeqs	f0, #0.0
#endif
	RETc(eq)

	ands	r3, ah, #0x80000000	@ sign bit in r3
	bpl	1f
	rsbs	al, al, #0
	rsc	ah, ah, #0
1:
#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
	@ For hard FPA code we want to return via the tail below so that
	@ we can return the result in f0 as well as in r0 for backwards
	@ compatibility.
	str	lr, [sp, #-8]!
	adr	lr, LSYM(f0_ret)
#endif

	movs	ip, ah
	moveq	ip, al
	moveq	ah, al
	moveq	al, #0

	@ Add initial exponent to sign
	orr	r3, r3, #((127 + 23 + 32) << 23)
	subeq	r3, r3, #(32 << 23)
2:	sub	r3, r3, #(1 << 23)

#if __ARM_ARCH__ < 5

	mov	r2, #23
	cmp	ip, #(1 << 16)
	movhs	ip, ip, lsr #16
	subhs	r2, r2, #16
	cmp	ip, #(1 << 8)
	movhs	ip, ip, lsr #8
	subhs	r2, r2, #8
	cmp	ip, #(1 << 4)
	movhs	ip, ip, lsr #4
	subhs	r2, r2, #4
	cmp	ip, #(1 << 2)
	subhs	r2, r2, #2
	sublo	r2, r2, ip, lsr #1
	subs	r2, r2, ip, lsr #3

#else

	clz	r2, ip
	subs	r2, r2, #8

#endif

	sub	r3, r3, r2, lsl #23
	blt	3f

	add	r3, r3, ah, lsl r2
	mov	ip, al, lsl r2
	rsb	r2, r2, #32
	cmp	ip, #0x80000000
	adc	r0, r3, al, lsr r2
	biceq	r0, r0, #1
	RET

3:	add	r2, r2, #32
	mov	ip, ah, lsl r2
	rsb	r2, r2, #32
	orrs	al, al, ip, lsl #1
	adc	r0, r3, ah, lsr r2
	biceq	r0, r0, ip, lsr #31
	RET
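
/* Both tails above reduce a 64-bit significand to 24 bits with
   round-to-nearest-even.  A hedged C fragment of the second case
   (label 3), where even the high word must be shifted right (names
   illustrative; rshift is 1..31 and base already holds the packed
   sign and exponent):

	#include <stdint.h>

	uint32_t round64(uint32_t base, uint32_t hi, uint32_t lo, int rshift)
	{
		uint32_t dropped = hi << (32 - rshift);	// round bit in MSB
		uint32_t sticky = lo | (dropped << 1);
		uint32_t r = base + (hi >> rshift) + (dropped >> 31);
		if (sticky == 0 && (dropped >> 31))	// exact tie
			r &= ~1u;			// force even
		return r;
	}
 */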

#if !defined (__VFP_FP__) && !defined(__SOFTFP__)

LSYM(f0_ret):
	str	r0, [sp, #-4]!
	ldfs	f0, [sp], #4
	RETLDM

#endif

	FUNC_END floatdisf
	FUNC_END aeabi_l2f
	FUNC_END floatundisf
	FUNC_END aeabi_ul2f

#endif /* L_addsubsf3 */

#ifdef L_muldivsf3

ARM_FUNC_START mulsf3
ARM_FUNC_ALIAS aeabi_fmul mulsf3

	@ Mask out exponents, trap any zero/denormal/INF/NAN.
	mov	ip, #0xff
	ands	r2, ip, r0, lsr #23
	andnes	r3, ip, r1, lsr #23
	teqne	r2, ip
	teqne	r3, ip
	beq	LSYM(Lml_s)
LSYM(Lml_x):

	@ Add exponents together
	add	r2, r2, r3

	@ Determine final sign.
	eor	ip, r0, r1

	@ Convert mantissa to unsigned integer.
	@ If power of two, branch to a separate path.
	@ Make up for final alignment.
	movs	r0, r0, lsl #9
	movnes	r1, r1, lsl #9
	beq	LSYM(Lml_1)
	mov	r3, #0x08000000
	orr	r0, r3, r0, lsr #5
	orr	r1, r3, r1, lsr #5

#if __ARM_ARCH__ < 4

	@ Put sign bit in r3, which will be restored into r0 later.
	and	r3, ip, #0x80000000

	@ Well, no way to make it shorter without the umull instruction.
	stmfd	sp!, {r3, r4, r5}
	mov	r4, r0, lsr #16
	mov	r5, r1, lsr #16
	bic	r0, r0, r4, lsl #16
	bic	r1, r1, r5, lsl #16
	mul	ip, r4, r5
	mul	r3, r0, r1
	mul	r0, r5, r0
	mla	r0, r4, r1, r0
	adds	r3, r3, r0, lsl #16
	adc	r1, ip, r0, lsr #16
	ldmfd	sp!, {r0, r4, r5}

#else

	@ The actual multiplication.
	umull	r3, r1, r0, r1

	@ Put final sign in r0.
	and	r0, ip, #0x80000000

#endif
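
/* The pre-ARMv4 block above synthesizes umull from four 16x16->32
   products.  A hedged C model (names illustrative; the middle sum
   cannot overflow for the operand range used here, since both high
   halves are below 2^12):

	#include <stdint.h>

	void mul32x32(uint32_t a, uint32_t b, uint32_t *hi, uint32_t *lo)
	{
		uint32_t a_hi = a >> 16, a_lo = a & 0xffff;
		uint32_t b_hi = b >> 16, b_lo = b & 0xffff;

		uint32_t mid = b_hi * a_lo + a_hi * b_lo;
		uint32_t low = a_lo * b_lo + (mid << 16);

		*lo = low;
		*hi = a_hi * b_hi + (mid >> 16)
		    + (low < (mid << 16));	// carry from the low word
	}
 */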

	@ Adjust result upon the MSB position.
	cmp	r1, #(1 << 23)
	movcc	r1, r1, lsl #1
	orrcc	r1, r1, r3, lsr #31
	movcc	r3, r3, lsl #1

	@ Add sign to result.
	orr	r0, r0, r1

	@ Apply exponent bias, check for under/overflow.
	sbc	r2, r2, #127
	cmp	r2, #(254 - 1)
	bhi	LSYM(Lml_u)

	@ Round the result, merge final exponent.
	cmp	r3, #0x80000000
	adc	r0, r0, r2, lsl #23
	biceq	r0, r0, #1
	RET

	@ Multiplication by 0x1p*: let's shortcut a lot of code.
LSYM(Lml_1):
	teq	r0, #0
	and	ip, ip, #0x80000000
	moveq	r1, r1, lsl #9
	orr	r0, ip, r0, lsr #9
	orr	r0, r0, r1, lsr #9
	subs	r2, r2, #127
	rsbgts	r3, r2, #255
	orrgt	r0, r0, r2, lsl #23
	RETc(gt)

	@ Under/overflow: fix things up for the code below.
	orr	r0, r0, #0x00800000
	mov	r3, #0
	subs	r2, r2, #1

LSYM(Lml_u):
	@ Overflow?
	bgt	LSYM(Lml_o)

	@ Check if denormalized result is possible, otherwise return signed 0.
	cmn	r2, #(24 + 1)
	bicle	r0, r0, #0x7fffffff
	RETc(le)

	@ Shift value right, round, etc.
	rsb	r2, r2, #0
	movs	r1, r0, lsl #1
	mov	r1, r1, lsr r2
	rsb	r2, r2, #32
	mov	ip, r0, lsl r2
	movs	r0, r1, rrx
	adc	r0, r0, #0
	orrs	r3, r3, ip, lsl #1
	biceq	r0, r0, ip, lsr #31
	RET
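
/* The block above produces a denormal: the significand, still with
   its explicit leading 1, is shifted right by the exponent deficit
   and the shifted-out bits are rounded in.  A hedged C rendition
   (names illustrative; d is the total right-shift, at least 1 in the
   cases that reach the rounding):

	#include <stdint.h>

	uint32_t make_denormal(uint32_t sign, uint32_t sig, int d,
			       uint32_t sticky)
	{
		uint32_t round = (sig >> (d - 1)) & 1;
		if (d > 1)
			sticky |= sig << (33 - d);  // bits below round bit

		uint32_t r = sign | ((sig >> d) + round);
		if (round && sticky == 0)	// exact tie: force even
			r &= ~1u;
		return r;
	}
 */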

	@ One or both arguments are denormalized.
	@ Scale them leftwards and preserve sign bit.
LSYM(Lml_d):
	teq	r2, #0
	and	ip, r0, #0x80000000
1:	moveq	r0, r0, lsl #1
	tsteq	r0, #0x00800000
	subeq	r2, r2, #1
	beq	1b
	orr	r0, r0, ip
	teq	r3, #0
	and	ip, r1, #0x80000000
2:	moveq	r1, r1, lsl #1
	tsteq	r1, #0x00800000
	subeq	r3, r3, #1
	beq	2b
	orr	r1, r1, ip
	b	LSYM(Lml_x)

LSYM(Lml_s):
	@ Isolate the INF and NAN cases.
	and	r3, ip, r1, lsr #23
	teq	r2, ip
	teqne	r3, ip
	beq	1f

	@ Here, one or more arguments are either denormalized or zero.
	bics	ip, r0, #0x80000000
	bicnes	ip, r1, #0x80000000
	bne	LSYM(Lml_d)

	@ Result is 0, but determine sign anyway.
LSYM(Lml_z):
	eor	r0, r0, r1
	bic	r0, r0, #0x7fffffff
	RET

1:	@ One or both args are INF or NAN.
	teq	r0, #0x0
	teqne	r0, #0x80000000
	moveq	r0, r1
	teqne	r1, #0x0
	teqne	r1, #0x80000000
	beq	LSYM(Lml_n)		@ 0 * INF or INF * 0 -> NAN
	teq	r2, ip
	bne	1f
	movs	r2, r0, lsl #9
	bne	LSYM(Lml_n)		@ NAN * <anything> -> NAN
1:	teq	r3, ip
	bne	LSYM(Lml_i)
	movs	r3, r1, lsl #9
	movne	r0, r1
	bne	LSYM(Lml_n)		@ <anything> * NAN -> NAN

	@ Result is INF, but we need to determine its sign.
LSYM(Lml_i):
	eor	r0, r0, r1

	@ Overflow: return INF (sign already in r0).
LSYM(Lml_o):
	and	r0, r0, #0x80000000
	orr	r0, r0, #0x7f000000
	orr	r0, r0, #0x00800000
	RET

	@ Return a quiet NAN.
LSYM(Lml_n):
	orr	r0, r0, #0x7f000000
	orr	r0, r0, #0x00c00000
	RET

	FUNC_END aeabi_fmul
	FUNC_END mulsf3

ARM_FUNC_START divsf3
ARM_FUNC_ALIAS aeabi_fdiv divsf3

	@ Mask out exponents, trap any zero/denormal/INF/NAN.
	mov	ip, #0xff
	ands	r2, ip, r0, lsr #23
	andnes	r3, ip, r1, lsr #23
	teqne	r2, ip
	teqne	r3, ip
	beq	LSYM(Ldv_s)
LSYM(Ldv_x):

	@ Subtract the divisor's exponent from the dividend's.
	sub	r2, r2, r3

	@ Preserve final sign into ip.
	eor	ip, r0, r1

	@ Convert mantissa to unsigned integer.
	@ Dividend -> r3, divisor -> r1.
	movs	r1, r1, lsl #9
	mov	r0, r0, lsl #9
	beq	LSYM(Ldv_1)
	mov	r3, #0x10000000
	orr	r1, r3, r1, lsr #4
	orr	r3, r3, r0, lsr #4

	@ Initialize r0 (result) with final sign bit.
	and	r0, ip, #0x80000000

	@ Ensure the result will land in a known bit position.
	@ Apply exponent bias accordingly.
	cmp	r3, r1
	movcc	r3, r3, lsl #1
	adc	r2, r2, #(127 - 2)

	@ The actual division loop.
	mov	ip, #0x00800000
1:	cmp	r3, r1
	subcs	r3, r3, r1
	orrcs	r0, r0, ip
	cmp	r3, r1, lsr #1
	subcs	r3, r3, r1, lsr #1
	orrcs	r0, r0, ip, lsr #1
	cmp	r3, r1, lsr #2
	subcs	r3, r3, r1, lsr #2
	orrcs	r0, r0, ip, lsr #2
	cmp	r3, r1, lsr #3
	subcs	r3, r3, r1, lsr #3
	orrcs	r0, r0, ip, lsr #3
	movs	r3, r3, lsl #4
	movnes	ip, ip, lsr #4
	bne	1b
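
/* The loop above is restoring division producing four quotient bits
   per iteration: instead of shifting the remainder left one bit at a
   time, it compares against the divisor shifted right by 0..3.  A
   hedged C model (names illustrative; num >= den and num < 2*den on
   entry, giving a 24-bit quotient with its leading 1 at bit 23; the
   final remainder feeds the rounding done after the loop):

	#include <stdint.h>

	uint32_t divide_sig(uint32_t num, uint32_t den)
	{
		uint32_t q = 0, bit = 0x00800000;
		do {
			if (num >= den)      { num -= den;      q |= bit;      }
			if (num >= den >> 1) { num -= den >> 1; q |= bit >> 1; }
			if (num >= den >> 2) { num -= den >> 2; q |= bit >> 2; }
			if (num >= den >> 3) { num -= den >> 3; q |= bit >> 3; }
			num <<= 4;
			bit >>= 4;
		} while (num && bit);
		return q;	// num != 0 here means an inexact result
	}
 */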

	@ Check exponent for under/overflow.
	cmp	r2, #(254 - 1)
	bhi	LSYM(Lml_u)

	@ Round the result, merge final exponent.
	cmp	r3, r1
	adc	r0, r0, r2, lsl #23
	biceq	r0, r0, #1
	RET

	@ Division by 0x1p*: let's shortcut a lot of code.
LSYM(Ldv_1):
	and	ip, ip, #0x80000000
	orr	r0, ip, r0, lsr #9
	adds	r2, r2, #127
	rsbgts	r3, r2, #255
	orrgt	r0, r0, r2, lsl #23
	RETc(gt)

	orr	r0, r0, #0x00800000
	mov	r3, #0
	subs	r2, r2, #1
	b	LSYM(Lml_u)

	@ One or both arguments are denormalized.
	@ Scale them leftwards and preserve sign bit.
LSYM(Ldv_d):
	teq	r2, #0
	and	ip, r0, #0x80000000
1:	moveq	r0, r0, lsl #1
	tsteq	r0, #0x00800000
	subeq	r2, r2, #1
	beq	1b
	orr	r0, r0, ip
	teq	r3, #0
	and	ip, r1, #0x80000000
2:	moveq	r1, r1, lsl #1
	tsteq	r1, #0x00800000
	subeq	r3, r3, #1
	beq	2b
	orr	r1, r1, ip
	b	LSYM(Ldv_x)

	@ One or both arguments are either INF, NAN, zero or denormalized.
LSYM(Ldv_s):
	and	r3, ip, r1, lsr #23
	teq	r2, ip
	bne	1f
	movs	r2, r0, lsl #9
	bne	LSYM(Lml_n)		@ NAN / <anything> -> NAN
	teq	r3, ip
	bne	LSYM(Lml_i)		@ INF / <anything> -> INF
	mov	r0, r1
	b	LSYM(Lml_n)		@ INF / (INF or NAN) -> NAN
1:	teq	r3, ip
	bne	2f
	movs	r3, r1, lsl #9
	beq	LSYM(Lml_z)		@ <anything> / INF -> 0
	mov	r0, r1
	b	LSYM(Lml_n)		@ <anything> / NAN -> NAN
2:	@ If both are nonzero, we need to normalize and resume above.
	bics	ip, r0, #0x80000000
	bicnes	ip, r1, #0x80000000
	bne	LSYM(Ldv_d)
	@ One or both arguments are zero.
	bics	r2, r0, #0x80000000
	bne	LSYM(Lml_i)		@ <non_zero> / 0 -> INF
	bics	r3, r1, #0x80000000
	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
	b	LSYM(Lml_n)		@ 0 / 0 -> NAN

	FUNC_END aeabi_fdiv
	FUNC_END divsf3

#endif /* L_muldivsf3 */

#ifdef L_cmpsf2

	@ The return value in r0 is
	@
	@   0  if the operands are equal
	@   1  if the first operand is greater than the second, or
	@      the operands are unordered and the operation is
	@      CMP, LT, LE, NE, or EQ.
	@   -1 if the first operand is less than the second, or
	@      the operands are unordered and the operation is GT
	@      or GE.
	@
	@ The Z flag will be set iff the operands are equal.
	@
	@ The following registers are clobbered by this function:
	@   ip, r0, r1, r2, r3
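
/* A hedged C restatement of the contract above (all helper names are
   illustrative only):

	#include <stdint.h>

	static int sf_isnan(uint32_t u)
	{
		return ((u >> 23) & 0xff) == 0xff && (u << 9) != 0;
	}

	// Map the bit pattern to an integer whose signed order matches
	// the floating-point order; +0.0 and -0.0 both map to 0.
	static int32_t sf_key(uint32_t u)
	{
		return (u & 0x80000000u) ? (int32_t)(0x80000000u - u)
					 : (int32_t)u;
	}

	// nan_result is 1 for CMP/LT/LE/NE/EQ and -1 for GT/GE.
	int cmpsf2_model(uint32_t a, uint32_t b, int nan_result)
	{
		if (sf_isnan(a) || sf_isnan(b))
			return nan_result;
		int32_t ka = sf_key(a), kb = sf_key(b);
		return ka < kb ? -1 : ka > kb ? 1 : 0;
	}
 */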

ARM_FUNC_START gtsf2
ARM_FUNC_ALIAS gesf2 gtsf2
	mov	ip, #-1
	b	1f

ARM_FUNC_START ltsf2
ARM_FUNC_ALIAS lesf2 ltsf2
	mov	ip, #1
	b	1f

ARM_FUNC_START cmpsf2
ARM_FUNC_ALIAS nesf2 cmpsf2
ARM_FUNC_ALIAS eqsf2 cmpsf2
	mov	ip, #1			@ how should we specify unordered here?

1:	str	ip, [sp, #-4]

	@ Trap any INF/NAN first.
	mov	r2, r0, lsl #1
	mov	r3, r1, lsl #1
	mvns	ip, r2, asr #24
	mvnnes	ip, r3, asr #24
	beq	3f

	@ Compare values.
	@ Note that 0.0 is equal to -0.0.
2:	orrs	ip, r2, r3, lsr #1	@ test if both are 0, clear C flag
	teqne	r0, r1			@ if not 0 compare sign
	subpls	r0, r2, r3		@ if same sign compare values, set r0

	@ Result:
	movhi	r0, r1, asr #31
	mvnlo	r0, r1, asr #31
	orrne	r0, r0, #1
	RET

	@ Look for a NAN.
3:	mvns	ip, r2, asr #24
	bne	4f
	movs	ip, r0, lsl #9
	bne	5f			@ r0 is NAN
4:	mvns	ip, r3, asr #24
	bne	2b
	movs	ip, r1, lsl #9
	beq	2b			@ r1 is not NAN
5:	ldr	r0, [sp, #-4]		@ return unordered code.
	RET

	FUNC_END gesf2
	FUNC_END gtsf2
	FUNC_END lesf2
	FUNC_END ltsf2
	FUNC_END nesf2
	FUNC_END eqsf2
	FUNC_END cmpsf2

ARM_FUNC_START aeabi_cfrcmple

	mov	ip, r0
	mov	r0, r1
	mov	r1, ip
	b	6f

ARM_FUNC_START aeabi_cfcmpeq
ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq

	@ The status-returning routines are required to preserve all
	@ registers except ip, lr, and cpsr.
6:	stmfd	sp!, {r0, r1, r2, r3, lr}
	ARM_CALL cmpsf2
	@ Set the Z flag correctly, and the C flag unconditionally.
	cmp	 r0, #0
	@ Clear the C flag if the return value was -1, indicating
	@ that the first operand was smaller than the second.
	cmnmi	 r0, #0
	RETLDM  "r0, r1, r2, r3"

	FUNC_END aeabi_cfcmple
	FUNC_END aeabi_cfcmpeq
	FUNC_END aeabi_cfrcmple

ARM_FUNC_START	aeabi_fcmpeq

	str	lr, [sp, #-8]!
	ARM_CALL aeabi_cfcmple
	moveq	r0, #1	@ Equal to.
	movne	r0, #0	@ Less than, greater than, or unordered.
	RETLDM

	FUNC_END aeabi_fcmpeq

ARM_FUNC_START	aeabi_fcmplt

	str	lr, [sp, #-8]!
	ARM_CALL aeabi_cfcmple
	movcc	r0, #1	@ Less than.
	movcs	r0, #0	@ Equal to, greater than, or unordered.
	RETLDM

	FUNC_END aeabi_fcmplt

ARM_FUNC_START	aeabi_fcmple

	str	lr, [sp, #-8]!
	ARM_CALL aeabi_cfcmple
	movls	r0, #1  @ Less than or equal to.
	movhi	r0, #0	@ Greater than or unordered.
	RETLDM

	FUNC_END aeabi_fcmple

ARM_FUNC_START	aeabi_fcmpge

	str	lr, [sp, #-8]!
	ARM_CALL aeabi_cfrcmple
	movls	r0, #1	@ Operand 2 is less than or equal to operand 1.
	movhi	r0, #0	@ Operand 2 greater than operand 1, or unordered.
	RETLDM

	FUNC_END aeabi_fcmpge

ARM_FUNC_START	aeabi_fcmpgt

	str	lr, [sp, #-8]!
	ARM_CALL aeabi_cfrcmple
	movcc	r0, #1	@ Operand 2 is less than operand 1.
	movcs	r0, #0  @ Operand 2 is greater than or equal to operand 1,
			@ or they are unordered.
	RETLDM

	FUNC_END aeabi_fcmpgt

#endif /* L_cmpsf2 */

#ifdef L_unordsf2

ARM_FUNC_START unordsf2
ARM_FUNC_ALIAS aeabi_fcmpun unordsf2

	mov	r2, r0, lsl #1
	mov	r3, r1, lsl #1
	mvns	ip, r2, asr #24
	bne	1f
	movs	ip, r0, lsl #9
	bne	3f			@ r0 is NAN
1:	mvns	ip, r3, asr #24
	bne	2f
	movs	ip, r1, lsl #9
	bne	3f			@ r1 is NAN
2:	mov	r0, #0			@ arguments are ordered.
	RET
3:	mov	r0, #1			@ arguments are unordered.
	RET
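
/* A hedged C equivalent of the test above (the name is illustrative;
   a value is NAN exactly when its exponent field is all ones and its
   mantissa is nonzero, i.e. when (u << 1) > 0xff000000 unsigned):

	#include <stdint.h>

	int unordsf2_model(uint32_t a, uint32_t b)
	{
		return ((a << 1) > 0xff000000u) || ((b << 1) > 0xff000000u);
	}
 */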

	FUNC_END aeabi_fcmpun
	FUNC_END unordsf2

#endif /* L_unordsf2 */

#ifdef L_fixsfsi

ARM_FUNC_START fixsfsi
ARM_FUNC_ALIAS aeabi_f2iz fixsfsi

	@ check exponent range.
	mov	r2, r0, lsl #1
	cmp	r2, #(127 << 24)
	bcc	1f			@ value is too small
	mov	r3, #(127 + 31)
	subs	r2, r3, r2, lsr #24
	bls	2f			@ value is too large

	@ scale value
	mov	r3, r0, lsl #8
	orr	r3, r3, #0x80000000
	tst	r0, #0x80000000		@ the sign bit
	mov	r0, r3, lsr r2
	rsbne	r0, r0, #0
	RET

1:	mov	r0, #0
	RET

2:	cmp	r2, #(127 + 31 - 0xff)
	bne	3f
	movs	r2, r0, lsl #9
	bne	4f			@ r0 is NAN.
3:	ands	r0, r0, #0x80000000	@ the sign bit
	moveq	r0, #0x7fffffff		@ the maximum signed positive si
	RET

4:	mov	r0, #0			@ What should we convert NAN to?
	RET

	FUNC_END aeabi_f2iz
	FUNC_END fixsfsi
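
/* A hedged C model of the truncating conversion above (the name is
   illustrative; NAN deliberately converts to 0 and out-of-range
   values saturate, matching the code):

	#include <stdint.h>

	int32_t fixsfsi_model(uint32_t a)
	{
		if ((a << 1) < 0x7f000000u)	// |value| < 1
			return 0;
		int shift = (127 + 31) - ((a >> 23) & 0xff);
		if (shift <= 0) {		// too large, INF, or NAN
			if (shift == 127 + 31 - 0xff && (a << 9))
				return 0;	// NAN
			return (a & 0x80000000u) ? (int32_t)0x80000000u
						 : 0x7fffffff;
		}
		// Significand with its leading 1 placed at bit 31.
		uint32_t m = (a << 8) | 0x80000000u;
		int32_t r = (int32_t)(m >> shift);	// shift is 1..31
		return (a & 0x80000000u) ? -r : r;
	}
 */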

#endif /* L_fixsfsi */

#ifdef L_fixunssfsi

ARM_FUNC_START fixunssfsi
ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi

	@ check exponent range.
	movs	r2, r0, lsl #1
	bcs	1f			@ value is negative
	cmp	r2, #(127 << 24)
	bcc	1f			@ value is too small
	mov	r3, #(127 + 31)
	subs	r2, r3, r2, lsr #24
	bmi	2f			@ value is too large

	@ scale the value
	mov	r3, r0, lsl #8
	orr	r3, r3, #0x80000000
	mov	r0, r3, lsr r2
	RET

1:	mov	r0, #0
	RET

2:	cmp	r2, #(127 + 31 - 0xff)
	bne	3f
	movs	r2, r0, lsl #9
	bne	4f			@ r0 is NAN.
3:	mov	r0, #0xffffffff		@ maximum unsigned si
	RET

4:	mov	r0, #0			@ What should we convert NAN to?
	RET

	FUNC_END aeabi_f2uiz
	FUNC_END fixunssfsi

#endif /* L_fixunssfsi */