/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/10.2/lib/libc/arm/aeabi/aeabi_vfp_float.S 273471 2014-10-22 13:50:38Z andrew $");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

/*
 * void __aeabi_cfcmpeq(float, float)
 *
 * Three-way compare, non-signalling: sets the CPSR flags so the caller
 * can test eq/lt/etc.  vcmp (rather than vcmpe) is used so that a quiet
 * NaN operand does not raise an Invalid Operation exception.
 */
AEABI_ENTRY(cfcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)	/* move args r0/r1 -> s0/s1 on soft-float builds (macro from aeabi_vfp.h) */
	vcmp.f32	s0, s1		/* compare result lands in the FPSCR flags */
	vmrs     	APSR_nzcv, fpscr	/* copy FPSCR N,Z,C,V into the CPSR */
	RET
AEABI_END(cfcmpeq)

/*
 * void __aeabi_cfcmple(float, float)
 *
 * Three-way compare for the <= relation: sets the CPSR flags.
 * vcmpe is the signalling form, so a NaN operand raises Invalid
 * Operation, as the AEABI specifies for this entry point.
 */
AEABI_ENTRY(cfcmple)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmpe.f32	s0, s1		/* signalling compare */
	vmrs     	APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	RET
AEABI_END(cfcmple)

/*
 * void __aeabi_cfrcmple(float, float)
 *
 * Reversed three-way compare: identical to __aeabi_cfcmple but with
 * the operands swapped (compares arg1 <= arg0).  Signalling on NaN.
 */
AEABI_ENTRY(cfrcmple)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmpe.f32	s1, s0		/* note reversed operand order */
	vmrs     	APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	RET
AEABI_END(cfrcmple)

/*
 * int __aeabi_fcmpeq(float, float)
 *
 * Returns 1 when the operands compare equal, 0 otherwise.  An
 * unordered result (NaN operand) leaves Z clear, so "eq" fails and
 * the function correctly returns 0 for NaNs.
 */
AEABI_ENTRY(fcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	movne    r0, #0			/* ne/eq are exclusive: exactly one mov fires */
	moveq    r0, #1
	RET
AEABI_END(fcmpeq)

70238384Sjkim/* int __aeabi_fcmplt(float, float) */
71280297SjkimAEABI_ENTRY(fcmplt)
72280297Sjkim	LOAD_SREGS(s0, s1, r0, r1)
73280297Sjkim	vcmp.f32 s0, s1
74280297Sjkim	vmrs     APSR_nzcv, fpscr
75280297Sjkim	movcs    r0, #0
76290207Sjkim	movlt    r0, #1
77290207Sjkim	RET
78290207SjkimAEABI_END(fcmplt)
79290207Sjkim
/*
 * int __aeabi_fcmple(float, float)
 *
 * Returns 1 when arg0 <= arg1, 0 otherwise.  An unordered result
 * sets C and clears Z, which satisfies "hi" (C set && Z clear), so
 * NaN operands correctly yield 0; "ls" covers less-than (C clear)
 * and equal (Z set).
 */
AEABI_ENTRY(fcmple)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	movhi    r0, #0			/* hi/ls are exclusive: exactly one mov fires */
	movls    r0, #1
	RET
AEABI_END(fcmple)

/*
 * int __aeabi_fcmpge(float, float)
 *
 * Returns 1 when arg0 >= arg1, 0 otherwise.  An unordered result
 * sets V with N clear, so "lt" (N != V) is true and NaN operands
 * correctly yield 0; "ge" (N == V) covers greater-than and equal.
 */
AEABI_ENTRY(fcmpge)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	movlt    r0, #0			/* lt/ge are exclusive: exactly one mov fires */
	movge    r0, #1
	RET
AEABI_END(fcmpge)

/*
 * int __aeabi_fcmpgt(float, float)
 *
 * Returns 1 when arg0 > arg1, 0 otherwise.  An unordered result sets
 * V with N and Z clear, so "gt" (Z clear && N == V) fails and NaN
 * operands correctly yield 0.
 */
AEABI_ENTRY(fcmpgt)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	movle    r0, #0			/* le/gt are exclusive: exactly one mov fires */
	movgt    r0, #1
	RET
AEABI_END(fcmpgt)

/*
 * int __aeabi_fcmpun(float, float)
 *
 * Returns 1 when the comparison is unordered (either operand is a
 * NaN), 0 otherwise.  Only an unordered VFP compare sets the V flag,
 * so "vs" tests exactly that case.
 */
AEABI_ENTRY(fcmpun)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr	/* FPSCR flags -> CPSR */
	movvc    r0, #0			/* vc/vs are exclusive: exactly one mov fires */
	movvs    r0, #1
	RET
AEABI_END(fcmpun)

/*
 * int __aeabi_f2iz(float)
 *
 * Converts a float to a signed 32-bit integer, truncating toward zero
 * (the "z" in f2iz), regardless of the rounding mode set in FPSCR.
 */
AEABI_ENTRY(f2iz)
	LOAD_SREG(s0, r0)	/* arg r0 -> s0 on soft-float builds */
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that used FPSCR to determine the rounding.
	 * When binutils is fixed we can use this again.
	 */
	vcvt.s32.f32 s0, s0
#else
	/* Pre-UAL mnemonic for the same round-toward-zero conversion. */
	ftosizs      s0, s0
#endif
	vmov         r0, s0	/* integer result back to the core register */
	RET
AEABI_END(f2iz)

/*
 * double __aeabi_f2d(float)
 *
 * Widens a single-precision float to double precision.  The
 * conversion is exact, so no rounding concerns apply.
 */
AEABI_ENTRY(f2d)
	LOAD_SREG(s0, r0)	/* arg r0 -> s0 on soft-float builds */
	vcvt.f64.f32 d0, s0
	UNLOAD_DREG(r0, r1, d0)	/* double result d0 -> r0/r1 on soft-float builds */
	RET
AEABI_END(f2d)

/*
 * float __aeabi_i2f(int)
 *
 * Converts a signed 32-bit integer to a float.  The integer argument
 * is always in r0 (no LOAD_SREG needed), so it is moved into a VFP
 * register directly before converting.
 */
AEABI_ENTRY(i2f)
	vmov         s0, r0	/* raw integer bits into a VFP register */
	vcvt.f32.s32 s0, s0
	UNLOAD_SREG(r0, s0)	/* float result s0 -> r0 on soft-float builds */
	RET
AEABI_END(i2f)

/*
 * float __aeabi_fadd(float, float)
 *
 * Single-precision addition: returns arg0 + arg1.
 */
AEABI_ENTRY(fadd)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vadd.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)		/* result s0 -> r0 on soft-float builds */
	RET
AEABI_END(fadd)

/* float __aeabi_fdiv(float, float) */
/*
 * Single-precision division: returns arg0 / arg1.  (The header
 * comment previously mislabelled this as __aeabi_fmul.)
 */
AEABI_ENTRY(fdiv)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vdiv.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)		/* result s0 -> r0 on soft-float builds */
	RET
AEABI_END(fdiv)

/*
 * float __aeabi_fmul(float, float)
 *
 * Single-precision multiplication: returns arg0 * arg1.
 */
AEABI_ENTRY(fmul)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vmul.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)		/* result s0 -> r0 on soft-float builds */
	RET
AEABI_END(fmul)

/*
 * float __aeabi_fsub(float, float)
 *
 * Single-precision subtraction: returns arg0 - arg1.
 */
AEABI_ENTRY(fsub)
	LOAD_SREGS(s0, s1, r0, r1)	/* args r0/r1 -> s0/s1 on soft-float builds */
	vsub.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)		/* result s0 -> r0 on soft-float builds */
	RET
AEABI_END(fsub)

