--- head/lib/libc/arm/aeabi/aeabi_vfp_double.S	(revision 273088)
+++ head/lib/libc/arm/aeabi/aeabi_vfp_double.S	(revision 282816)
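
The only functional change in r282816 is Thumb-2 compatibility for the conditional moves: under .syntax unified, a conditional instruction assembled as Thumb-2 must be preceded by a matching it/ite block, so an ite is inserted before each pair of conditional movs in the comparison helpers. In __aeabi_dcmplt the second move also changes from movlt to movcc: the two arms of an "ite cs" block must use a condition and its exact complement (cs/cc), and after a VFP compare cc is the correct less-than test anyway, since lt is additionally true when the operands are unordered (NaN).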
/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
-__FBSDID("$FreeBSD: head/lib/libc/arm/aeabi/aeabi_vfp_double.S 273088 2014-10-14 14:27:51Z andrew $");
+__FBSDID("$FreeBSD: head/lib/libc/arm/aeabi/aeabi_vfp_double.S 282816 2015-05-12 10:03:14Z andrew $");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

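A note on the glue macros used below (a reading of aeabi_vfp.h, where the actual expansions live): LOAD_DREG, UNLOAD_DREG, and UNLOAD_SREG move values between the core-register argument/return slots and the VFP registers for the softfp ABI, and are expected to expand to nothing for the hard-float ABI, where arguments already arrive in d0/d1. A minimal C sketch of the register-pair view of a double under softfp (helper names here are hypothetical, not from the source):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch: a softfp double crosses the call boundary as two 32-bit
 * words, low word in r0 and high word in r1 on little-endian ARM;
 * LOAD_DREG(d0, r0, r1) reassembles such a pair into d0. */
static void
split_double(double d, uint32_t *lo, uint32_t *hi)
{
	uint64_t bits;

	memcpy(&bits, &d, sizeof(bits));
	*lo = (uint32_t)bits;		/* would arrive in r0 */
	*hi = (uint32_t)(bits >> 32);	/* would arrive in r1 */
}

int
main(void)
{
	uint32_t lo, hi;

	split_double(1.0, &lo, &hi);
	printf("1.0 -> r0=%#x r1=%#x\n", lo, hi);	/* r1=0x3ff00000 */
	return (0);
}
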
/* void __aeabi_cdcmpeq(double, double) */
AEABI_ENTRY(cdcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmpeq)

/* void __aeabi_cdcmple(double, double) */
AEABI_ENTRY(cdcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmple)

/* void __aeabi_cdrcmple(double, double) */
AEABI_ENTRY(cdrcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdrcmple)

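Note that the flag-setting trio uses the signalling compare (vcmpe) for the ordering tests but the quiet compare (vcmp) for equality: vcmpe raises Invalid Operation on any NaN, including quiet NaNs, matching IEEE 754 semantics in which relational comparisons signal on NaN while equality does not. A host-side C illustration of that split (an assumption-laden sketch; it relies on the compiler honoring fenv access and is not part of the source):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

/* Relational compares signal FE_INVALID on NaN (vcmpe in the assembly
 * above); equality stays quiet (vcmp).  volatile discourages constant
 * folding, but a strict compiler may still need FENV_ACCESS enabled. */
int
main(void)
{
	volatile double q = nan("");
	volatile int r;

	feclearexcept(FE_ALL_EXCEPT);
	r = (q == 1.0);
	printf("==: invalid=%d\n", fetestexcept(FE_INVALID) != 0);

	feclearexcept(FE_ALL_EXCEPT);
	r = (q <= 1.0);
	printf("<=: invalid=%d\n", fetestexcept(FE_INVALID) != 0);

	(void)r;
	return (0);
}
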
/* int __aeabi_dcmpeq(double, double) */
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	ne
	movne	r0, #0
	moveq	r0, #1
	RET
AEABI_END(dcmpeq)

/* int __aeabi_dcmplt(double, double) */
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	cs
	movcs	r0, #0
-	movlt	r0, #1
+	movcc	r0, #1
	RET
AEABI_END(dcmplt)

/* int __aeabi_dcmple(double, double) */
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	hi
	movhi	r0, #0
	movls	r0, #1
	RET
AEABI_END(dcmple)

/* int __aeabi_dcmpge(double, double) */
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	lt
	movlt	r0, #0
	movge	r0, #1
	RET
AEABI_END(dcmpge)

/* int __aeabi_dcmpgt(double, double) */
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	le
	movle	r0, #0
	movgt	r0, #1
	RET
AEABI_END(dcmpgt)

/* int __aeabi_dcmpun(double, double) */
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
+	ite	vc
	movvc	r0, #0
	movvs	r0, #1
	RET
AEABI_END(dcmpun)

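For reference, the six result-returning comparisons above follow the ARM RTABI contract: each returns 1 when the relation holds and 0 otherwise, with the ordered predicates returning 0 on NaN inputs and only dcmpun returning 1. A C restatement (illustrative _ref names, not the real entry points):

#include <math.h>

/* The C relational operators are all false on NaN, matching the
 * ordered helpers; dcmpun is the "unordered" test. */
int dcmpeq_ref(double a, double b) { return (a == b); }
int dcmplt_ref(double a, double b) { return (a <  b); }
int dcmple_ref(double a, double b) { return (a <= b); }
int dcmpge_ref(double a, double b) { return (a >= b); }
int dcmpgt_ref(double a, double b) { return (a >  b); }
int dcmpun_ref(double a, double b) { return (isnan(a) || isnan(b)); }
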
/* int __aeabi_d2iz(double) */
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that used FPSCR to determine the rounding.
	 * When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64	s0, d0
#else
	ftosizd	s0, d0
#endif
	vmov	r0, s0
	RET
AEABI_END(d2iz)

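The helper must truncate toward zero regardless of the rounding mode currently set in FPSCR; that is what ftosizd encodes (the z selects round-to-zero), which is why it is kept while the vcvt encoding bug in binutils stands. The C equivalent is simply the cast (a reference sketch):

/* __aeabi_d2iz reference: C casts always round toward zero,
 * e.g. d2iz_ref(2.9) == 2 and d2iz_ref(-2.9) == -2. */
int d2iz_ref(double d) { return ((int)d); }
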
/* float __aeabi_d2f(double) */
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64	s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

/* double __aeabi_i2d(int) */
AEABI_ENTRY(i2d)
	vmov	s0, r0
	vcvt.f64.s32	d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

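The two conversions above are likewise single vcvt instructions inside the ABI glue; their C equivalents are plain casts (sketch, hypothetical _ref names):

float  d2f_ref(double d) { return ((float)d); }		/* vcvt.f32.f64 */
double i2d_ref(int i)    { return ((double)i); }	/* vcvt.f64.s32 */
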
/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)
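
Finally, each arithmetic helper wraps exactly one VFP instruction in the same load/unload glue; a C restatement of the contract (illustrative, not the real entry points):

double dadd_ref(double a, double b) { return (a + b); }	/* vadd.f64 */
double ddiv_ref(double a, double b) { return (a / b); }	/* vdiv.f64 */
double dmul_ref(double a, double b) { return (a * b); }	/* vmul.f64 */
double dsub_ref(double a, double b) { return (a - b); }	/* vsub.f64 */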