/*-
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * Copyright (c) 2012 Mark Tinguely
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/arm/arm/vfp.c 278646 2015-02-13 00:15:13Z ian $");

#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>

#include <machine/armreg.h>
#include <machine/frame.h>
#include <machine/fp.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vfp.h>

/* function prototypes */
static int vfp_bounce(u_int, u_int, struct trapframe *, int);
static void vfp_restore(struct vfp_state *);

extern int vfp_exists;
static struct undefined_handler vfp10_uh, vfp11_uh;
/* If true the VFP unit has 32 double registers, otherwise it has 16 */
static int is_d32;

/*
 * About .fpu directives in this file...
 *
 * We should need simply .fpu vfpv3, but clang 3.5 has a quirk where setting
 * vfpv3 doesn't imply that vfp2 features are also available -- both have to be
 * explicitly set to get all the features of both.  This is probably a bug in
 * clang, so it may get fixed and require changes here some day.  Other changes
 * are probably coming in clang too, because there is email and open PRs
 * indicating they want to completely disable the ability to use .fpu and
 * similar directives in inline asm.  That would be catastrophic for us,
 * hopefully they come to their senses.  There was also some discussion of a new
 * syntax such as .push fpu=vfpv3; ...; .pop fpu; and that would be ideal for
 * us, better than what we have now really.
 *
 * For gcc, each .fpu directive completely overrides the prior directive, unlike
 * with clang, but luckily on gcc saying v3 implies all the v2 features as well.
 */

#define fmxr(reg, val) \
    __asm __volatile("	.fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmsr	" __STRING(reg) ", %0"   :: "r"(val));

#define fmrx(reg) \
({ u_int val = 0;\
    __asm __volatile(" .fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmrs	%0, " __STRING(reg) : "=r"(val));	\
    val; \
})
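
/*
 * Illustrative usage sketch (not compiled): fmrx()/fmxr() expand to a single
 * VMRS/VMSR instruction that moves a VFP system register to or from a core
 * register.  Only names already used in this file appear here, except the
 * example function name, which is hypothetical.
 */
#if 0
static void
fmrx_fmxr_example(void)
{
	u_int fpexc;

	fpexc = fmrx(fpexc);			/* VMRS: read FPEXC */
	fmxr(fpexc, fpexc & ~VFPEXC_EN);	/* VMSR: write FPEXC with EN clear */
}
#endif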

static u_int
get_coprocessorACR(void)
{
	u_int val;
	__asm __volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val) : : "cc");
	return val;
}

static void
set_coprocessorACR(u_int val)
{
	__asm __volatile("mcr p15, 0, %0, c1, c0, 2\n\t"
	 : : "r" (val) : "cc");
	isb();
}


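/*
 * Illustrative sketch (not compiled): access to coprocessors 10 and 11 is
 * granted by setting the COPROC10/COPROC11 fields in CPACR via the two
 * helpers above, exactly as vfp_init() does below.  The function name here
 * is hypothetical.
 */
#if 0
static void
cpacr_enable_vfp_example(void)
{
	set_coprocessorACR(get_coprocessorACR() | COPROC10 | COPROC11);
}
#endif
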
/* Called for each cpu. */
void
vfp_init(void)
{
	u_int fpsid, fpexc, tmp;
	u_int coproc, vfp_arch;

	coproc = get_coprocessorACR();
	coproc |= COPROC10 | COPROC11;
	set_coprocessorACR(coproc);

	fpsid = fmrx(fpsid);		/* read the vfp system id */
	fpexc = fmrx(fpexc);		/* read the vfp exception reg */

	if (!(fpsid & VFPSID_HARDSOFT_IMP)) {
		vfp_exists = 1;
		is_d32 = 0;
		PCPU_SET(vfpsid, fpsid);	/* save the fpsid */

		vfp_arch =
		    (fpsid & VFPSID_SUBVERSION2_MASK) >> VFPSID_SUBVERSION_OFF;

		if (vfp_arch >= VFP_ARCH3) {
			tmp = fmrx(mvfr0);
			PCPU_SET(vfpmvfr0, tmp);

			if ((tmp & VMVFR0_RB_MASK) == 2)
				is_d32 = 1;

			tmp = fmrx(mvfr1);
			PCPU_SET(vfpmvfr1, tmp);
		}

		/*
		 * Initialize the coprocessor 10 and 11 calls.
		 * These are called to restore the registers and enable
		 * the VFP hardware.
		 */
		if (vfp10_uh.uh_handler == NULL) {
			vfp10_uh.uh_handler = vfp_bounce;
			vfp11_uh.uh_handler = vfp_bounce;
			install_coproc_handler_static(10, &vfp10_uh);
			install_coproc_handler_static(11, &vfp11_uh);
		}
	}
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);


/*
 * Start the VFP unit, restore the VFP registers from the PCB, and retry
 * the instruction.
 */
static int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
{
	u_int cpu, fpexc;
	struct pcb *curpcb;
	ksiginfo_t ksi;

	if ((code & FAULT_USER) == 0)
		panic("undefined floating point instruction in supervisor mode");

	critical_enter();

	/*
	 * If the VFP is already on and we got an undefined instruction, then
	 * something tried to execute a truly invalid instruction that maps to
	 * the VFP.
	 */
	fpexc = fmrx(fpexc);
	if (fpexc & VFPEXC_EN) {
		/* Clear any exceptions */
		fmxr(fpexc, fpexc & ~(VFPEXC_EX | VFPEXC_FP2V));

		/* kill the process - we do not handle emulation */
		critical_exit();

		if (fpexc & VFPEXC_EX) {
			/* We have an exception, signal a SIGFPE */
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGFPE;
			if (fpexc & VFPEXC_UFC)
				ksi.ksi_code = FPE_FLTUND;
			else if (fpexc & VFPEXC_OFC)
				ksi.ksi_code = FPE_FLTOVF;
			else if (fpexc & VFPEXC_IOC)
				ksi.ksi_code = FPE_FLTINV;
			ksi.ksi_addr = (void *)addr;
			trapsignal(curthread, &ksi);
			return 0;
		}

		return 1;
	}

	/*
	 * If the last time this thread used the VFP it was on this core, and
	 * the last thread to use the VFP on this core was this thread, then the
	 * VFP state is valid, otherwise restore this thread's state to the VFP.
	 */
	fmxr(fpexc, fpexc | VFPEXC_EN);
	curpcb = curthread->td_pcb;
	cpu = PCPU_GET(cpu);
	if (curpcb->pcb_vfpcpu != cpu || curthread != PCPU_GET(fpcurthread)) {
		vfp_restore(&curpcb->pcb_vfpstate);
		curpcb->pcb_vfpcpu = cpu;
		PCPU_SET(fpcurthread, curthread);
	}

	critical_exit();
	return (0);
}

/*
 * Restore the given state to the VFP hardware.
 */
static void
vfp_restore(struct vfp_state *vfpsave)
{
	uint32_t fpexc;

	/* On vfpv3 we may need to restore FPINST and FPINST2 */
	fpexc = vfpsave->fpexec;
	if (fpexc & VFPEXC_EX) {
		fmxr(fpinst, vfpsave->fpinst);
		if (fpexc & VFPEXC_FP2V)
			fmxr(fpinst2, vfpsave->fpinst2);
	}
	fmxr(fpscr, vfpsave->fpscr);

	__asm __volatile(
	    " .fpu	vfpv2\n"
	    " .fpu	vfpv3\n"
	    " vldmia	%0!, {d0-d15}\n"	/* d0-d15 */
	    " cmp	%1, #0\n"		/* -D16 or -D32? */
	    " vldmiane	%0!, {d16-d31}\n"	/* d16-d31 */
	    " addeq	%0, %0, #128\n"		/* skip missing regs */
	    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
	    );

	fmxr(fpexc, fpexc);
}

/*
 * If the VFP is on, save its current state and turn it off if requested to do
 * so.  If the VFP is not on, the values at *vfpsave are not changed.  The
 * caller is responsible for preventing a context switch while this is running.
 */
void
vfp_store(struct vfp_state *vfpsave, boolean_t disable_vfp)
{
	uint32_t fpexc;

	fpexc = fmrx(fpexc);		/* Is the vfp enabled? */
	if (fpexc & VFPEXC_EN) {
		vfpsave->fpexec = fpexc;
		vfpsave->fpscr = fmrx(fpscr);

		/* On vfpv3 we may need to save FPINST and FPINST2 */
		if (fpexc & VFPEXC_EX) {
			vfpsave->fpinst = fmrx(fpinst);
			if (fpexc & VFPEXC_FP2V)
				vfpsave->fpinst2 = fmrx(fpinst2);
			fpexc &= ~VFPEXC_EX;
		}

		__asm __volatile(
		    " .fpu	vfpv2\n"
		    " .fpu	vfpv3\n"
		    " vstmia	%0!, {d0-d15}\n"	/* d0-d15 */
		    " cmp	%1, #0\n"		/* -D16 or -D32? */
		    " vstmiane	%0!, {d16-d31}\n"	/* d16-d31 */
		    " addeq	%0, %0, #128\n"		/* skip missing regs */
		    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
		    );

		if (disable_vfp)
			fmxr(fpexc, fpexc & ~VFPEXC_EN);
	}
}
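
/*
 * Illustrative caller sketch (not compiled): a context-switch or signal path
 * would typically snapshot the outgoing thread's VFP state into its PCB while
 * context switches are blocked, and may pass a nonzero disable_vfp so that the
 * next VFP use traps into vfp_bounce().  The function name below is
 * hypothetical.
 */
#if 0
static void
vfp_store_example(struct thread *td)
{
	critical_enter();
	vfp_store(&td->td_pcb->pcb_vfpstate, 1);	/* save and disable */
	critical_exit();
}
#endif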

/*
 * The current thread is dying.  If the state currently in the hardware belongs
 * to the current thread, set fpcurthread to NULL to indicate that the VFP
 * hardware state does not belong to any thread.  If the VFP is on, turn it off.
 * Called only from cpu_throw(), so we don't have to worry about a context
 * switch here.
 */
void
vfp_discard(struct thread *td)
{
	u_int tmp;

	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	tmp = fmrx(fpexc);
	if (tmp & VFPEXC_EN)
		fmxr(fpexc, tmp & ~VFPEXC_EN);
}

#endif