/*-
 * Copyright (c) 2003 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <ia64/disasm/disasm.h>

static int ia64_unaligned_print = 0;
SYSCTL_INT(_debug, OID_AUTO, unaligned_print, CTLFLAG_RW,
    &ia64_unaligned_print, 0, "warn about unaligned accesses");

static int ia64_unaligned_test = 0;
SYSCTL_INT(_debug, OID_AUTO, unaligned_test, CTLFLAG_RW,
    &ia64_unaligned_test, 0, "test emulation when PSR.ac is set");

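/*
 * Return a pointer to the image of FP register 'fr' in the machine
 * context, or NULL for f0/f1 (architecturally fixed values) and for
 * register numbers outside the f2-f127 range.
 */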
static void *
fpreg_ptr(mcontext_t *mc, int fr)
{
	union _ia64_fpreg *p;

	if (fr <= 1 || fr >= 128)
		return (NULL);
	if (fr >= 32) {
		p = &mc->mc_high_fp.fr32;
		fr -= 32;
	} else if (fr >= 16) {
		p = &mc->mc_preserved_fp.fr16;
		fr -= 16;
	} else if (fr >= 6) {
		p = &mc->mc_scratch_fp.fr6;
		fr -= 6;
	} else {
		p = &mc->mc_preserved_fp.fr2;
		fr -= 2;
	}
	return ((void*)(p + fr));
}

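/*
 * Return a pointer to the image of general register 'gr' in the machine
 * context. Stacked registers (r32 and up) are located on the register
 * backing store. Return NULL for r0 and for registers beyond the current
 * frame.
 */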
static void *
greg_ptr(mcontext_t *mc, int gr)
{
	uint64_t *p;
	int nslots;

	if (gr <= 0 || gr >= 32 + (mc->mc_special.cfm & 0x7f))
		return (NULL);
	if (gr >= 32) {
		nslots = IA64_CFM_SOF(mc->mc_special.cfm) - gr + 32;
		p = (void *)ia64_bsp_adjust(mc->mc_special.bspstore, -nslots);
		gr = 0;
	} else if (gr >= 14) {
		p = &mc->mc_scratch.gr14;
		gr -= 14;
	} else if (gr == 13) {
		p = &mc->mc_special.tp;
		gr = 0;
	} else if (gr == 12) {
		p = &mc->mc_special.sp;
		gr = 0;
	} else if (gr >= 8) {
		p = &mc->mc_scratch.gr8;
		gr -= 8;
	} else if (gr >= 4) {
		p = &mc->mc_preserved.gr4;
		gr -= 4;
	} else if (gr >= 2) {
		p = &mc->mc_scratch.gr2;
		gr -= 2;
	} else {
		p = &mc->mc_special.gp;
		gr = 0;
	}
	return ((void*)(p + gr));
}

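/*
 * Read a register image. Stacked registers are addressed through the
 * user's register backing store, so fetch user space addresses with
 * fuword().
 */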
static uint64_t
rdreg(uint64_t *addr)
{
	if ((uintptr_t)addr < VM_MAXUSER_ADDRESS)
		return (fuword(addr));
	return (*addr);
}

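/*
 * Write a register image. The counterpart of rdreg() above.
 */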
static void
wrreg(uint64_t *addr, uint64_t val)
{
	if ((uintptr_t)addr < VM_MAXUSER_ADDRESS)
		suword(addr, val);
	else
		*addr = val;
}

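/*
 * Emulate the misaligned load or store described by the decoded
 * instruction 'i'. Return 0 on success, EINVAL when a register operand
 * cannot be resolved and ENOENT for opcodes we do not handle.
 */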
static int
fixup(struct asm_inst *i, mcontext_t *mc, uint64_t va)
{
	union {
		double d;
		long double e;
		uint64_t i;
		float s;
	} buf;
	void *reg;
	uint64_t postinc;

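	/*
	 * The access is staged through 'buf'. FP values are bounced
	 * through f6 so that the memory format is converted to/from the
	 * spill format kept in the machine context.
	 */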
	switch (i->i_op) {
	case ASM_OP_LD2:
		copyin((void*)va, (void*)&buf.i, 2);
		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		wrreg(reg, buf.i & 0xffffU);
		break;
	case ASM_OP_LD4:
		copyin((void*)va, (void*)&buf.i, 4);
		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		wrreg(reg, buf.i & 0xffffffffU);
		break;
	case ASM_OP_LD8:
		copyin((void*)va, (void*)&buf.i, 8);
		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		wrreg(reg, buf.i);
		break;
	case ASM_OP_LDFD:
		copyin((void*)va, (void*)&buf.d, sizeof(buf.d));
		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldfd f6=%1;; stf.spill %0=f6" : "=m"(*(double *)reg) :
		    "m"(buf.d) : "f6");
		break;
	case ASM_OP_LDFE:
		copyin((void*)va, (void*)&buf.e, sizeof(buf.e));
		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldfe f6=%1;; stf.spill %0=f6" :
		    "=m"(*(long double *)reg) : "m"(buf.e) : "f6");
		break;
	case ASM_OP_LDFS:
		copyin((void*)va, (void*)&buf.s, sizeof(buf.s));
		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldfs f6=%1;; stf.spill %0=f6" : "=m"(*(float *)reg) :
		    "m"(buf.s) : "f6");
		break;
	case ASM_OP_ST2:
		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		buf.i = rdreg(reg);
		copyout((void*)&buf.i, (void*)va, 2);
		break;
	case ASM_OP_ST4:
		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		buf.i = rdreg(reg);
		copyout((void*)&buf.i, (void*)va, 4);
		break;
	case ASM_OP_ST8:
		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		buf.i = rdreg(reg);
		copyout((void*)&buf.i, (void*)va, 8);
		break;
	case ASM_OP_STFD:
		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldf.fill f6=%1;; stfd %0=f6" : "=m"(buf.d) :
		    "m"(*(double *)reg) : "f6");
		copyout((void*)&buf.d, (void*)va, sizeof(buf.d));
		break;
	case ASM_OP_STFE:
		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldf.fill f6=%1;; stfe %0=f6" : "=m"(buf.e) :
		    "m"(*(long double *)reg) : "f6");
		copyout((void*)&buf.e, (void*)va, sizeof(buf.e));
		break;
	case ASM_OP_STFS:
		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		__asm("ldf.fill f6=%1;; stfs %0=f6" : "=m"(buf.s) :
		    "m"(*(float *)reg) : "f6");
		copyout((void*)&buf.s, (void*)va, sizeof(buf.s));
		break;
	default:
		return (ENOENT);
	}

	/* Handle post-increment. */
	if (i->i_oper[3].o_type == ASM_OPER_GREG) {
		reg = greg_ptr(mc, (int)i->i_oper[3].o_value);
		if (reg == NULL)
			return (EINVAL);
		postinc = rdreg(reg);
	} else
		postinc = (i->i_oper[3].o_type == ASM_OPER_IMM)
		    ? i->i_oper[3].o_value : 0;
	if (postinc != 0) {
		if (i->i_oper[1].o_type == ASM_OPER_MEM)
			reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
		else
			reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
		if (reg == NULL)
			return (EINVAL);
		postinc += rdreg(reg);
		wrreg(reg, postinc);
	}
	return (0);
}

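/*
 * Try to emulate the misaligned access that caused the fault. Return 0
 * when the access has been emulated, or the signal (SIGBUS or SIGILL)
 * that is to be delivered to the thread otherwise.
 */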
int
unaligned_fixup(struct trapframe *tf, struct thread *td)
{
	mcontext_t context;
	struct asm_bundle bundle;
	int error, slot;

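	/* PSR.ri holds the number of the faulting slot within the bundle. */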
	slot = ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_0) ? 0 :
	    ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_1) ? 1 : 2;

	if (ia64_unaligned_print) {
		uprintf("pid %d (%s): unaligned access: va=0x%lx, pc=0x%lx\n",
		    td->td_proc->p_pid, td->td_proc->p_comm,
		    tf->tf_special.ifa, tf->tf_special.iip + slot);
	}

	/*
	 * If PSR.ac is set, the process wants to be signalled about mis-
	 * aligned loads and stores. Send it a SIGBUS. In order for us to
	 * test the emulation of misaligned loads and stores, we have a
	 * sysctl that tells us that we must emulate the load or store,
	 * instead of sending the signal. We need the sysctl because if
	 * PSR.ac is not set, the CPU may (and likely will) deal with the
	 * misaligned load or store itself. As such, we won't get the
	 * exception.
	 */
	if ((tf->tf_special.psr & IA64_PSR_AC) && !ia64_unaligned_test)
		return (SIGBUS);

	if (!asm_decode(tf->tf_special.iip, &bundle))
		return (SIGILL);

	get_mcontext(td, &context, 0);

	error = fixup(bundle.b_inst + slot, &context, tf->tf_special.ifa);
	if (error == ENOENT) {
		printf("unhandled misaligned memory access:\n\t");
		asm_print_inst(&bundle, slot, tf->tf_special.iip);
		return (SIGILL);
	} else if (error != 0)
		return (SIGBUS);

	set_mcontext(td, &context);

	/* Advance to the next instruction. */
	if (slot == 2) {
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip += 16;
	} else
		tf->tf_special.psr += IA64_PSR_RI_1;

	return (0);
}