/* unaligned.c revision 150335 */
/*-
 * Copyright (c) 2003 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/ia64/ia64/unaligned.c 150335 2005-09-19 16:51:43Z rwatson $
 */
29
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <ia64/disasm/disasm.h>
42
/*
 * debug.unaligned_print: when non-zero, log each unaligned access
 * (pid, process name, faulting VA and PC) via uprintf().
 */
static int ia64_unaligned_print = 0;
SYSCTL_INT(_debug, OID_AUTO, unaligned_print, CTLFLAG_RW,
    &ia64_unaligned_print, 0, "warn about unaligned accesses");

/*
 * debug.unaligned_test: when non-zero, emulate the access even though
 * PSR.ac is set (normally PSR.ac means the process asked for SIGBUS).
 * Needed to exercise the emulation paths, because with PSR.ac clear the
 * CPU may handle the misaligned access itself and we never trap.
 */
static int ia64_unaligned_test = 0;
SYSCTL_INT(_debug, OID_AUTO, unaligned_test, CTLFLAG_RW,
    &ia64_unaligned_test, 0, "test emulation when PSR.ac is set");
50
51static void *
52fpreg_ptr(mcontext_t *mc, int fr)
53{
54	union _ia64_fpreg *p;
55
56	if (fr <= 1 || fr >= 128)
57		return (NULL);
58	if (fr >= 32) {
59		p = &mc->mc_high_fp.fr32;
60		fr -= 32;
61	} else if (fr >= 16) {
62		p = &mc->mc_preserved_fp.fr16;
63		fr -= 16;
64	} else if (fr >= 6) {
65		p = &mc->mc_scratch_fp.fr6;
66		fr -= 6;
67	} else {
68		p = &mc->mc_preserved_fp.fr2;
69		fr -= 2;
70	}
71	return ((void*)(p + fr));
72}
73
/*
 * Map general register number <gr> to a pointer to its saved image in
 * the machine context. Returns NULL when the register cannot be
 * accessed: r0 is hardwired to zero, and stacked registers beyond the
 * current frame (CFM.sof, low 7 bits of cfm) do not exist.
 */
static void *
greg_ptr(mcontext_t *mc, int gr)
{
	uint64_t *p;
	int nslots;

	if (gr <= 0 || gr >= 32 + (mc->mc_special.cfm & 0x7f))
		return (NULL);
	if (gr >= 32) {
		/*
		 * Stacked register: its image lives on the register
		 * backing store, <nslots> slots back from bspstore.
		 * ia64_bsp_adjust accounts for NaT collection slots.
		 * The result may be a user-space address, which is why
		 * rdreg()/wrreg() go through fuword()/suword().
		 */
	 	nslots = IA64_CFM_SOF(mc->mc_special.cfm) - gr + 32;
		p = (void *)ia64_bsp_adjust(mc->mc_special.bspstore, -nslots);
		gr = 0;
	} else if (gr >= 14) {
		/* Scratch r14-r31, stored contiguously from gr14. */
		p = &mc->mc_scratch.gr14;
		gr -= 14;
	} else if (gr == 13) {
		/* r13 is the thread pointer, kept in the special area. */
		p = &mc->mc_special.tp;
		gr = 0;
	} else if (gr == 12) {
		/* r12 is the stack pointer, kept in the special area. */
		p = &mc->mc_special.sp;
		gr = 0;
	} else if (gr >= 8) {
		/* Scratch r8-r11. */
		p = &mc->mc_scratch.gr8;
		gr -= 8;
	} else if (gr >= 4) {
		/* Preserved r4-r7. */
		p = &mc->mc_preserved.gr4;
		gr -= 4;
	} else if (gr >= 2) {
		/* Scratch r2-r3. */
		p = &mc->mc_scratch.gr2;
		gr -= 2;
	} else {
		/* r1 is the global pointer, kept in the special area. */
		p = &mc->mc_special.gp;
		gr = 0;
	}
	return ((void*)(p + gr));
}
110
111static uint64_t
112rdreg(uint64_t *addr)
113{
114	if ((uintptr_t)addr < VM_MAX_ADDRESS)
115		return (fuword(addr));
116	return (*addr);
117}
118
119static void
120wrreg(uint64_t *addr, uint64_t val)
121{
122	if ((uintptr_t)addr < VM_MAX_ADDRESS)
123		suword(addr, val);
124	else
125		*addr = val;
126}
127
128static int
129fixup(struct asm_inst *i, mcontext_t *mc, uint64_t va)
130{
131	union {
132		double d;
133		long double e;
134		uint64_t i;
135		float s;
136	} buf;
137	void *reg;
138	uint64_t postinc;
139
140	switch (i->i_op) {
141	case ASM_OP_LD2:
142		copyin((void*)va, (void*)&buf.i, 2);
143		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
144		if (reg == NULL)
145			return (EINVAL);
146		wrreg(reg, buf.i & 0xffffU);
147		break;
148	case ASM_OP_LD4:
149		copyin((void*)va, (void*)&buf.i, 4);
150		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
151		if (reg == NULL)
152			return (EINVAL);
153		wrreg(reg, buf.i & 0xffffffffU);
154		break;
155	case ASM_OP_LD8:
156		copyin((void*)va, (void*)&buf.i, 8);
157		reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
158		if (reg == NULL)
159			return (EINVAL);
160		wrreg(reg, buf.i);
161		break;
162	case ASM_OP_LDFD:
163		copyin((void*)va, (void*)&buf.d, sizeof(buf.d));
164		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
165		if (reg == NULL)
166			return (EINVAL);
167		__asm("ldfd f6=%1;; stf.spill %0=f6" : "=m"(*(double *)reg) :
168		    "m"(buf.d) : "f6");
169		break;
170	case ASM_OP_LDFE:
171		copyin((void*)va, (void*)&buf.e, sizeof(buf.e));
172		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
173		if (reg == NULL)
174			return (EINVAL);
175		__asm("ldfe f6=%1;; stf.spill %0=f6" :
176		    "=m"(*(long double *)reg) : "m"(buf.e) : "f6");
177		break;
178	case ASM_OP_LDFS:
179		copyin((void*)va, (void*)&buf.s, sizeof(buf.s));
180		reg = fpreg_ptr(mc, (int)i->i_oper[1].o_value);
181		if (reg == NULL)
182			return (EINVAL);
183		__asm("ldfs f6=%1;; stf.spill %0=f6" : "=m"(*(float *)reg) :
184		    "m"(buf.s) : "f6");
185		break;
186	case ASM_OP_ST2:
187		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
188		if (reg == NULL)
189			return (EINVAL);
190		buf.i = rdreg(reg);
191		copyout((void*)&buf.i, (void*)va, 2);
192		break;
193	case ASM_OP_ST4:
194		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
195		if (reg == NULL)
196			return (EINVAL);
197		buf.i = rdreg(reg);
198		copyout((void*)&buf.i, (void*)va, 4);
199		break;
200	case ASM_OP_ST8:
201		reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
202		if (reg == NULL)
203			return (EINVAL);
204		buf.i = rdreg(reg);
205		copyout((void*)&buf.i, (void*)va, 8);
206		break;
207	case ASM_OP_STFD:
208		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
209		if (reg == NULL)
210			return (EINVAL);
211		__asm("ldf.fill f6=%1;; stfd %0=f6" : "=m"(buf.d) :
212		    "m"(*(double *)reg) : "f6");
213		copyout((void*)&buf.d, (void*)va, sizeof(buf.d));
214		break;
215	case ASM_OP_STFE:
216		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
217		if (reg == NULL)
218			return (EINVAL);
219		__asm("ldf.fill f6=%1;; stfe %0=f6" : "=m"(buf.e) :
220		    "m"(*(long double *)reg) : "f6");
221		copyout((void*)&buf.e, (void*)va, sizeof(buf.e));
222		break;
223	case ASM_OP_STFS:
224		reg = fpreg_ptr(mc, (int)i->i_oper[2].o_value);
225		if (reg == NULL)
226			return (EINVAL);
227		__asm("ldf.fill f6=%1;; stfs %0=f6" : "=m"(buf.s) :
228		    "m"(*(float *)reg) : "f6");
229		copyout((void*)&buf.s, (void*)va, sizeof(buf.s));
230		break;
231	default:
232		return (ENOENT);
233	}
234
235	/* Handle post-increment. */
236	if (i->i_oper[3].o_type == ASM_OPER_GREG) {
237		reg = greg_ptr(mc, (int)i->i_oper[3].o_value);
238		if (reg == NULL)
239			return (EINVAL);
240		postinc = rdreg(reg);
241	} else
242		postinc = (i->i_oper[3].o_type == ASM_OPER_IMM)
243		    ? i->i_oper[3].o_value : 0;
244	if (postinc != 0) {
245		if (i->i_oper[1].o_type == ASM_OPER_MEM)
246			reg = greg_ptr(mc, (int)i->i_oper[1].o_value);
247		else
248			reg = greg_ptr(mc, (int)i->i_oper[2].o_value);
249		if (reg == NULL)
250			return (EINVAL);
251		postinc += rdreg(reg);
252		wrreg(reg, postinc);
253	}
254	return (0);
255}
256
/*
 * Trap handler entry point for unaligned-reference faults taken on
 * behalf of thread <td>. Decodes the faulting bundle, emulates the
 * access against the thread's machine context, and advances past the
 * instruction on success.
 *
 * Returns 0 when the access was emulated, or the signal number (SIGBUS
 * or SIGILL) the caller should deliver to the process.
 */
int
unaligned_fixup(struct trapframe *tf, struct thread *td)
{
	mcontext_t context;
	struct asm_bundle bundle;
	int error, slot;

	/* Which of the bundle's three instruction slots faulted (PSR.ri). */
	slot = ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_0) ? 0 :
	    ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_1) ? 1 : 2;

	if (ia64_unaligned_print) {
		mtx_lock(&Giant);
		uprintf("pid %d (%s): unaligned access: va=0x%lx, pc=0x%lx\n",
		    td->td_proc->p_pid, td->td_proc->p_comm,
		    tf->tf_special.ifa, tf->tf_special.iip + slot);
		mtx_unlock(&Giant);
	}

	/*
	 * If PSR.ac is set, the process wants to be signalled about mis-
	 * aligned loads and stores. Send it a SIGBUS. In order for us to
	 * test the emulation of misaligned loads and stores, we have a
	 * sysctl that tells us that we must emulate the load or store,
	 * instead of sending the signal. We need the sysctl because if
	 * PSR.ac is not set, the CPU may (and likely will) deal with the
	 * misaligned load or store itself. As such, we won't get the
	 * exception.
	 */
	if ((tf->tf_special.psr & IA64_PSR_AC) && !ia64_unaligned_test)
		return (SIGBUS);

	if (!asm_decode(tf->tf_special.iip, &bundle))
		return (SIGILL);

	/* Emulate against a snapshot of the thread's machine context... */
	get_mcontext(td, &context, 0);

	error = fixup(bundle.b_inst + slot, &context, tf->tf_special.ifa);
	if (error == ENOENT) {
		printf("unhandled misaligned memory access:\n\t");
		asm_print_inst(&bundle, slot, tf->tf_special.iip);
		return (SIGILL);
	} else if (error != 0)
		return (SIGBUS);

	/* ...and write the updated register state back on success. */
	set_mcontext(td, &context);

	/* Advance to the next instruction. */
	if (slot == 2) {
		/* Last slot: step to the next 16-byte bundle, reset PSR.ri. */
		tf->tf_special.psr &= ~IA64_PSR_RI;
		tf->tf_special.iip += 16;
	} else
		tf->tf_special.psr += IA64_PSR_RI_1;

	return (0);
}
312