/*-
 * Copyright (c) 2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/ia64/ia64/unaligned.c 115378 2003-05-29 06:30:36Z marcel $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <machine/frame.h>
#include <machine/inst.h>

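/*
 * Sign-extend the low "w" bits of "imm" to 64 bits by shifting the
 * field up to the top of the word and arithmetic-shifting it back
 * down, e.g. sign_extend(0x1ff, 9) == -1 while sign_extend(0xff, 9)
 * == 255.
 */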
#define sign_extend(imm, w) (((int64_t)(imm) << (64 - (w))) >> (64 - (w)))

static int ia64_unaligned_print = 1;	/* warn about unaligned accesses */
static int ia64_unaligned_fix = 1;	/* fix up unaligned accesses */
static int ia64_unaligned_sigbus = 0;	/* don't SIGBUS on fixed-up accesses */

SYSCTL_INT(_machdep, OID_AUTO, unaligned_print, CTLFLAG_RW,
    &ia64_unaligned_print, 0, "warn about unaligned accesses");

SYSCTL_INT(_machdep, OID_AUTO, unaligned_fix, CTLFLAG_RW,
    &ia64_unaligned_fix, 0, "fix up unaligned accesses (if possible)");

SYSCTL_INT(_machdep, OID_AUTO, unaligned_sigbus, CTLFLAG_RW,
    &ia64_unaligned_sigbus, 0, "do not SIGBUS on fixed-up accesses");
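
/*
 * unaligned_fixup() below also honors per-process overrides of these
 * defaults via the MDP_UAC_* flags in td_md.md_flags.
 */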

int unaligned_fixup(struct trapframe *framep, struct thread *td);

enum type {
	LD_SA,
	LD_S,
	LD_A,
	LD_C_CLR,
	LD_C_NC,
	LD
};

struct decoding {
	int isload;		/* non-zero if load */
	enum type type;		/* type of load or store */
	int basereg;		/* register holding the address */
	int reg;		/* register number to load or store */
	int width;		/* number of bytes */
	int update;		/* post-increment for basereg */
	int updateisreg;	/* non-zero if update is a register number */
	int fence;		/* non-zero if fence needed */
};

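/*
 * Decoders for the M-unit integer memory formats: M1 (load), M2 (load
 * with register update), M3 (load with immediate update), M4 (store)
 * and M5 (store with immediate update).  In every format the low two
 * bits of the x6 opcode extension encode the log2 of the access width
 * (e.g. x6 & 3 == 3 for an 8-byte ld8/st8), and for loads x6 >> 2
 * indexes the speculation variant (plain, .s, .a, .sa, .c.clr or
 * .c.nc).
 */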
static int
unaligned_decode_M1(union ia64_instruction ins, struct decoding *d)
{
	static enum type types[] = {
		LD, LD_S, LD_A,	LD_SA, LD,
		LD, LD, LD_C_CLR, LD_C_NC, LD_C_CLR
	};
	d->isload = 1;
	d->type = types[ins.M1.x6 >> 2];
	d->basereg = ins.M1.r3;
	d->reg = ins.M1.r1;
	d->width = (1 << (ins.M1.x6 & 3));
	if ((ins.M1.x6 >= 0x14 && ins.M1.x6 <= 0x17)
	    || (ins.M1.x6 >= 0x28 && ins.M1.x6 <= 0x2b))
	    d->fence = 1;
	return 1;
}

static int
unaligned_decode_M2(union ia64_instruction ins, struct decoding *d)
{
	static enum type types[] = {
		LD, LD_S, LD_A,	LD_SA, LD,
		LD, LD, LD_C_CLR, LD_C_NC, LD_C_CLR
	};
	d->isload = 1;
	d->type = types[ins.M2.x6 >> 2];
	d->basereg = ins.M2.r3;
	d->reg = ins.M2.r1;
	d->width = (1 << (ins.M2.x6 & 3));
	d->update = ins.M2.r2;
	d->updateisreg = 1;
	if ((ins.M2.x6 >= 0x14 && ins.M2.x6 <= 0x17)
	    || (ins.M2.x6 >= 0x28 && ins.M2.x6 <= 0x2b))
	    d->fence = 1;
	return 1;
}

static int
unaligned_decode_M3(union ia64_instruction ins, struct decoding *d)
{
	static enum type types[] = {
		LD, LD_S, LD_A,	LD_SA, LD,
		LD, LD, LD_C_CLR, LD_C_NC, LD_C_CLR
	};
	d->isload = 1;
	d->type = types[ins.M3.x6 >> 2];
	d->basereg = ins.M3.r3;
	d->reg = ins.M3.r1;
	d->width = (1 << (ins.M3.x6 & 3));
	d->update = sign_extend((ins.M3.s << 8)
				| (ins.M3.i << 7)
				| ins.M3.imm7b, 9);
	if ((ins.M3.x6 >= 0x14 && ins.M3.x6 <= 0x17)
	    || (ins.M3.x6 >= 0x28 && ins.M3.x6 <= 0x2b))
	    d->fence = 1;
	return 1;
}

static int
unaligned_decode_M4(union ia64_instruction ins, struct decoding *d)
{
	d->isload = 0;
	d->basereg = ins.M4.r3;
	d->reg = ins.M4.r2;
	d->width = (1 << (ins.M4.x6 & 3));
	if (ins.M4.x6 >= 0x34 && ins.M4.x6 <= 0x37)
	    d->fence = 1;
	return 1;
}

static int
unaligned_decode_M5(union ia64_instruction ins, struct decoding *d)
{
	d->isload = 0;
	d->basereg = ins.M5.r3;
	d->reg = ins.M5.r2;
	d->width = (1 << (ins.M5.x6 & 3));
	d->update = sign_extend((ins.M5.s << 8)
				| (ins.M5.i << 7)
				| ins.M5.imm7a, 9);
	if (ins.M5.x6 >= 0x34 && ins.M5.x6 <= 0x37)
	    d->fence = 1;
	return 1;
}

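/*
 * Fetch or store the value of a general register from or to the trap
 * frame.  Only the static registers saved on kernel entry are handled:
 * r1 (gp), r2-r3, r8-r31, with r12 as sp and r13 as tp.  The preserved
 * registers r4-r7 are not in the trap frame, and the stacked registers
 * r32-r127 would have to be located in the RSE backing store (see the
 * disabled code below), so both cases return EINVAL.
 */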
static int
read_register(struct trapframe *framep, struct thread *td,
	      int reg, u_int64_t *valuep)
{

	if (reg < 32) {
		switch (reg) {
		case 0:  *valuep = 0; break;
		case 1:  *valuep = framep->tf_special.gp; break;
		case 2:	 *valuep = framep->tf_scratch.gr2; break;
		case 3:  *valuep = framep->tf_scratch.gr3; break;
		case 8:  *valuep = framep->tf_scratch.gr8; break;
		case 9:  *valuep = framep->tf_scratch.gr9; break;
		case 10: *valuep = framep->tf_scratch.gr10; break;
		case 11: *valuep = framep->tf_scratch.gr11; break;
		case 12: *valuep = framep->tf_special.sp; break;
		case 13: *valuep = framep->tf_special.tp; break;
		case 14: *valuep = framep->tf_scratch.gr14; break;
		case 15: *valuep = framep->tf_scratch.gr15; break;
		case 16: *valuep = framep->tf_scratch.gr16; break;
		case 17: *valuep = framep->tf_scratch.gr17; break;
		case 18: *valuep = framep->tf_scratch.gr18; break;
		case 19: *valuep = framep->tf_scratch.gr19; break;
		case 20: *valuep = framep->tf_scratch.gr20; break;
		case 21: *valuep = framep->tf_scratch.gr21; break;
		case 22: *valuep = framep->tf_scratch.gr22; break;
		case 23: *valuep = framep->tf_scratch.gr23; break;
		case 24: *valuep = framep->tf_scratch.gr24; break;
		case 25: *valuep = framep->tf_scratch.gr25; break;
		case 26: *valuep = framep->tf_scratch.gr26; break;
		case 27: *valuep = framep->tf_scratch.gr27; break;
		case 28: *valuep = framep->tf_scratch.gr28; break;
		case 29: *valuep = framep->tf_scratch.gr29; break;
		case 30: *valuep = framep->tf_scratch.gr30; break;
		case 31: *valuep = framep->tf_scratch.gr31; break;
		default:
			return (EINVAL);
		}
	} else {
#if 0
		u_int64_t cfm = framep->tf_special.cfm;
		u_int64_t *bsp = (u_int64_t *)(td->td_kstack +
		    framep->tf_ndirty);
		int sof = cfm & 0x7f;
		int sor = 8*((cfm >> 14) & 15);
		int rrb_gr = (cfm >> 18) & 0x7f;

		/*
		 * Skip back to the start of the interrupted frame.
		 */
		bsp = ia64_rse_previous_frame(bsp, sof);

		if (reg - 32 > sof)
			return EINVAL;
		if (reg - 32 < sor) {
			if (reg - 32 + rrb_gr >= sor)
				reg = reg + rrb_gr - sor;
			else
				reg = reg + rrb_gr;
		}

		*valuep = *ia64_rse_register_address(bsp, reg);
		return (0);
#else
		return (EINVAL);
#endif
	}
	return (0);
}

static int
write_register(struct trapframe *framep, struct thread *td,
	      int reg, u_int64_t value)
{

	if (reg < 32) {
		switch (reg) {
		case 1:  framep->tf_special.gp = value; break;
		case 2:	 framep->tf_scratch.gr2 = value; break;
		case 3:  framep->tf_scratch.gr3 = value; break;
		case 8:  framep->tf_scratch.gr8 = value; break;
		case 9:  framep->tf_scratch.gr9 = value; break;
		case 10: framep->tf_scratch.gr10 = value; break;
		case 11: framep->tf_scratch.gr11 = value; break;
		case 12: framep->tf_special.sp = value; break;
		case 13: framep->tf_special.tp = value; break;
		case 14: framep->tf_scratch.gr14 = value; break;
		case 15: framep->tf_scratch.gr15 = value; break;
		case 16: framep->tf_scratch.gr16 = value; break;
		case 17: framep->tf_scratch.gr17 = value; break;
		case 18: framep->tf_scratch.gr18 = value; break;
		case 19: framep->tf_scratch.gr19 = value; break;
		case 20: framep->tf_scratch.gr20 = value; break;
		case 21: framep->tf_scratch.gr21 = value; break;
		case 22: framep->tf_scratch.gr22 = value; break;
		case 23: framep->tf_scratch.gr23 = value; break;
		case 24: framep->tf_scratch.gr24 = value; break;
		case 25: framep->tf_scratch.gr25 = value; break;
		case 26: framep->tf_scratch.gr26 = value; break;
		case 27: framep->tf_scratch.gr27 = value; break;
		case 28: framep->tf_scratch.gr28 = value; break;
		case 29: framep->tf_scratch.gr29 = value; break;
		case 30: framep->tf_scratch.gr30 = value; break;
		case 31: framep->tf_scratch.gr31 = value; break;
		default:
			return (EINVAL);
		}
	} else {
#if 0
		u_int64_t cfm = framep->tf_special.cfm;
		u_int64_t *bsp = (u_int64_t *) (td->td_kstack
						+ framep->tf_ndirty);
		int sof = cfm & 0x7f;
		int sor = 8*((cfm >> 14) & 15);
		int rrb_gr = (cfm >> 18) & 0x7f;

		/*
		 * Skip back to the start of the interrupted frame.
		 */
		bsp = ia64_rse_previous_frame(bsp, sof);

		if (reg - 32 > sof)
			return EINVAL;
		if (reg - 32 < sor) {
			if (reg - 32 + rrb_gr >= sor)
				reg = reg + rrb_gr - sor;
			else
				reg = reg + rrb_gr;
		}

		*ia64_rse_register_address(bsp, reg) = value;
		return 0;
#else
		return (EINVAL);
#endif
	}
	return (0);
}

/*
 * Messy: the invala.e instruction takes its target register by name,
 * not by number, so we have to dispatch over all 128 possibilities.
 */
static void
invala_e(int reg)
{
	switch (reg) {
	case   0:	__asm __volatile("invala.e r0"); break;
	case   1:	__asm __volatile("invala.e r1"); break;
	case   2:	__asm __volatile("invala.e r2"); break;
	case   3:	__asm __volatile("invala.e r3"); break;
	case   4:	__asm __volatile("invala.e r4"); break;
	case   5:	__asm __volatile("invala.e r5"); break;
	case   6:	__asm __volatile("invala.e r6"); break;
	case   7:	__asm __volatile("invala.e r7"); break;
	case   8:	__asm __volatile("invala.e r8"); break;
	case   9:	__asm __volatile("invala.e r9"); break;
	case  10:	__asm __volatile("invala.e r10"); break;
	case  11:	__asm __volatile("invala.e r11"); break;
	case  12:	__asm __volatile("invala.e r12"); break;
	case  13:	__asm __volatile("invala.e r13"); break;
	case  14:	__asm __volatile("invala.e r14"); break;
	case  15:	__asm __volatile("invala.e r15"); break;
	case  16:	__asm __volatile("invala.e r16"); break;
	case  17:	__asm __volatile("invala.e r17"); break;
	case  18:	__asm __volatile("invala.e r18"); break;
	case  19:	__asm __volatile("invala.e r19"); break;
	case  20:	__asm __volatile("invala.e r20"); break;
	case  21:	__asm __volatile("invala.e r21"); break;
	case  22:	__asm __volatile("invala.e r22"); break;
	case  23:	__asm __volatile("invala.e r23"); break;
	case  24:	__asm __volatile("invala.e r24"); break;
	case  25:	__asm __volatile("invala.e r25"); break;
	case  26:	__asm __volatile("invala.e r26"); break;
	case  27:	__asm __volatile("invala.e r27"); break;
	case  28:	__asm __volatile("invala.e r28"); break;
	case  29:	__asm __volatile("invala.e r29"); break;
	case  30:	__asm __volatile("invala.e r30"); break;
	case  31:	__asm __volatile("invala.e r31"); break;
	case  32:	__asm __volatile("invala.e r32"); break;
	case  33:	__asm __volatile("invala.e r33"); break;
	case  34:	__asm __volatile("invala.e r34"); break;
	case  35:	__asm __volatile("invala.e r35"); break;
	case  36:	__asm __volatile("invala.e r36"); break;
	case  37:	__asm __volatile("invala.e r37"); break;
	case  38:	__asm __volatile("invala.e r38"); break;
	case  39:	__asm __volatile("invala.e r39"); break;
	case  40:	__asm __volatile("invala.e r40"); break;
	case  41:	__asm __volatile("invala.e r41"); break;
	case  42:	__asm __volatile("invala.e r42"); break;
	case  43:	__asm __volatile("invala.e r43"); break;
	case  44:	__asm __volatile("invala.e r44"); break;
	case  45:	__asm __volatile("invala.e r45"); break;
	case  46:	__asm __volatile("invala.e r46"); break;
	case  47:	__asm __volatile("invala.e r47"); break;
	case  48:	__asm __volatile("invala.e r48"); break;
	case  49:	__asm __volatile("invala.e r49"); break;
	case  50:	__asm __volatile("invala.e r50"); break;
	case  51:	__asm __volatile("invala.e r51"); break;
	case  52:	__asm __volatile("invala.e r52"); break;
	case  53:	__asm __volatile("invala.e r53"); break;
	case  54:	__asm __volatile("invala.e r54"); break;
	case  55:	__asm __volatile("invala.e r55"); break;
	case  56:	__asm __volatile("invala.e r56"); break;
	case  57:	__asm __volatile("invala.e r57"); break;
	case  58:	__asm __volatile("invala.e r58"); break;
	case  59:	__asm __volatile("invala.e r59"); break;
	case  60:	__asm __volatile("invala.e r60"); break;
	case  61:	__asm __volatile("invala.e r61"); break;
	case  62:	__asm __volatile("invala.e r62"); break;
	case  63:	__asm __volatile("invala.e r63"); break;
	case  64:	__asm __volatile("invala.e r64"); break;
	case  65:	__asm __volatile("invala.e r65"); break;
	case  66:	__asm __volatile("invala.e r66"); break;
	case  67:	__asm __volatile("invala.e r67"); break;
	case  68:	__asm __volatile("invala.e r68"); break;
	case  69:	__asm __volatile("invala.e r69"); break;
	case  70:	__asm __volatile("invala.e r70"); break;
	case  71:	__asm __volatile("invala.e r71"); break;
	case  72:	__asm __volatile("invala.e r72"); break;
	case  73:	__asm __volatile("invala.e r73"); break;
	case  74:	__asm __volatile("invala.e r74"); break;
	case  75:	__asm __volatile("invala.e r75"); break;
	case  76:	__asm __volatile("invala.e r76"); break;
	case  77:	__asm __volatile("invala.e r77"); break;
	case  78:	__asm __volatile("invala.e r78"); break;
	case  79:	__asm __volatile("invala.e r79"); break;
	case  80:	__asm __volatile("invala.e r80"); break;
	case  81:	__asm __volatile("invala.e r81"); break;
	case  82:	__asm __volatile("invala.e r82"); break;
	case  83:	__asm __volatile("invala.e r83"); break;
	case  84:	__asm __volatile("invala.e r84"); break;
	case  85:	__asm __volatile("invala.e r85"); break;
	case  86:	__asm __volatile("invala.e r86"); break;
	case  87:	__asm __volatile("invala.e r87"); break;
	case  88:	__asm __volatile("invala.e r88"); break;
	case  89:	__asm __volatile("invala.e r89"); break;
	case  90:	__asm __volatile("invala.e r90"); break;
	case  91:	__asm __volatile("invala.e r91"); break;
	case  92:	__asm __volatile("invala.e r92"); break;
	case  93:	__asm __volatile("invala.e r93"); break;
	case  94:	__asm __volatile("invala.e r94"); break;
	case  95:	__asm __volatile("invala.e r95"); break;
	case  96:	__asm __volatile("invala.e r96"); break;
	case  97:	__asm __volatile("invala.e r97"); break;
	case  98:	__asm __volatile("invala.e r98"); break;
	case  99:	__asm __volatile("invala.e r99"); break;
	case 100:	__asm __volatile("invala.e r100"); break;
	case 101:	__asm __volatile("invala.e r101"); break;
	case 102:	__asm __volatile("invala.e r102"); break;
	case 103:	__asm __volatile("invala.e r103"); break;
	case 104:	__asm __volatile("invala.e r104"); break;
	case 105:	__asm __volatile("invala.e r105"); break;
	case 106:	__asm __volatile("invala.e r106"); break;
	case 107:	__asm __volatile("invala.e r107"); break;
	case 108:	__asm __volatile("invala.e r108"); break;
	case 109:	__asm __volatile("invala.e r109"); break;
	case 110:	__asm __volatile("invala.e r110"); break;
	case 111:	__asm __volatile("invala.e r111"); break;
	case 112:	__asm __volatile("invala.e r112"); break;
	case 113:	__asm __volatile("invala.e r113"); break;
	case 114:	__asm __volatile("invala.e r114"); break;
	case 115:	__asm __volatile("invala.e r115"); break;
	case 116:	__asm __volatile("invala.e r116"); break;
	case 117:	__asm __volatile("invala.e r117"); break;
	case 118:	__asm __volatile("invala.e r118"); break;
	case 119:	__asm __volatile("invala.e r119"); break;
	case 120:	__asm __volatile("invala.e r120"); break;
	case 121:	__asm __volatile("invala.e r121"); break;
	case 122:	__asm __volatile("invala.e r122"); break;
	case 123:	__asm __volatile("invala.e r123"); break;
	case 124:	__asm __volatile("invala.e r124"); break;
	case 125:	__asm __volatile("invala.e r125"); break;
	case 126:	__asm __volatile("invala.e r126"); break;
	case 127:	__asm __volatile("invala.e r127"); break;
	}
}

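/*
 * Emulate an unaligned memory reference on behalf of the current
 * thread.  Called when the CPU raises an unaligned reference fault;
 * returns the signal to deliver to the process, or 0 if the access
 * was fixed up.
 */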
int
unaligned_fixup(struct trapframe *framep, struct thread *td)
{
	vm_offset_t va = framep->tf_special.ifa;
	int doprint, dofix, dosigbus;
	int signal, size = 0;
	unsigned long uac;
	struct proc *p;
	u_int64_t low, high;
	struct ia64_bundle b;
	int slot;
	union ia64_instruction ins;
	int decoded;
	struct decoding dec;

	/*
	 * Figure out what actions to take.
	 */

	if (td) {
		uac = td->td_md.md_flags & MDP_UAC_MASK;
		p = td->td_proc;
	} else {
		uac = 0;
		p = NULL;
	}

	doprint = ia64_unaligned_print && !(uac & MDP_UAC_NOPRINT);
	dofix = ia64_unaligned_fix && !(uac & MDP_UAC_NOFIX);
	dosigbus = ia64_unaligned_sigbus | (uac & MDP_UAC_SIGBUS);

	/*
	 * If psr.ac is set, then clearly the user program *wants* to
	 * fault.
	 */
	if (framep->tf_special.psr & IA64_PSR_AC) {
		dofix = 0;
		dosigbus = 1;
	}

	/*
	 * See if the user can access the memory in question; even for
	 * an unknown opcode we want SIGSEGV if the access should have
	 * failed.  Note that size is still zero at this point, so only
	 * a single byte is probed.
	 */
	if (!useracc((caddr_t)va, size ? size : 1, VM_PROT_WRITE)) {
		signal = SIGSEGV;
		goto out;
	}

	/*
	 * Read the instruction bundle and attempt to decode the
	 * offending instruction.  The restart instruction (psr.ri)
	 * bits of the PSR tell us which of the bundle's three slots
	 * faulted.
	 * XXX assume that the instruction is in an 'M' slot.
	 */
	copyin((const void *) framep->tf_special.iip, &low, 8);
	copyin((const void *) (framep->tf_special.iip + 8), &high, 8);
	ia64_unpack_bundle(low, high, &b);
	slot = (framep->tf_special.psr >> 41) & 3;
	ins.ins = b.slot[slot];

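	/*
	 * The decode dispatch below follows the integer load/store
	 * opcode extension tables (4-29 through 4-31) in the IA-64
	 * Architecture Software Developer's Manual.
	 */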
	decoded = 0;
	bzero(&dec, sizeof(dec));
	if (ins.M1.op == 4) {
		if (ins.M1.m == 0 && ins.M1.x == 0) {
			/* Table 4-29 */
			if (ins.M1.x6 < 0x30)
				decoded = unaligned_decode_M1(ins, &dec);
			else
				decoded = unaligned_decode_M4(ins, &dec);
		} else if (ins.M1.m == 1 && ins.M1.x == 0) {
			/* Table 4-30 */
			decoded = unaligned_decode_M2(ins, &dec);
		}
	} else if (ins.M1.op == 5) {
		/* Table 4-31 */
		if (ins.M1.x6 < 0x30)
			decoded = unaligned_decode_M3(ins, &dec);
		else
			decoded = unaligned_decode_M5(ins, &dec);
	}

	/*
	 * If we're supposed to be noisy, squawk now.
	 */
	if (doprint) {
		uprintf("pid %d (%s): unaligned access: va=0x%lx pc=0x%lx",
			p->p_pid, p->p_comm, va, framep->tf_special.iip);
		if (decoded) {
			uprintf(" op=");
			if (dec.isload) {
				static char *ldops[] = {
					"ld%d.sa", "ld%d.s", "ld%d.a",
					"ld%d.c.clr", "ld%d.c.nc", "ld%d"
				};
				uprintf(ldops[dec.type], dec.width);
				uprintf(" r%d=[r%d]", dec.reg, dec.basereg);
			} else {
				uprintf("st%d [r%d]=r%d", dec.width,
					dec.basereg, dec.reg);
			}
			if (dec.updateisreg)
				uprintf(",r%d\n", dec.update);
			else if (dec.update)
				uprintf(",%d\n", dec.update);
			else
				uprintf("\n");
		} else {
			uprintf("\n");
		}
	}

	/*
	 * If we should try to fix it and know how, give it a shot.
	 *
	 * We never allow bad data to be unknowingly used by the
	 * user process.  That is, if we decide not to fix up an
	 * access we cause a SIGBUS rather than letting the user
	 * process go on without warning.
	 *
	 * The signal starts out clear and is raised to SIGBUS as
	 * soon as any step of the emulation below fails.
	 */
	signal = 0;
	if (dofix && decoded) {
		u_int64_t addr, update, value, isr;
		int error = 0;

		/*
		 * We only really need this if the current bspstore
		 * hasn't advanced past the user's register frame. It's
		 * hardly worth trying to optimise though.
		 */
		__asm __volatile("flushrs");

		isr = framep->tf_special.isr;
		error = read_register(framep, td, dec.basereg, &addr);
		if (error) {
			signal = SIGBUS;
			goto out;
		}
		if (dec.updateisreg) {
			error = read_register(framep, td, dec.update, &update);
			if (error) {
				signal = SIGBUS;
				goto out;
			}
		} else {
			update = dec.update;
		}

		/* Assume little-endian */
		if (dec.isload) {
			/*
			 * Sanity checks.
			 */
			if (!(isr & IA64_ISR_R)
			    || (isr & (IA64_ISR_W|IA64_ISR_X|IA64_ISR_NA))) {
				printf("unaligned_fixup: unexpected cr.isr value\n");
				signal = SIGBUS;
				goto out;
			}

			if (dec.type == LD_SA || dec.type == LD_A) {
				invala_e(dec.reg);
				goto out;
			}
			if (dec.type == LD_C_CLR)
				invala_e(dec.reg);
			if (dec.type == LD_S)
				/* XXX not quite sure what to do here */;

			value = 0;
			if (!error && dec.fence)
				ia64_mf();
			error = copyin((const void *)addr, &value, dec.width);
			if (!error)
				error = write_register(framep, td, dec.reg,
						       value);
			if (!error && update)
				error = write_register(framep, td, dec.basereg,
						       addr + update);
		} else {
			error = read_register(framep, td, dec.reg, &value);
			if (!error)
				error = copyout(&value, (void *)addr,
						dec.width);
			if (!error && dec.fence)
				ia64_mf();
			if (!error && update)
				error = write_register(framep, td, dec.basereg,
						       addr + update);
		}
		if (error) {
			signal = SIGBUS;
		} else {
			/*
			 * Advance to the instruction following the
			 * one which faulted.  psr.ri selects one of
			 * the three slots in the current bundle; past
			 * the last slot we move iip to the next
			 * 16-byte bundle and clear psr.ri.
			 */
			if ((framep->tf_special.psr & IA64_PSR_RI)
			    == IA64_PSR_RI_2) {
				framep->tf_special.psr &= ~IA64_PSR_RI;
				framep->tf_special.iip += 16;
			} else {
				framep->tf_special.psr += IA64_PSR_RI_1;
			}
		}
	} else {
		signal = SIGBUS;
	}

	/*
	 * Force SIGBUS if requested.
	 */
	if (dosigbus)
		signal = SIGBUS;

out:
	return (signal);
}