/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/trap.c 281262 2015-04-08 04:37:11Z jhibbits $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

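/*
 * Indices into the faultbuf used for pcb_onfault recovery: the saved
 * link register, stack pointer (r1), r2, condition register, and the
 * start of the non-volatile r13-r31 block.  handle_onfault() below
 * restores these when a protected kernel access faults.
 */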
#define	FAULTBUF_LR	0
#define	FAULTBUF_R1	1
#define	FAULTBUF_R2	2
#define	FAULTBUF_CR	3
#define	FAULTBUF_R13	4

static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void	handle_kernel_slb_spill(int, register_t, register_t);
static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int	n_slbs;
#endif

struct powerpc_exception {
	u_int	vector;
	char	*name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

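/*
 * Hook filled in by the DTrace module.  When set, kernel-mode trap
 * instructions recognized as DTrace probes (EXC_DTRACE) are handed to
 * it from the kernel EXC_PGM case in trap() below.
 */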
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
	{ EXC_CRIT,	"critical input" },
	{ EXC_RST,	"system reset" },
	{ EXC_MCHK,	"machine check" },
	{ EXC_DSI,	"data storage interrupt" },
	{ EXC_DSE,	"data segment exception" },
	{ EXC_ISI,	"instruction storage interrupt" },
	{ EXC_ISE,	"instruction segment exception" },
	{ EXC_EXI,	"external interrupt" },
	{ EXC_ALI,	"alignment" },
	{ EXC_PGM,	"program" },
	{ EXC_FPU,	"floating-point unavailable" },
	{ EXC_APU,	"auxiliary proc unavailable" },
	{ EXC_DECR,	"decrementer" },
	{ EXC_FIT,	"fixed-interval timer" },
	{ EXC_WDOG,	"watchdog timer" },
	{ EXC_SC,	"system call" },
	{ EXC_TRC,	"trace" },
	{ EXC_FPA,	"floating-point assist" },
	{ EXC_DEBUG,	"debug" },
	{ EXC_PERF,	"performance monitoring" },
	{ EXC_VEC,	"altivec unavailable" },
	{ EXC_VSX,	"vsx unavailable" },
	{ EXC_ITMISS,	"instruction tlb miss" },
	{ EXC_DLMISS,	"data load tlb miss" },
	{ EXC_DSMISS,	"data store tlb miss" },
	{ EXC_BPT,	"instruction breakpoint" },
	{ EXC_SMI,	"system management" },
	{ EXC_VECAST_G4,	"altivec assist" },
	{ EXC_THRM,	"thermal management" },
	{ EXC_RUNMODETRC,	"run mode/trace" },
	{ EXC_LAST,	NULL }
};

static const char *
trapname(u_int vector)
{
	struct	powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

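/*
 * Common trap entry point.  Classify the exception as a user- or
 * kernel-mode trap, handle what can be handled here (page faults, lazy
 * FPU/AltiVec/VSX enabling, alignment fixups, system calls, ...), turn
 * unrecoverable user-mode faults into signals, and hand anything the
 * kernel cannot recover from to trap_fatal().
 */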
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (!(td->td_pcb->pcb_flags & PCB_FPU))
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
#ifdef AIM
			if (frame->srr1 & EXC_PGM_TRAP) {
#else
			if (frame->cpu.booke.esr & ESR_PTR) {
#endif
#ifdef KDTRACE_HOOKS
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				sig = ppc_instr_emulate(frame, td->td_pcb);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}

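/*
 * Last resort for unhandled traps: print the trap frame and, if KDB is
 * compiled in and enabled, drop into the debugger; otherwise panic.
 */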
static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if ((debugger_on_panic || kdb_active) &&
	    kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s trap", trapname(frame->exc));
}

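/*
 * Print a summary of a trap frame: the exception vector, the faulting
 * address and status registers where applicable, and the current
 * thread.
 */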
static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
	case EXC_DTMISS:
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->dar);
#ifdef AIM
		printf("   dsisr           = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dsisr);
#endif
		break;
	case EXC_ISE:
	case EXC_ISI:
	case EXC_ITMISS:
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	}
#ifdef BOOKE
	printf("   esr             = 0x%" PRIxPTR "\n",
	    frame->cpu.booke.esr);
#endif
	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct		thread *td;
	faultbuf	*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[FAULTBUF_LR];
		frame->fixreg[1] = (*fb)[FAULTBUF_R1];
		frame->fixreg[2] = (*fb)[FAULTBUF_R2];
		frame->fixreg[3] = 1;
		frame->cr = (*fb)[FAULTBUF_CR];
		bcopy(&(*fb)[FAULTBUF_R13], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}
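
/*
 * A rough usage sketch (illustrative, not taken from this file) of how
 * callers such as the copyin()/copyout() family typically arm this
 * recovery path, assuming the setfault() helper used elsewhere in the
 * powerpc support code:
 *
 *	faultbuf env;
 *
 *	if (setfault(env)) {
 *		curthread->td_pcb->pcb_onfault = NULL;
 *		return (EFAULT);	(reached via handle_onfault())
 *	}
 *	... touch user memory ...
 *	curthread->td_pcb->pcb_onfault = NULL;
 *	return (0);
 */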
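/*
 * Gather system call arguments for syscallenter().  The syscall number
 * arrives in r0 and the first NARGREG arguments in the registers
 * starting at FIRSTARG; any remaining arguments are copied in from the
 * user stack.  SYS_syscall and SYS___syscall pass the real syscall
 * number as their first argument, so the argument window is shifted
 * accordingly, and 32-bit processes have their arguments truncated to
 * 32 bits (and, on powerpc64, re-expanded from the packed stack copy).
 */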
int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	struct trapframe *frame;
	caddr_t	params;
	size_t argsz;
	int error, n, i;

	p = td->td_proc;
	frame = td->td_frame;

	sa->code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (sa->code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		sa->code = *(register_t *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			params += sizeof(register_t);
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 2;
		} else {
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 1;
		}
	}

	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	if (SV_PROC_FLAG(p, SV_ILP32)) {
		argsz = sizeof(uint32_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i] &
			    0xffffffff;
	} else {
		argsz = sizeof(uint64_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i];
	}

	if (sa->narg > n)
		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
			       (sa->narg - n) * argsz);
	else
		error = 0;

#ifdef __powerpc64__
	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
		/* Expand the size of arguments copied from the stack */

		for (i = sa->narg - 1; i >= n; i--)
			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
	}
#endif

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
	}
	return (error);
}

#include "../../kern/subr_syscall.c"

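/*
 * System call entry point called from trap().  Argument fetching and
 * return-value handling are done by syscallenter() (which calls
 * cpu_fetch_syscall_args()) and syscallret() from the shared
 * subr_syscall.c included above.
 */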
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	struct syscall_args sa;
	int error;

	td = curthread;
	td->td_frame = frame;

#ifdef __powerpc64__
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

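/*
 * Handle a user-mode segment fault: make sure the faulting ESID has a
 * VSID allocated in the pmap and is present in the pmap's SLB cache
 * (via slb_insert_user()), unless another CPU already installed it.
 */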
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif

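/*
 * Resolve a page fault.  Work out the faulting address and access type
 * from the trap frame, pick the user or kernel map (kernel accesses to
 * the user segment are rewritten through the current user SR on AIM),
 * and hand the fault to vm_fault().  Returns 0 on success and SIGSEGV
 * otherwise; kernel-mode failures get a final chance via
 * handle_onfault().
 */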
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
#ifdef AIM
	register_t	user_sr;
#endif

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		eva = frame->dar;
#ifdef BOOKE
		if (frame->cpu.booke.esr & ESR_ST)
#else
		if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
		map = &p->p_vmspace->vm_map;
	} else {
#ifdef BOOKE
		if (eva < VM_MAXUSER_ADDRESS) {
#else
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
#endif
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

#ifdef AIM
			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
#endif
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

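/*
 * Typical trigger (illustrative): an lfd or stfd issued against a
 * misaligned address, e.g. a double member of a packed structure.
 */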
static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->dar, fpr,
			    sizeof(double)) != 0)
				return (-1);
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->dar,
			    sizeof(double)) != 0)
				return (-1);
		}
		return (0);
		break;
	}

	return (-1);
}

#ifdef KDB
int db_trap_glue(struct trapframe *);		/* Called from trap_subr.S */

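/*
 * Decide whether a kernel-mode trap should enter the debugger: trace,
 * breakpoint, program and data-storage traps taken in kernel mode go to
 * kdb_trap(); traps belonging to DTrace (EXC_DTRACE) are left alone.
 */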
int
db_trap_glue(struct trapframe *frame)
{
	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
#ifdef AIM
		|| (frame->exc == EXC_PGM
		    && (frame->srr1 & 0x20000))
#else
		|| (frame->exc == EXC_DEBUG)
#endif
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;

		/* Ignore DTrace traps. */
		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
			return (0);
#ifdef AIM
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
#else
		if (frame->cpu.booke.esr & ESR_PTR) {
#endif
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
#endif