trap.c revision 199669
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/trap.c 199669 2009-11-22 20:45:15Z nwhitehorn $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

int	setfault(faultbuf);		/* defined in locore.S */

/* Why are these not defined in a header? */
int	badaddr(void *, size_t);
int	badaddr_read(void *, size_t, int *);

extern char	*syscallnames[];

struct powerpc_exception {
	u_int	vector;
	char	*name;
};

static struct powerpc_exception powerpc_exceptions[] = {
	{ 0x0100, "system reset" },
	{ 0x0200, "machine check" },
	{ 0x0300, "data storage interrupt" },
	{ 0x0400, "instruction storage interrupt" },
	{ 0x0500, "external interrupt" },
	{ 0x0600, "alignment" },
	{ 0x0700, "program" },
	{ 0x0800, "floating-point unavailable" },
	{ 0x0900, "decrementer" },
	{ 0x0c00, "system call" },
	{ 0x0d00, "trace" },
	{ 0x0e00, "floating-point assist" },
	{ 0x0f00, "performance monitoring" },
	{ 0x0f20, "altivec unavailable" },
	{ 0x1000, "instruction tlb miss" },
	{ 0x1100, "data load tlb miss" },
	{ 0x1200, "data store tlb miss" },
	{ 0x1300, "instruction breakpoint" },
	{ 0x1400, "system management" },
	{ 0x1600, "altivec assist" },
	{ 0x1700, "thermal management" },
	{ 0x2000, "run mode/trace" },
	{ 0x3000, NULL }
};

static const char *
trapname(u_int vector)
{
	struct	powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = PCPU_GET(curthread);
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;

		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VECAST:
			printf("Vector assist exception!\n");
			sig = SIGILL;
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0)
				sig = SIGBUS;
			else
				frame->srr0 += 4;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame->srr1 & EXC_PGM_TRAP)
				sig = SIGTRAP;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
	mtx_assert(&Giant, MA_NOTOWNED);
}

static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if ((debugger_on_panic || kdb_active) &&
	    kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s trap", trapname(frame->exc));
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector >> 8,
	    trapname(vector));
	switch (vector) {
	case EXC_DSI:
		printf("   virtual address = 0x%x\n", frame->cpu.aim.dar);
		break;
	case EXC_ISI:
		printf("   virtual address = 0x%x\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%x\n", frame->srr0);
	printf("   srr1            = 0x%x\n", frame->srr1);
	printf("   lr              = 0x%x\n", frame->lr);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct		thread *td;
	faultbuf	*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->fixreg[3] = 1;
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}
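
/*
 * The faultbuf restored above is the context captured by setfault() in
 * locore.S: the address setfault() will return to (put back into srr0),
 * the stack pointer (r1), r2, the condition register, and the callee-saved
 * registers r13-r31.  Forcing r3 to 1 makes the interrupted setfault() call
 * appear to return non-zero, which is how callers notice that the access
 * faulted.  A minimal sketch of the usual pattern follows; probe_word() is
 * hypothetical, badaddr_read() below is the real example in this file:
 *
 *	static int
 *	probe_word(volatile int *p)
 *	{
 *		faultbuf env;
 *
 *		if (setfault(env)) {
 *			curthread->td_pcb->pcb_onfault = 0;
 *			return (EFAULT);
 *		}
 *		(void)*p;
 *		curthread->td_pcb->pcb_onfault = 0;
 *		return (0);
 *	}
 *
 * If the dereference faults, trap() recovers through handle_onfault() and
 * execution rewinds to the setfault() call site with a non-zero result.
 */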

void
syscall(struct trapframe *frame)
{
	caddr_t		params;
	struct		sysent *callp;
	struct		thread *td;
	struct		proc *p;
	int		error, n;
	size_t		narg;
	register_t	args[10];
	u_int		code;

	td = PCPU_GET(curthread);
	p = td->td_proc;

	PCPU_INC(cnt.v_syscall);

	code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(frame, args, &code, &params);
	} else if (code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		code = *(u_int *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		params += sizeof(register_t);
		code = *(u_int *) params;
		params += sizeof(register_t);
		n -= 2;
	}
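
	/*
	 * The system call number arrives in r0 and the first NARGREG
	 * arguments in the GPRs starting at FIRSTARG (r3 on this
	 * platform); anything beyond that is fetched from the caller's
	 * stack via MOREARGS() below.  For the indirect forms the real
	 * number rides in the first argument register, e.g. an
	 * illustrative user call such as
	 *
	 *	syscall(SYS_write, fd, buf, nbytes);
	 *
	 * traps with SYS_syscall in r0 and SYS_write in r3, so the real
	 * arguments begin one register later; SYS___syscall passes the
	 * number as a 64-bit quantity and shifts them by two.
	 */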

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg;

	if (narg > n) {
		bcopy(params, args, n * sizeof(register_t));
		error = copyin(MOREARGS(frame->fixreg[1]), args + n,
			       (narg - n) * sizeof(register_t));
		params = (caddr_t)args;
	} else
		error = 0;

	CTR5(KTR_SYSC, "syscall: p=%s %s(%x %x %x)", td->td_name,
	     syscallnames[code],
	     frame->fixreg[FIRSTARG],
	     frame->fixreg[FIRSTARG+1],
	     frame->fixreg[FIRSTARG+2]);

#ifdef	KTRACE
	if (KTRPOINT(td, KTR_SYSCALL))
		ktrsyscall(code, narg, (register_t *)params);
#endif

	td->td_syscalls++;

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];

		STOPEVENT(p, S_SCE, narg);

		PTRACESTOP_SC(p, td, S_PT_SCE);

		AUDIT_SYSCALL_ENTER(code, td);
		error = (*callp->sy_call)(td, params);
		AUDIT_SYSCALL_EXIT(error, td);

		CTR3(KTR_SYSC, "syscall: p=%s %s ret=%x", td->td_name,
		     syscallnames[code], td->td_retval[0]);
	}

	cpu_set_syscall_retval(td, error);

	/*
	 * Check for misbehavior.
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "System call %s returning",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? syscallnames[code] : "???");
	KASSERT(td->td_critnest == 0,
	    ("System call %s returning in a critical section",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? syscallnames[code] : "???"));
	KASSERT(td->td_locks == 0,
	    ("System call %s returning with %d locks held",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? syscallnames[code] : "???",
	    td->td_locks));

#ifdef	KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(code, error, td->td_retval[0]);
#endif

	/*
	 * Does the comment in the i386 code about errno apply here?
	 */
	STOPEVENT(p, S_SCX, code);

	PTRACESTOP_SC(p, td, S_PT_SCX);
}
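
/*
 * cpu_set_syscall_retval() turns (error, td_retval[]) into the user-visible
 * convention: on success the return value goes back in r3 (and r4 for
 * 64-bit results), while an error puts the errno value in r3 and flags the
 * failure in the condition register so libc's system call stubs can branch
 * to their error path; ERESTART instead backs srr0 up so the sc instruction
 * is re-executed, and EJUSTRETURN leaves the register state untouched.
 */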

static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
	u_int		user_sr;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
	} else {
		eva = frame->cpu.aim.dar;
		if (frame->cpu.aim.dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

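	/*
	 * A kernel-mode fault whose effective address lies in the USER_SR
	 * segment means an onfault-protected routine such as copyin() or
	 * copyout() was touching user memory through the dedicated user
	 * segment register.  In that case the original user address is
	 * rebuilt from the segment register contents and the fault is
	 * resolved against the process's address space rather than
	 * kernel_map.
	 */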
	if (user) {
		map = &p->p_vmspace->vm_map;
	} else {
		if ((eva >> ADDR_SR_SHFT) == USER_SR) {
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			__asm ("mfsr %0, %1"
			    : "=r"(user_sr)
			    : "K"(USER_SR));
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
			map = &p->p_vmspace->vm_map;
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
		      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
					      : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

int
badaddr(void *addr, size_t size)
{
	return (badaddr_read(addr, size, NULL));
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct thread	*td;
	faultbuf	env;
	int		x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	td = PCPU_GET(curthread);

	if (setfault(env)) {
		td->td_pcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	td->td_pcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return (0);
}
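
/*
 * badaddr() returns non-zero when a read of the given width at addr takes a
 * machine check and 0 when it completes, so bus and device probe code can
 * use it to check that a register actually responds before relying on it,
 * e.g. (the address here is purely illustrative):
 *
 *	if (badaddr((void *)reg_va, 4))
 *		return (ENXIO);
 */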

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->cpu.aim.dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->cpu.aim.dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return 0;
		break;
	}

	return -1;
}