1/*
2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32#include <mach_kdb.h>
33#include <mach_kdp.h>
34#include <debug.h>
35
36#include <mach/mach_types.h>
37#include <mach/mach_traps.h>
38#include <mach/thread_status.h>
39
40#include <kern/processor.h>
41#include <kern/thread.h>
42#include <kern/exception.h>
43#include <kern/syscall_sw.h>
44#include <kern/cpu_data.h>
45#include <kern/debug.h>
46
47#include <vm/vm_fault.h>
48#include <vm/vm_kern.h> 	/* For kernel_map */
49
50#include <ppc/misc_protos.h>
51#include <ppc/trap.h>
52#include <ppc/exception.h>
53#include <ppc/proc_reg.h>	/* for SR_xxx definitions */
54#include <ppc/pmap.h>
55#include <ppc/mem.h>
56#include <ppc/mappings.h>
57#include <ppc/Firmware.h>
58#include <ppc/low_trace.h>
59#include <ppc/Diagnostics.h>
60#include <ppc/hw_perfmon.h>
61#include <ppc/fpu_protos.h>
62
63#include <sys/kdebug.h>
64
65perfCallback perfTrapHook; /* Pointer to CHUD trap hook routine */
66perfCallback perfASTHook;  /* Pointer to CHUD AST hook routine */
67
68#if CONFIG_DTRACE
69extern kern_return_t dtrace_user_probe(ppc_saved_state_t *sv);
70
71/* See <rdar://problem/4613924> */
72perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
73
74extern boolean_t dtrace_tally_fault(user_addr_t);
75#endif
76
77#if	MACH_KDB
78#include <ddb/db_watch.h>
79#include <ddb/db_run.h>
80#include <ddb/db_break.h>
81#include <ddb/db_trap.h>
82
83boolean_t let_ddb_vm_fault = FALSE;
84boolean_t	debug_all_traps_with_kdb = FALSE;
85extern struct db_watchpoint *db_watchpoint_list;
86extern boolean_t db_watchpoints_inserted;
87extern boolean_t db_breakpoints_inserted;
88
89
90
91#endif	/* MACH_KDB */
92
93extern task_t bsd_init_task;
94extern char init_task_failure_data[];
95extern int not_in_kdp;
96
97#define	PROT_EXEC	(VM_PROT_EXECUTE)
98#define PROT_RO		(VM_PROT_READ)
99#define PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
100
101
/* A useful macro to update the ppc_exception_state in the PCB
 * before calling doexception.  It stashes the fault address (dar),
 * the DSISR, and the PowerPC vector number into the current thread's
 * PCB.  trapno is a byte offset into the vector table; dividing by
 * T_VECTOR_SIZE converts it back to the architectural vector number.
 * Expects `dar`, `dsisr`, and `trapno` to be in scope at the use site.
 */
#define UPDATE_PPC_EXCEPTION_STATE {							\
	thread_t _thread = current_thread();							\
	_thread->machine.pcb->save_dar = (uint64_t)dar;					\
	_thread->machine.pcb->save_dsisr = dsisr;						\
	_thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE;	/* back to powerpc */ \
}
111
112void unresolved_kernel_trap(int trapno,
113				   struct savearea *ssp,
114				   unsigned int dsisr,
115				   addr64_t dar,
116				   const char *message);
117
118static void handleMck(struct savearea *ssp);		/* Common machine check handler */
119
120#ifdef MACH_BSD
121extern void get_procrustime(time_value_t *);
122extern void bsd_uprofil(time_value_t *, user_addr_t);
123#endif /* MACH_BSD */
124
125
/*
 *	trap - common trap handler for PowerPC, for both kernel- and
 *	user-state exceptions.  Entered from low-level exception handling
 *	code with interruptions disabled.
 *
 *	trapno	- exception vector offset (multiple of T_VECTOR_SIZE)
 *	ssp	- savearea holding the interrupted context
 *	dsisr	- DSISR register contents at the time of the fault
 *	dar	- data address register (faulting address)
 *
 *	Returns the savearea to resume from (always ssp).  Kernel-state
 *	traps that cannot be resolved panic via unresolved_kernel_trap();
 *	user-state traps are converted to Mach exceptions via doexception().
 */
struct savearea *trap(int trapno,
			     struct savearea *ssp,
			     unsigned int dsisr,
			     addr64_t dar)
{
	int exception;
	mach_exception_code_t code = 0;
	mach_exception_subcode_t subcode = 0;
	vm_map_t map;
	vm_map_offset_t offset;
	thread_t thread = current_thread();
	boolean_t intr;
	ast_t *myast;
	int ret;

#ifdef MACH_BSD
	time_value_t tv;
#endif /* MACH_BSD */

	/* Give the CHUD AST hook first crack if one is registered;
	 * otherwise clear any pending CHUD ASTs. */
	myast = ast_pending();
	if(perfASTHook) {
		if(*myast & AST_CHUD_ALL) {
			perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	if(perfTrapHook) {							/* Is there a hook? */
		if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}

#if CONFIG_DTRACE
	if(tempDTraceTrapHook) {							/* Is there a hook? */
		if(tempDTraceTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
#endif

#if 0
	{
		extern void fctx_text(void);
		fctx_test();
	}
#endif

	exception = 0;								/* Clear exception for now */

/*
 *	Remember that we are disabled for interruptions when we come in here.  Because
 *	of latency concerns, we need to enable interruptions if the interrupted process
 *	was enabled itself as soon as we can.
 */

	intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;	/* Remember if we were enabled */

	/* Handle kernel traps first */

	if (!USER_MODE(ssp->save_srr1)) {
		/*
		 * Trap came from kernel
		 */
		switch (trapno) {

		case T_PREEMPT:			/* Handle a preempt trap */
			ast_taken(AST_PREEMPTION, FALSE);
			break;

		case T_PERF_MON:
			perfmon_handle_pmi(ssp);
			break;

		case T_RESET:					/* Reset interruption */
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;						/* We just ignore these */

		/*
		 * These trap types should never be seen by trap()
		 * in kernel mode, anyway.
		 * Some are interrupts that should be seen by
		 * interrupt() others just don't happen because they
		 * are handled elsewhere. Some could happen but are
		 * considered to be fatal in kernel mode.
		 */
		case T_DECREMENTER:
		case T_IN_VAIN:			/* Shouldn't ever see this, lowmem_vectors eats it */
		case T_SYSTEM_MANAGEMENT:
		case T_ALTIVEC_ASSIST:
		case T_INTERRUPT:
		case T_FP_UNAVAILABLE:
		case T_IO_ERROR:
		case T_RESERVED:
		default:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;


/*
 *			Here we handle a machine check in the kernel
 */

		case T_MACHINE_CHECK:
			handleMck(ssp);						/* Common to both user and kernel */
			break;


		case T_ALIGNMENT:
/*
*			If enaNotifyEMb is set, we get here, and
*			we have actually already emulated the unaligned access.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of unaligned accesses.
*/

			if(ssp->save_hdr.save_misc3) {				/* Was it a handled exception? */
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);	/* Go panic */
				break;
			}
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_EMULATE:
/*
*			If enaNotifyEMb is set we get here, and
*			we have actually already emulated the instruction.
*			All that we want to do here is to ignore the interrupt. This is to allow logging or
*			tracing of emulated instructions.
*/

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
				(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
			break;

		case T_TRACE:
		case T_RUNMODE_TRACE:
		case T_INSTRUCTION_BKPT:
			if (!Call_Debugger(trapno, ssp))
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_PROGRAM:
			if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
				if (!Call_Debugger(trapno, ssp))
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				unresolved_kernel_trap(trapno, ssp,
							dsisr, dar, NULL);
			}
			break;

		case T_DATA_ACCESS:
#if	MACH_KDB
			mp_disable_preemption();
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
			mp_enable_preemption();
#endif	/* MACH_KDB */
			/* can we take this during normal panic dump operation? */
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !not_in_kdp) {
			        /*
				 * Access fault while in kernel core dump.
				 */
			        kdp_dump_trap(trapno, ssp);
			}


			if(ssp->save_dsisr & dsiInvMode) {			/* Did someone try to reserve cache inhibited? */
				panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
			}

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			/* Addresses with nibble 0xE or 0xF in bits 32..35 are the
			 * user memory window; everything else is a plain kernel
			 * access.  NOTE(review): bitwise `|` here acts as a
			 * logical OR since both operands are 0/1 comparisons. */
			if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF))  {	/* User memory window access? */

				offset = (vm_map_offset_t)dar;				/* Set the failing address */
				map = kernel_map;						/* No, this is a normal kernel access */

/*
 *	Note: Some ROM device drivers will access page 0 when they start.  The IOKit will
 *	set a flag to tell us to ignore any access fault on page 0.  After the driver is
 *	opened, it will clear the flag.
 */
				if((0 == (offset & -PAGE_SIZE)) && 		/* Check for access of page 0 and */
				  ((thread->machine.specFlags) & ignoreZeroFault)) {	/* special case of ignoring page zero faults */
					ssp->save_srr0 += 4;				/* Point to next instruction */
					break;
				}

#if CONFIG_DTRACE
				if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
					if (dtrace_tally_fault(dar)) { /* Should a fault under dtrace be ignored? */
						ssp->save_srr0 += 4;                /* Point to next instruction */
						break;
					} else {
						unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected page fault under dtrace_probe");
					}
				}
#endif

				code = vm_fault(map, vm_map_trunc_page(offset),
						dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
						FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

				if (code != KERN_SUCCESS) {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;
			}

			/* If we get here, the fault was due to a user memory window access */

#if CONFIG_DTRACE
			if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
				if (dtrace_tally_fault(dar)) { /* Should a user memory window access fault under dtrace be ignored? */
					if (thread->recover) {
						ssp->save_srr0 = thread->recover;
						thread->recover = (vm_offset_t)NULL;
					} else {
						unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
					}
					break;
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "Unexpected UMW page fault under dtrace_probe");
				}
			}
#endif

			map = thread->map;

			offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);	/* Compute the user space address */

			code = vm_fault(map, vm_map_trunc_page(offset),
					dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			/* If we failed, there should be a recovery
			 * spot to rfi to.
			 */
			if (code != KERN_SUCCESS) {
				if (thread->recover) {
					ssp->save_srr0 = thread->recover;
					thread->recover = (vm_offset_t)NULL;
				} else {
					unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
				}
			}
			else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_dsisr = (ssp->save_dsisr &
					~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
			}

			break;

		case T_INSTRUCTION_ACCESS:

#if	MACH_KDB
			if (debug_mode
			    && getPerProc()->debugger_active
			    && !let_ddb_vm_fault) {
				/*
				 * Force kdb to handle this one.
				 */
				kdb_trap(trapno, ssp);
			}
#endif	/* MACH_KDB */

			/* Same as for data access, except fault type
			 * is PROT_EXEC and addr comes from srr0
			 */

			if(intr) ml_set_interrupts_enabled(TRUE);	/* Enable if we were */

			map = kernel_map;

			code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
					(PROT_EXEC | PROT_RO), FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));

			if (code != KERN_SUCCESS) {
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			} else {
				ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
				ssp->save_srr1 = (ssp->save_srr1 &
					~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);		/* Make sure this is marked as a miss */
			}
			break;

		/* Usually shandler handles all the system calls, but the
		 * atomic thread switcher may throwup (via thandler) and
		 * have to pass it up to the exception handler.
		 */

		case T_SYSTEM_CALL:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;

		case T_AST:
			unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
			break;
		}
	} else {

		/*
		 * Processing for user state traps with interrupt enabled
		 * For T_AST, interrupts are enabled in the AST delivery
		 */
		if (trapno != T_AST)
			ml_set_interrupts_enabled(TRUE);

#ifdef MACH_BSD
		{
			get_procrustime(&tv);
		}
#endif /* MACH_BSD */


		/*
		 * Trap came from user task
		 */

		switch (trapno) {

			case T_PREEMPT:
				unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
				break;

			case T_PERF_MON:
				perfmon_handle_pmi(ssp);
				break;

				/*
				 * These trap types should never be seen by trap()
				 * Some are interrupts that should be seen by
				 * interrupt() others just don't happen because they
				 * are handled elsewhere.
				 */
			case T_DECREMENTER:
			case T_IN_VAIN:								/* Shouldn't ever see this, lowmem_vectors eats it */
			case T_INTERRUPT:
			case T_FP_UNAVAILABLE:
			case T_SYSTEM_MANAGEMENT:
			case T_RESERVED:
			case T_IO_ERROR:

			default:

				ml_set_interrupts_enabled(FALSE);		/* Turn off interruptions */

				panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
					   cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
				break;


	/*
	 *			Here we handle a machine check in user state
	 */

			case T_MACHINE_CHECK:
				handleMck(ssp);							/* Common to both user and kernel */
				break;

			case T_RESET:
				ml_set_interrupts_enabled(FALSE);		/* Turn off interruptions */
				if (!Call_Debugger(trapno, ssp))
					panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
						ssp->save_srr0, ssp->save_srr1);
				break;									/* We just ignore these */

			case T_ALIGNMENT:
	/*
	*			If enaNotifyEMb is set, we get here, and
	*			we have actually already emulated the unaligned access.
	*			All that we want to do here is to ignore the interrupt. This is to allow logging or
	*			tracing of unaligned accesses.
	*/

				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
					(int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);

				if(ssp->save_hdr.save_misc3) {			/* Was it a handled exception? */
					exception = EXC_BAD_ACCESS;			/* Yes, throw exception */
					code = EXC_PPC_UNALIGNED;
					subcode = dar;
				}
				break;

			case T_EMULATE:
	/*
	*			If enaNotifyEMb is set we get here, and
	*			we have actually already emulated the instruction.
	*			All that we want to do here is to ignore the interrupt. This is to allow logging or
	*			tracing of emulated instructions.
	*/

				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
					(int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
				break;

			case T_TRACE:			/* Real PPC chips */
			case T_INSTRUCTION_BKPT:
				exception = EXC_BREAKPOINT;
				code = EXC_PPC_TRACE;
				subcode = ssp->save_srr0;
				break;

			case T_PROGRAM:
				if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
					fpu_save(thread->machine.curctx);
					UPDATE_PPC_EXCEPTION_STATE;
					exception = EXC_ARITHMETIC;
					code = EXC_ARITHMETIC;

					mp_disable_preemption();
					subcode = ssp->save_fpscr;
					mp_enable_preemption();
				}
				else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {

					UPDATE_PPC_EXCEPTION_STATE
					exception = EXC_BAD_INSTRUCTION;
					code = EXC_PPC_UNIPL_INST;
					subcode = ssp->save_srr0;
				} else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {

					UPDATE_PPC_EXCEPTION_STATE;
					exception = EXC_BAD_INSTRUCTION;
					code = EXC_PPC_PRIVINST;
					subcode = ssp->save_srr0;
				} else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
					unsigned int inst;

					if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");

					if(dgWork.dgFlags & enaDiagTrap) {	/* Is the diagnostic trap enabled? */
						if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) {	/* Is this a TWI 31,R31,0xFFFx? */
							if(diagTrap(ssp, inst & 0xF)) {	/* Call the trap code */
								ssp->save_srr0 += 4ULL;		/* If we eat the trap, bump pc */
								exception = 0;				/* Clear exception */
								break;						/* All done here */
							}
						}
					}

#if CONFIG_DTRACE
					if(inst == 0x0FFFDDDD) {				/* Is this the dtrace trap? */
						ret = dtrace_user_probe((ppc_saved_state_t *)ssp);	/* Go check if it is for real and process if so... */
						if(ret == KERN_SUCCESS) {			/* Was it really? */
							exception = 0;					/* Clear the exception */
							break;							/* Go flow through and out... */
						}
					}
#endif

					UPDATE_PPC_EXCEPTION_STATE;

					/* 0x7FE00008 is the unconditional trap instruction (tw 31,0,0) */
					if (inst == 0x7FE00008) {
						exception = EXC_BREAKPOINT;
						code = EXC_PPC_BREAKPOINT;
					} else {
						exception = EXC_SOFTWARE;
						code = EXC_PPC_TRAP;
					}
					subcode = ssp->save_srr0;
				}
				break;

#if CONFIG_DTRACE
			case T_DTRACE_RET:								/* Are we returning from a dtrace injection? */
				ret = dtrace_user_probe((ppc_saved_state_t *)ssp);	/* Call the probe function if so... */
				if(ret == KERN_SUCCESS) {					/* Did this actually work? */
					exception = 0;							/* Clear the exception */
					break;									/* Go flow through and out... */
				}
				break;
#endif

			case T_ALTIVEC_ASSIST:
				UPDATE_PPC_EXCEPTION_STATE;
				exception = EXC_ARITHMETIC;
				code = EXC_PPC_ALTIVECASSIST;
				subcode = ssp->save_srr0;
				break;

			case T_DATA_ACCESS:
				map = thread->map;

				if(ssp->save_dsisr & dsiInvMode) {			/* Did someone try to reserve cache inhibited? */
					UPDATE_PPC_EXCEPTION_STATE;				/* Don't even bother VM with this one */
					exception = EXC_BAD_ACCESS;
					subcode = dar;
					break;
				}

				code = vm_fault(map, vm_map_trunc_page(dar),
					 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
					 FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

				if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
					UPDATE_PPC_EXCEPTION_STATE;
					exception = EXC_BAD_ACCESS;
					subcode = dar;
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to retry fault */
					ssp->save_dsisr = (ssp->save_dsisr &
						~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);	/* Make sure this is marked as a miss */
				}
				break;

			case T_INSTRUCTION_ACCESS:
				/* Same as for data access, except fault type
				 * is PROT_EXEC and addr comes from srr0
				 */
				map = thread->map;

				code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
						(PROT_EXEC | PROT_RO), FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));

				if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
					UPDATE_PPC_EXCEPTION_STATE;
					exception = EXC_BAD_ACCESS;
					subcode = ssp->save_srr0;
				} else {
					ssp->save_hdr.save_flags |= SAVredrive;	/* Tell low-level to re-try fault */
					ssp->save_srr1 = (ssp->save_srr1 &
						~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);		/* Make sure this is marked as a miss */
				}
				break;

			case T_AST:
				/* AST delivery is done below */
				break;

		}

#ifdef MACH_BSD
		{
		bsd_uprofil(&tv, ssp->save_srr0);
		}
#endif /* MACH_BSD */
	}

	if (exception) {
		/* if this is the init task, save the exception information */
		/* this probably is a fatal exception */
#if 0
		if(bsd_init_task == current_task()) {
			char *buf;
        		int i;

			buf = init_task_failure_data;


			buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
			buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
								, dsisr, dar);

			for (i=0; i<32; i++) {
		       		if ((i % 8) == 0) {
					buf += sprintf(buf, "\n%4d :",i);
				}
				buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
			}

        		buf += sprintf(buf, "\n\n");
        		buf += sprintf(buf, "cr        = 0x%08X\t\t",ssp->save_cr);
        		buf += sprintf(buf, "xer       = 0x%08X\n",ssp->save_xer);
        		buf += sprintf(buf, "lr        = 0x%016llX\t\t",ssp->save_lr);
        		buf += sprintf(buf, "ctr       = 0x%016llX\n",ssp->save_ctr);
        		buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
        		buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
                   	   "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
                    	   "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
        		buf += sprintf(buf, "\n\n");

        		/* generate some stack trace */
        		buf += sprintf(buf, "Application level back trace:\n");
        		if (ssp->save_srr1 & MASK(MSR_PR)) {
                	   char *addr = (char*)ssp->save_r1;
                	   unsigned int stack_buf[3];
                	   for (i = 0; i < 8; i++) {
                        	if (addr == (char*)NULL)
                               		break;
                        	if (!copyin(ssp->save_r1,(char*)stack_buf,
							3 * sizeof(int))) {
                               		buf += sprintf(buf, "0x%08X : 0x%08X\n"
						,addr,stack_buf[2]);
                               		addr = (char*)stack_buf[0];
                        	} else {
                               		break;
                       	   	}
                	   }
        		}
			buf[0] = '\0';
		}
#endif
		doexception(exception, code, subcode);
	}
	/* AST delivery
	 * Check to see if we need an AST, if so take care of it here
	 */
	ml_set_interrupts_enabled(FALSE);

	if (USER_MODE(ssp->save_srr1)) {
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
	}

	return ssp;
}
760
761/* This routine is called from assembly before each and every system call.
762 * It must preserve r3.
763 */
764
765extern int syscall_trace(int, struct savearea *);
766
767
768extern int pmdebug;
769
770int syscall_trace(int retval, struct savearea *ssp)
771{
772	int i, argc;
773	int kdarg[3];
774/* Always prepare to trace mach system calls */
775
776	kdarg[0]=0;
777	kdarg[1]=0;
778	kdarg[2]=0;
779
780	argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
781
782	if (argc > 3)
783		argc = 3;
784
785	for (i=0; i < argc; i++)
786		kdarg[i] = (int)*(&ssp->save_r3 + i);
787
788	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
789		kdarg[0], kdarg[1], kdarg[2], 0, 0);
790
791	return retval;
792}
793
794/* This routine is called from assembly after each mach system call
795 * It must preserve r3.
796 */
797
798extern int syscall_trace_end(int, struct savearea *);
799
800int syscall_trace_end(int retval, struct savearea *ssp)
801{
802	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
803		retval, 0, 0, 0, 0);
804	return retval;
805}
806
807/*
808 * called from syscall if there is an error
809 */
810
811int syscall_error(
812	int exception,
813	mach_exception_code_t code,
814	mach_exception_subcode_t subcode,
815	struct savearea *ssp)
816{
817	register thread_t thread;
818
819	thread = current_thread();
820
821	if (thread == 0)
822	    panic("syscall error in boot phase");
823
824	if (!USER_MODE(ssp->save_srr1))
825		panic("system call called from kernel");
826
827	doexception(exception, code, subcode);
828
829	return 0;
830}
831
832/* Pass up a server syscall/exception */
833void
834doexception(
835	    int exc,
836	    mach_exception_code_t code,
837	    mach_exception_subcode_t sub)
838{
839	mach_exception_data_type_t   codes[EXCEPTION_CODE_MAX];
840
841	codes[0] = code;
842	codes[1] = sub;
843	exception_triage(exc, codes, 2);
844}
845
/*
 * Human-readable names for the PowerPC exception vectors, indexed by
 * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap).
 */
const char *trap_type[] = {
	"Unknown",
	"0x100 - System reset",
	"0x200 - Machine check",
	"0x300 - Data access",
	"0x400 - Inst access",
	"0x500 - Ext int",
	"0x600 - Alignment",
	"0x700 - Program",
	"0x800 - Floating point",
	"0x900 - Decrementer",
	"0xA00 - n/a",
	"0xB00 - n/a",
	"0xC00 - System call",
	"0xD00 - Trace",
	"0xE00 - FP assist",
	"0xF00 - Perf mon",
	"0xF20 - VMX",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"0x1300 - Inst bkpnt",
	"0x1400 - Sys mgmt",
	"0x1600 - Altivec Assist",
	"0x1700 - Thermal",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"INVALID EXCEPTION",
	"Emulate",
	"0x2000 - Run Mode/Trace",
	"Signal Processor",
	"Preemption",
	"Context Switch",
	"Shutdown",
	"System Failure"
};
/* Number of entries in trap_type[] */
int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
888
/*
 * unresolved_kernel_trap - report and panic on a kernel-state trap
 * that no handler claimed.  Disables interruptions and low-level
 * tracing, prints the trap name and a backtrace, commits the panic
 * log, and (when panicDebugging is set) enters the debugger before
 * panicking with `message`.
 *
 * trapno  - exception vector offset; trapno / T_VECTOR_SIZE indexes
 *           trap_type[] for the printable name
 * ssp     - savearea for the faulting context
 * dsisr   - unused here; kept for a uniform handler signature
 * dar     - faulting data address, printed in the banner
 * message - panic message; defaults to the trap name when NULL
 *
 * Does not return.
 */
void unresolved_kernel_trap(int trapno,
			    struct savearea *ssp,
			    __unused unsigned int dsisr,
			    addr64_t dar,
			    const char *message)
{
	const char *trap_name;

	ml_set_interrupts_enabled(FALSE);					/* Turn off interruptions */
	lastTrace = LLTraceSet(0);							/* Disable low-level tracing */

#if 0
	{
		struct per_proc_info *pp;
		kprintf("  srr0: %016llX\n", ssp->save_srr0);	/* (TEST/DEBUG) */
		kprintf("  srr1: %016llX\n", ssp->save_srr1);	/* (TEST/DEBUG) */
		kprintf("   dar: %016llX\n", ssp->save_dar);	/* (TEST/DEBUG) */
		kprintf("   xcp: %08X\n", ssp->save_exception);	/* (TEST/DEBUG) */
		kprintf("  ins0: %08X\n", ssp->save_instr[0]);	/* (TEST/DEBUG) */
		kprintf("  ins1: %08X\n", ssp->save_instr[1]);	/* (TEST/DEBUG) */
		kprintf("  ins2: %08X\n", ssp->save_instr[2]);	/* (TEST/DEBUG) */
		kprintf("  ins3: %08X\n", ssp->save_instr[3]);	/* (TEST/DEBUG) */
		kprintf("  ins4: %08X\n", ssp->save_instr[4]);	/* (TEST/DEBUG) */
		kprintf("  ins5: %08X\n", ssp->save_instr[5]);	/* (TEST/DEBUG) */
		kprintf("  ins6: %08X\n", ssp->save_instr[6]);	/* (TEST/DEBUG) */
		kprintf("  ins7: %08X\n", ssp->save_instr[7]);	/* (TEST/DEBUG) */
		pp = getPerProc();								/* (TEST/DEBUG) */
		kprintf("ijsave: %016llX\n", pp->ijsave);		/* (TEST/DEBUG) */
	}
#endif

	if( logPanicDataToScreen )
		disable_debug_output = FALSE;

	debug_mode++;
	if ((unsigned)trapno <= T_MAX)
		trap_name = trap_type[trapno / T_VECTOR_SIZE];
	else
		trap_name = "???? unrecognized exception";
	if (message == NULL)
		message = trap_name;

	kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
	       cpu_number(), trap_name, dar, ssp->save_srr0);

	print_backtrace(ssp);

	panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
	/* Commit the panic log buffer to NVRAM, unless otherwise
	 * specified via a boot-arg.
	 */
	if (panicDebugging)
		commit_paniclog();

	draw_panic_dialog();
	/* XXX: This is yet another codepath into the debugger, which should
	 * be reworked to enter the primary panic codepath instead.
	 * The idea appears to be to enter the debugger (performing a
	 * stack switch) as soon as possible, but we do have a
	 * savearea encapsulating state (accessible by walking the savearea
	 * chain), so that's superfluous.
	 */
	if( panicDebugging )
		(void)Call_Debugger(trapno, ssp);
	panic_plain(message);
}
955
/* Banner strings indexed by save_misc3 (0 = uncorrected, 1 = corrected) */
const char *corr[2] = {"uncorrected", "corrected  "};

/*
 * handleMck - common machine-check handler for both user and kernel
 * state.  Logs the machine-check registers; if save_misc3 indicates
 * the check was recovered (corrected), returns so execution resumes,
 * otherwise panics.
 */
void handleMck(struct savearea *ssp) {					/* Common machine check handler */

	int cpu;

	cpu = cpu_number();

	printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
		cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);		/* Tell us about it */
	printf("Machine check (%d) -   AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
	printf("Machine check (%d) -      L2FIR = %016llX,  BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);

	if(ssp->save_hdr.save_misc3) return;				/* Leave if the machine check was recovered */

	panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
	      "  AsyncSrc = %016llX, CoreFIR = %016llx\n"
	      "     L2FIR = %016llX,  BusFir = %016llx\n",
		  ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar,
		  ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);

	return;
}
979
980void
981thread_syscall_return(
982        kern_return_t ret)
983{
984        register thread_t   thread = current_thread();
985        register struct savearea *regs = USER_REGS(thread);
986
987	if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
988	  /* Mach trap */
989	  KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
990		       ret, 0, 0, 0, 0);
991	}
992        regs->save_r3 = ret;
993
994        thread_exception_return();
995        /*NOTREACHED*/
996}
997
998
999#if	MACH_KDB
/*
 * thread_kdb_return - (MACH_KDB only) re-enter the kernel debugger
 * for the current thread's saved user state and exception number,
 * then resume user execution.  Does not return.
 */
void
thread_kdb_return(void)
{
	register thread_t	thread = current_thread();
	register struct savearea *regs = USER_REGS(thread);

	Call_Debugger(thread->machine.pcb->save_exception, regs);
	thread_exception_return();
	/*NOTREACHED*/
}
1010#endif	/* MACH_KDB */
1011