/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *  for any purpose is hereby granted without fee, provided that
 *  the above copyright notice and this notice appears in all
 *  copies, and that the name of Hewlett-Packard Company not be
 *  used in advertising or publicity pertaining to distribution
 *  of the software without specific, written prior permission.
 *  Hewlett-Packard Company makes no representations about the
 *  suitability of this software for any purpose.
 */
/*
 * Copyright (c) 1990,1991,1992,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM ITS USE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 * 	Utah $Hdr: model_dep.c 1.34 94/12/14$
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <debug.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>
#include <ppc/boot.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/thread.h>
#include <ppc/asm.h>
#include <ppc/mem.h>
#include <ppc/Firmware.h>
#include <ppc/low_trace.h>
#include <ppc/mappings.h>
#include <ppc/FirmwareCalls.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#include <ppc/lowglobals.h>
#include <ppc/machine_cpu.h>
#include <ppc/db_machdep.h>

#include <kern/clock.h>
#include <kern/debug.h>
#include <machine/trap.h>
#include <kern/spl.h>
#include <pexpert/pexpert.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/machine.h>
#include <vm/vm_map.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <mach/time_value.h>
#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <machine/machparam.h>	/* for btop */

#if	MACH_KDB
#include <ddb/db_aout.h>
#include <ddb/db_output.h>
#include <ddb/db_command.h>
#include <machine/db_machdep.h>

extern struct db_command ppc_db_commands[];
#endif	/* MACH_KDB */

char kernel_args_buf[256] = "/mach_kernel";
char boot_args_buf[256] = "/mach_servers/bootstrap";
char env_buf[256];

#define TRAP_DEBUGGER	__asm__ volatile("tw 4,r3,r3");
#define TRAP_DEBUGGER_INST	0x7c831808
#define TRAP_DIRECT	__asm__ volatile("tw 4,r4,r4");
#define TRAP_DIRECT_INST	0x7c842008
#define TRAP_INST_SIZE	4
#define BREAK_TO_KDP0 0x7fe00008
#define BREAK_TO_KDP1 0x7c800008
#define BREAK_TO_KDB0 0x7c810808

/*
 * Code used to synchronize debuggers among all CPUs, one active at a time; switch
 * from one to another using kdb_on! #cpu or cpu #cpu
 */

hw_lock_data_t debugger_lock;	/* debugger lock */
hw_lock_data_t pbtlock;		/* backtrace print lock */

unsigned int debugger_cpu = (unsigned)-1; /* current cpu running debugger	*/
int			debugger_debug = 0;			/* Debug debugger */
int 		db_run_mode;				/* Debugger run mode */
unsigned int debugger_sync = 0;			/* Cross processor debugger entry sync */
extern 		unsigned int NMIss;			/* NMI debounce switch */

extern volatile int panicwait;
volatile unsigned int pbtcnt = 0;
volatile unsigned int pbtcpu = -1;

unsigned int lastTrace;					/* Value of low-level exception trace controls */


volatile unsigned int	cpus_holding_bkpts;	/* counter for number of cpus holding
											   breakpoints (i.e., cpus that have not
											   yet put breakpoints back) */
void unlock_debugger(void);
void lock_debugger(void);
void dump_backtrace(struct savearea *sv,
		    unsigned int stackptr,
		    unsigned int fence);
void dump_savearea(struct savearea *sv,
		   unsigned int fence);

#if !MACH_KDB
boolean_t	db_breakpoints_inserted = TRUE;
jmp_buf_t *db_recover;
#endif

#if	MACH_KDB
#include <ddb/db_run.h>
int	kdb_flag=0;
extern boolean_t db_breakpoints_inserted;
extern jmp_buf_t *db_recover;
#define	KDB_READY	0x1
#endif

#if	MACH_KDP
extern int 	kdp_flag;
#define	KDP_READY	0x1
#endif

unsigned int db_im_stepping = 0xFFFFFFFF; /* Remember if we were stepping */


const char *failNames[] = {
	"Debugging trap",			/* failDebug */
	"Corrupt stack",			/* failStack */
	"Corrupt mapping tables",	/* failMapping */
	"Corrupt context",			/* failContext */
	"No saveareas",				/* failNoSavearea */
	"Savearea corruption",		/* failSaveareaCorr */
	"Invalid live context",		/* failBadLiveContext */
	"Corrupt skip lists",		/* failSkipLists */
	"Unaligned stack",			/* failUnalignedStk */
	"Invalid pmap",				/* failPmap */
	"Lock timeout",				/* failTimeout */
	"Unknown failure code"		/* Unknown failure code - must always be last */
};

const char *invxcption = "Unknown code";

static unsigned	commit_paniclog_to_nvram;

#if !MACH_KDB
void kdb_trap(__unused int type, __unused struct savearea *regs) {}
#endif /* !MACH_KDB */

#if !MACH_KDP
void kdp_trap(__unused int type, __unused struct savearea *regs) {}
#endif /* !MACH_KDP */

extern int default_preemption_rate;
extern int max_unsafe_quanta;
extern int max_poll_quanta;

void
machine_startup(void)
{
	int	boot_arg;
	unsigned int wncpu;

	if (PE_parse_boot_argn("cpus", &wncpu, sizeof (wncpu))) {
		if ((wncpu > 0) && (wncpu < MAX_CPUS))
			max_ncpus = wncpu;
	}

	if (PE_get_hotkey(kPEControlKey))
		halt_in_debugger = halt_in_debugger ? 0 : 1;

	if (PE_parse_boot_argn("debug", &boot_arg, sizeof (boot_arg))) {
		if (boot_arg & DB_HALT) halt_in_debugger=1;
		if (boot_arg & DB_PRT) disable_debug_output=FALSE;
		if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
		if (boot_arg & DB_NMI) panicDebugging=TRUE;
		if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
	}
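
	/*
	 * Illustrative example (assuming the usual osfmk/kern/debug.h bit values,
	 * e.g. DB_NMI == 0x4 and DB_LOG_PI_SCRN == 0x100, which are not defined in
	 * this file): booting with
	 *
	 *	nvram boot-args="debug=0x104 nvram_paniclog=0"
	 *
	 * would enable NMI/panic debugging and on-screen panic data, and the
	 * "nvram_paniclog=0" part would suppress committing the panic log to
	 * NVRAM (see the PE_parse_boot_argn calls below).
	 */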

	if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
		commit_paniclog_to_nvram = 1;

	PE_parse_boot_argn("vmmforce", &lowGlo.lgVMMforcedFeats, sizeof (lowGlo.lgVMMforcedFeats));

	hw_lock_init(&debugger_lock);				/* initialize debugger lock */
	hw_lock_init(&pbtlock);						/* initialize print backtrace lock */

#if	MACH_KDB
	/*
	 * Initialize KDB
	 */
#if	DB_MACHINE_COMMANDS
	db_machine_commands_install(ppc_db_commands);
#endif	/* DB_MACHINE_COMMANDS */
	ddb_init();

	if (boot_arg & DB_KDB)
		current_debugger = KDB_CUR_DB;

	/*
	 * Cause a breakpoint trap to the debugger before proceeding
	 * any further if the proper option bit was specified in
	 * the boot flags.
	 */
	if (halt_in_debugger && (current_debugger == KDB_CUR_DB)) {
		Debugger("inline call to debugger(machine_startup)");
		halt_in_debugger = 0;
		active_debugger = 1;
	}
#endif /* MACH_KDB */
	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
		max_unsafe_quanta = boot_arg;
	}
	if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
		max_poll_quanta = boot_arg;
	}
	if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
		sched_poll_yield_shift = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/*NOTREACHED*/
}

char *
machine_boot_info(__unused char *buf, __unused vm_size_t size)
{
	return(PE_boot_args());
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;	/* Note that this will be 2 GB for >= 2 GB machines */
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
/*	Note that we must initialize the stepper tables AFTER the clock is configured! */
	if(pmsExperimental & 1) pmsCPUConf();	/* (EXPERIMENTAL) Initialize the stepper tables */
	perfmon_init();
	return;

}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();			/* Initialize the processor */
	clock_init();				/* Init the clock */
}

void
halt_all_cpus(boolean_t	reboot)
{
	if(reboot)
	{
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	}
	else
	{
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while(1);
}

void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

#if	MACH_ASSERT
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(__unused natural_t *buf, __unused vm_size_t callstack_max)
{
}
#endif	/* MACH_ASSERT */

void
print_backtrace(struct savearea *ssp)
{
	unsigned int stackptr, fence;
	struct savearea *sv, *svssp, *psv;
	unsigned int cpu;

/*
 *	We need this lock to make sure we don't hang up when we double panic on an MP.
 */

	cpu  = cpu_number();					/* Just who are we anyways? */
	if(pbtcpu != cpu) {						/* Allow recursion */
		(void)hw_atomic_add(&pbtcnt, 1); /* Remember we are trying */
		while(!hw_lock_try(&pbtlock));		/* Spin here until we can get in. If we never do, well, we're crashing anyhow... */
		pbtcpu = cpu;						/* Mark it as us */
	}

	svssp = (struct savearea *)ssp;				/* Make this easier */
	sv = NULL;
	if(current_thread())
		sv = (struct savearea *)current_thread()->machine.pcb;	/* Find most current savearea if system has started */

	fence = 0xFFFFFFFF;						/* Show we go all the way */
	if(sv) fence = (unsigned int)sv->save_r1;	/* Stop at previous exception point */

	if(!svssp) {							/* Should we start from stack? */
		kdb_printf("Latest stack backtrace for cpu %d:\n", cpu_number());
		__asm__ volatile("mr %0,r1" : "=r" (stackptr));	/* Get current stack */
		dump_backtrace((struct savearea *)0,stackptr, fence);	/* Dump the backtrace */
		if(!sv) {							/* Leave if no saveareas */
			hw_lock_unlock(&pbtlock);		/* Allow another back trace to happen */
			goto pbt_exit;
		}
	}
	else {									/* Were we passed an exception? */
		fence = 0xFFFFFFFF;					/* Show we go all the way */
		if(svssp->save_hdr.save_prev) {
			if((svssp->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)svssp->save_hdr.save_prev))) {	/* Valid address? */
				psv = (struct savearea *)((unsigned int)svssp->save_hdr.save_prev);	/* Get the 64-bit back chain converted to a regular pointer */
				fence = (unsigned int)psv->save_r1;	/* Stop at previous exception point */
			}
		}

		kdb_printf("Latest crash info for cpu %d:\n", cpu_number());
		kdb_printf("   Exception state (sv=%p)\n", svssp);
		dump_savearea(svssp, fence);		/* Dump this savearea */
	}

	if(!sv) {								/* Leave if no saveareas */
		hw_lock_unlock(&pbtlock);			/* Allow another back trace to happen */
		goto pbt_exit;
	}

	kdb_printf("Proceeding back via exception chain:\n");

	while(sv) {								/* Do them all... */
		if(!(((addr64_t)((uintptr_t)sv) <= vm_last_addr) &&
			(unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)sv)))) {	/* Valid address? */
			kdb_printf("   Exception state (sv=%p) Not mapped or invalid. stopping...\n", sv);
			break;
		}

		kdb_printf("   Exception state (sv=%p)\n", sv);
		if(sv == svssp) {					/* Did we dump it already? */
			kdb_printf("      previously dumped as \"Latest\" state. skipping...\n");
		}
		else {
			fence = 0xFFFFFFFF;				/* Show we go all the way */
			if(sv->save_hdr.save_prev) {
				if((sv->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)sv->save_hdr.save_prev))) {	/* Valid address? */
					psv = (struct savearea *)((unsigned int)sv->save_hdr.save_prev);	/* Get the 64-bit back chain converted to a regular pointer */
					fence = (unsigned int)psv->save_r1;	/* Stop at previous exception point */
				}
			}
			dump_savearea(sv, fence);		/* Dump this savearea */
		}

		sv = CAST_DOWN(struct savearea *, sv->save_hdr.save_prev);	/* Back chain */
	}


	pbtcpu = -1;							/* Mark as unowned */
	hw_lock_unlock(&pbtlock);				/* Allow another back trace to happen */
	(void)hw_atomic_sub(&pbtcnt, 1);  /* Show we are done */

	while(pbtcnt);							/* Wait for completion */
pbt_exit:
	panic_display_system_configuration();
	panic_display_zprint();
	dump_kext_info(&kdb_log);
	return;
}

void
dump_savearea(struct savearea *sv, unsigned int fence)
{
	const char *xcode;

	if(sv->save_exception > T_MAX)
		xcode = invxcption;	/* Too big for table */
	else
		xcode = trap_type[sv->save_exception / 4];		/* Point to the type */

	kdb_printf("      PC=0x%08X; MSR=0x%08X; DAR=0x%08X; DSISR=0x%08X; LR=0x%08X; R1=0x%08X; XCP=0x%08X (%s)\n",
		(unsigned int)sv->save_srr0, (unsigned int)sv->save_srr1, (unsigned int)sv->save_dar, sv->save_dsisr,
		(unsigned int)sv->save_lr, (unsigned int)sv->save_r1, sv->save_exception, xcode);

	if(!(sv->save_srr1 & MASK(MSR_PR))) {		/* Are we in the kernel? */
		dump_backtrace(sv, (unsigned int)sv->save_r1, fence);	/* Dump the stack back trace from here if not user state */
	}

	return;
}

#define DUMPFRAMES 34
#define LRindex 2

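/*
 * Frame layout note: dump_backtrace() below walks the 32-bit PowerPC stack
 * convention in which word 0 of each frame holds the caller's saved stack
 * pointer (the back chain) and the saved link register lives at byte offset
 * 8, i.e. word 2 of the frame.  Hence the chain step "stackptr = sframe[0]"
 * and the return-address fetch "sframe[LRindex]" with LRindex == 2.
 */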
void dump_backtrace(struct savearea *sv, unsigned int stackptr, unsigned int fence) {

	unsigned int bframes[DUMPFRAMES];
	unsigned int  sframe[8], raddr, dumbo;
	int i, index=0;
//	char syminfo[80];

	kdb_printf("      Backtrace:\n");
	if (sv != (struct savearea *)0) {
		bframes[0] = (unsigned int)sv->save_srr0;
		bframes[1] = (unsigned int)sv->save_lr;
		index = 2;
	}
	for(i = index; i < DUMPFRAMES; i++) {			/* Dump up to max frames */

		if(!stackptr || (stackptr == fence)) break;		/* Hit stop point or end... */

		if(stackptr & 0x0000000F) {				/* Is stack pointer valid? */
			kdb_printf("\n         backtrace terminated - unaligned frame address: 0x%08X\n", stackptr);	/* No, tell 'em */
			break;
		}

		raddr = (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)stackptr);	/* Get physical frame address */
		if(!raddr || (stackptr > vm_last_addr)) {		/* Is it mapped? */
			kdb_printf("\n         backtrace terminated - frame not mapped or invalid: 0x%08X\n", stackptr);	/* No, tell 'em */
			break;
		}

		if(!mapping_phys_lookup(raddr, &dumbo)) {	/* Is it within physical RAM? */
			kdb_printf("\n         backtrace terminated - frame outside of RAM: v=0x%08X, p=%08X\n", stackptr, raddr);	/* No, tell 'em */
			break;
		}

		ReadReal((addr64_t)((raddr << 12) | (stackptr & 4095)), &sframe[0]);	/* Fetch the stack frame */

		bframes[i] = sframe[LRindex];				/* Save the link register */

//		syms_formataddr((vm_offset_t)bframes[i], syminfo, sizeof (syminfo));
//		kdb_printf("        %s\n", syminfo);
		if(!i) kdb_printf("         ");				/* Indent first time */
		else if(!(i & 7)) kdb_printf("\n         ");	/* Skip to new line every 8 */
		kdb_printf("0x%08X ", bframes[i]);			/* Dump the link register */

		stackptr = sframe[0];						/* Chain back */
	}
	kdb_printf("\n");
	if(i >= DUMPFRAMES) kdb_printf("      backtrace continues...\n");	/* Say we terminated early */
	if(i) kmod_dump((vm_offset_t *)&bframes[0], i);	/* Show what kmods are in trace */

}

void commit_paniclog(void) {
	unsigned long pi_size = 0;

	if (debug_buf_size > 0) {
		if (commit_paniclog_to_nvram) {
			unsigned int bufpos;

			/* XXX Consider using the WKdm compressor in the
			 * future, rather than just packing - would need to
			 * be co-ordinated with crashreporter, which decodes
			 * this post-restart. The compressor should be
			 * capable of in-place compression.
			 */
			bufpos = packA(debug_buf, (unsigned) (debug_buf_ptr - debug_buf), debug_buf_size);
			/* If compression was successful,
			 * use the compressed length
			 */
			pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);

			/* Truncate if the buffer is larger than a
			 * certain magic size - this really ought to
			 * be some appropriate fraction of the NVRAM
			 * image buffer, and is best done in the
			 * savePanicInfo() or PESavePanicInfo() calls.
			 * This call must save data synchronously,
			 * since we can subsequently halt the system.
			 */
			kprintf("Attempting to commit panic log to NVRAM\n");
			/* N.B.: This routine (currently an IOKit wrapper that
			 * calls through to the appropriate platform NVRAM
			 * driver) must be panic context safe, i.e.
			 * acquire no locks or require kernel services.
			 * This does not appear to be the case currently
			 * on some platforms, unfortunately (the driver
			 * relies on command gate serialization).
			 */
			pi_size = PESavePanicInfo((unsigned char *)debug_buf,
			    ((pi_size > 2040) ? 2040 : pi_size));
			/* Uncompress in-place, to allow debuggers to examine
			 * the panic log.
			 */
			if (bufpos)
				unpackA(debug_buf, bufpos);
		}
	}
}

void
Debugger(const char	*message) {

	spl_t spl;

	spl = splhigh();								/* No interruptions from here on */

/*
 *	backtrace for Debugger() call from panic() if no current debugger
 *	backtrace and return for double panic() call
 */
	if ((panicstr != (char *)0) &&
	  (((nestedpanic != 0) && (current_debugger == 1)) || (active_debugger == 0))) {
		print_backtrace(NULL);
		if (nestedpanic != 0) {
			splx(spl);
			return;									/* Yeah, don't enter again... */
		}
	}

	if (debug_mode && getPerProc()->debugger_active) {	/* Are we already in the debugger on this processor? */
		splx(spl);
		return;										/* Yeah, don't do it again... */
	}


/*
 * The above stuff catches the double panic case so we shouldn't have to worry about that here.
 */
	if ( panicstr != (char *)0 )
	{
		disable_preemption();
		/* Commit the panic log buffer to NVRAM, unless otherwise
		 * specified via a boot-arg.
		 */
		commit_paniclog();
		if(!panicDebugging) {
			unsigned int my_cpu, tcpu;

			my_cpu = cpu_number();
			debugger_cpu = my_cpu;

			(void)hw_atomic_add(&debug_mode, 1);
			PerProcTable[my_cpu].ppe_vaddr->debugger_active++;
			lock_debugger();

			for(tcpu = 0; tcpu < real_ncpus; tcpu++) {
				if(tcpu == my_cpu) continue;
				(void)hw_atomic_add(&debugger_sync, 1);
				(void)cpu_signal(tcpu, SIGPdebug, 0, 0);
			}
			(void)hw_cpu_sync(&debugger_sync, LockTimeOut);
			debugger_sync = 0;
		}

		draw_panic_dialog();

		if(!panicDebugging) {
#if CONFIG_EMBEDDED
			PEHaltRestart(kPEPanicRestartCPU);
#else
			PEHaltRestart( kPEHangCPU );
#endif
		}

		enable_preemption();
	}

	if ((current_debugger != NO_CUR_DB)) {			/* If there is a debugger configured, enter it */
		printf("Debugger(%s)\n", message);
		TRAP_DEBUGGER;
		splx(spl);
		return;										/* Done debugging for a while */
	}

	printf("\nNo debugger configured - dumping debug information\n");
	printf("MSR=%08X\n", mfmsr());
	print_backtrace(NULL);
	splx(spl);
	return;
}

/*
 *		Here's where we attempt to get some diagnostic information dumped out
 *		when the system is really confused.  We will try to get into the
 *		debugger as well.
 *
 *		We are here with interrupts disabled and on the debug stack.  The savearea
 *		that was passed in is NOT chained to the activation.
 *
 *		save_r3 contains the failure reason code.
 */

void
SysChoked(unsigned int type, struct savearea *sv)
{
	unsigned int failcode;
	const char * const pmsg = "System Failure: cpu=%d; code=%08X (%s)\n";
	mp_disable_preemption();
	disable_debug_output = FALSE;
	debug_mode = TRUE;

	failcode = (unsigned int)sv->save_r3;			/* Get the failure code */
	if(failcode > failUnknown) failcode = failUnknown;	/* Set unknown failure code */

	kprintf(pmsg, cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);
	kdb_printf(pmsg, cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);

	print_backtrace(sv);							/* Attempt to print backtrace */

	/* Commit the panic log buffer to NVRAM, unless otherwise
	 * specified via a boot-arg. For certain types of panics
	 * which result in a "choke" exception, this may well
	 * be inadvisable, and setting the nvram_paniclog=0
	 * boot-arg may be useful.
	 */

	if (panicDebugging)
		commit_paniclog();

	Call_DebuggerC(type, sv);						/* Attempt to get into debugger */

	if ((current_debugger != NO_CUR_DB))
		Call_DebuggerC(type, sv);	/* Attempt to get into debugger */
	panic_plain(pmsg, cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);
}



/*
 *	When we get here, interruptions are disabled and we are on the debugger stack.
 *	Never, ever, ever, ever enable interruptions from here on.
 */

int
Call_DebuggerC(unsigned int type, struct savearea *saved_state)
{
	int				directcall, wait;
	addr64_t		instr_ptr = 0ULL;
	ppnum_t			instr_pp;
	unsigned int 	instr, tcpu, my_cpu;
	int 			wasdebugger;

	my_cpu = cpu_number();								/* Get our CPU */

#if	MACH_KDB
	if((debugger_cpu == my_cpu) && 						/* Do we already own debugger? */
	  PerProcTable[my_cpu].ppe_vaddr->debugger_active && 	/* and are we really active? */
	  db_recover && 									/* and have we set up recovery? */
	  (current_debugger == KDB_CUR_DB)) {				/* and are we in KDB (only it handles recovery) */
		kdb_trap(type, saved_state);					/* Then reenter it... */
	}
#endif

	(void)hw_atomic_add(&debug_mode, 1); /* Indicate we are in debugger */
	PerProcTable[my_cpu].ppe_vaddr->debugger_active++;	/* Show active on our CPU */

	lock_debugger();									/* Ensure that only one CPU is in debugger */

	if(db_im_stepping == my_cpu) {						/* Are we just back from a step? */
		enable_preemption_no_check();					/* Enable preemption now */
		db_im_stepping = 0xFFFFFFFF;					/* Nobody stepping right now */
	}

	if (debugger_debug) {
#if 0
		kprintf("Call_DebuggerC(%d): %08X %08X, debact = %d\n", my_cpu, type, (uint32_t)saved_state, debug_mode);	/* (TEST/DEBUG) */
#endif
		printf("Call_Debugger: enter - cpu %d, is_slave %d, debugger_cpu %d, pc %08llX\n",
		   my_cpu, PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave, debugger_cpu, saved_state->save_srr0);
	}

	instr_pp = (vm_offset_t)pmap_find_phys(kernel_pmap, (addr64_t)(saved_state->save_srr0));

	if (instr_pp) {
		instr_ptr = (addr64_t)(((addr64_t)instr_pp << 12) | (saved_state->save_srr0 & 0xFFF));	/* Make physical address */
		instr = ml_phys_read_64(instr_ptr);				/* Get the trap that caused entry */
	}
	else instr = 0;

#if 0
	if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_pp = %08X, instr_ptr = %016llX, instr = %08X\n", my_cpu, instr_pp, instr_ptr, instr);	/* (TEST/DEBUG) */
#endif

	if (db_breakpoints_inserted) cpus_holding_bkpts++;	/* Bump up the holding count */
	if ((debugger_cpu == (unsigned)-1) &&
		!PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) {
#if 0
		if (debugger_debug) kprintf("Call_DebuggerC(%d): lasttrace = %08X\n", my_cpu, lastTrace);	/* (TEST/DEBUG) */
#endif
		debugger_cpu = my_cpu;							/* Show that we are debugger */


		lastTrace = LLTraceSet(0);						/* Disable low-level tracing */

		for(tcpu = 0; tcpu < real_ncpus; tcpu++) {		/* Stop all the other guys */
			if(tcpu == my_cpu) continue;				/* Don't diddle ourselves */
			(void)hw_atomic_add(&debugger_sync, 1); /* Count signal sent */
			(void)cpu_signal(tcpu, SIGPdebug, 0, 0);	/* Tell 'em to enter debugger */
		}
		(void)hw_cpu_sync(&debugger_sync, LockTimeOut);	/* Wait for the other processors to enter debug */
		debugger_sync = 0;								/* We're done with it */
	}
	else if (debugger_cpu != my_cpu)  goto debugger_exit;	/* We are not debugger, don't continue... */


	if (instr == TRAP_DIRECT_INST) {
		disable_debug_output = FALSE;
		print_backtrace(saved_state);
	}

	switch_debugger = 0;								/* Make sure switch request is off */
	directcall = 1;										/* Assume direct call */

	if (saved_state->save_srr1 & MASK(SRR1_PRG_TRAP)) {	/* Trap instruction? */

		directcall = 0;									/* We had a trap not a direct call */

		switch (instr) {								/* Select trap type */

#if	MACH_KDP
			case BREAK_TO_KDP0:							/* Breakpoint into KDP? */
			case BREAK_TO_KDP1:							/* Breakpoint into KDP? */
				current_debugger = KDP_CUR_DB;			/* Yes, set KDP */
				kdp_trap(type, saved_state);			/* Enter it */
				break;
#endif

#if	MACH_KDB
			case BREAK_TO_KDB0: 						/* Breakpoint to KDB (the "good" debugger)? */
				current_debugger = KDB_CUR_DB;			/* Yes, set it */
				kdb_trap(type, saved_state);			/* Enter it */
				break;
#endif

			case TRAP_DEBUGGER_INST:					/* Should we enter the current debugger? */
			case TRAP_DIRECT_INST:						/* Should we enter the current debugger? */
				if (current_debugger == KDP_CUR_DB) 	/* Is current KDP? */
					kdp_trap(type, saved_state);		/* Yes, enter it */
				else if (current_debugger == KDB_CUR_DB) 	/* Is this KDB? */
					kdb_trap(type, saved_state);		/* Yes, go ahead and enter */
				else goto debugger_error;				/* No debugger active */
				break;

			default:									/* Unknown/bogus trap type */
				goto debugger_error;
		}
	}

	while(1) {											/* We are here to handle debugger switches */

		if(!directcall) {								/* Was this a direct call? */
			if(!switch_debugger) break;					/* No, then leave if no switch requested... */

/*
 *			Note: we can only switch to a debugger we have.  Ignore bogus switch requests.
 */
#if 0
			if (debugger_debug) kprintf("Call_DebuggerC(%d): switching debuggers\n", my_cpu);	/* (TEST/DEBUG) */
#endif
#if MACH_KDB
			if(current_debugger == KDP_CUR_DB) current_debugger = KDB_CUR_DB; /* Switch to KDB */
#if MACH_KDP
			else
#endif
#endif
#if MACH_KDP
			if(current_debugger == KDB_CUR_DB) current_debugger = KDP_CUR_DB;		/* Switch to KDP */
#endif
		}

		switch_debugger = 0;							/* Clear request */
		directcall = 0;									/* Clear first-time direct call indication */

		switch (current_debugger) {						/* Enter correct debugger */

			case KDP_CUR_DB:							/* Enter KDP */
				kdp_trap(type, saved_state);
				break;

			case KDB_CUR_DB:							/* Enter KDB */
				kdb_trap(type, saved_state);
				break;

			default:									/* No debugger installed */
				goto debugger_error;
				break;
		}
	}

debugger_exit:
#if 0
	if (debugger_debug) kprintf("Call_DebuggerC(%d): exit - inst = %08X, cpu=%d(%d), run=%d\n", my_cpu,
		instr, my_cpu, debugger_cpu, db_run_mode);	/* (TEST/DEBUG) */
#endif
	if ((instr == TRAP_DEBUGGER_INST) ||				/* Did we trap to enter debugger? */
		(instr == TRAP_DIRECT_INST)) saved_state->save_srr0 += TRAP_INST_SIZE;	/* Yes, point past trap */

	wasdebugger = 0;									/* Assume not debugger */
	if(debugger_cpu == my_cpu) {						/* Are we the debugger processor? */
		wasdebugger = 1;								/* Remember that we were the debugger */
		LLTraceSet(lastTrace);							/* Enable tracing on the way out if we are debugger */
	}

	wait = FALSE;										/* Assume we are not going to wait */
	if (db_run_mode == STEP_CONTINUE) {					/* Are we going to run? */
		wait = TRUE;									/* Yeah, remember to wait for breakpoints to clear */
		debugger_cpu = -1;								/* Release other processor's debuggers */
		for(tcpu = 0; tcpu < real_ncpus; tcpu++)
			PerProcTable[tcpu].ppe_vaddr->debugger_pending = 0;	/* Release request (this is a HACK) */
		NMIss = 0;										/* Let NMI bounce */
	}

	if(db_run_mode == STEP_ONCE) {						/* Are we about to step? */
		disable_preemption();							/* Disable preemption for the step */
		db_im_stepping = my_cpu;						/* Remember that I am about to step */
	}

	if (db_breakpoints_inserted) cpus_holding_bkpts--;	/* If any breakpoints, back off count */
	if (PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave--;	/* If we were a slave, uncount us */
	if (debugger_debug)
		printf("Call_Debugger: exit - cpu %d, debugger_cpu %d, run_mode %d holds %d\n",
			  my_cpu, debugger_cpu, db_run_mode,
			  cpus_holding_bkpts);

	unlock_debugger();									/* Release the lock */
	PerProcTable[my_cpu].ppe_vaddr->debugger_active--;	/* Say we aren't active anymore */

	if (wait) while(cpus_holding_bkpts);				/* Wait for breakpoints to clear */


	(void)hw_atomic_sub(&debug_mode, 1); /* Set out of debug now */

	return(1);											/* Exit debugger normally */

debugger_error:
	if(db_run_mode != STEP_ONCE) enable_preemption_no_check();	/* Enable preemption, but don't preempt here */
	(void)hw_atomic_sub(&debug_mode, 1); /* Set out of debug now */
	return(0);											/* Return in shame... */

}

void
lock_debugger(void)
{
	unsigned int my_cpu;

	my_cpu = cpu_number();								/* Get our CPU number */

	while(1) { /* Check until we get it */
		if (debugger_cpu != (unsigned)-1 && debugger_cpu != my_cpu)
			continue;	/* Someone, not us, is debugger... */
		if (hw_lock_try(&debugger_lock)) { /* Get the debug lock */
			if (debugger_cpu == (unsigned)-1 || debugger_cpu == my_cpu)
				break;	/* Is it us? */
			hw_lock_unlock(&debugger_lock); /* Not us, release lock */
		}
	}
}

void unlock_debugger(void) {

	hw_lock_unlock(&debugger_lock);

}

int patchInst(task_t task, addr64_t vaddr, uint32_t inst);
int patchInst(task_t task, addr64_t vaddr, uint32_t inst)
{
	vm_map_t map;
	addr64_t paddr;
	uint32_t instr, nestingDepth;
	kern_return_t ret;
	vm_region_submap_short_info_data_64_t info;
	mach_msg_type_number_t count;
	mach_vm_address_t address;
	mach_vm_size_t sizeOfRegion;
	vm_prot_t reprotect;

	if(task == TASK_NULL) return -1;		/* Leave if task is bogus... */

	task_lock(task);						/* Make sure the task doesn't go anywhere */
	if (!task->active) {					/* Is it alive? */
		task_unlock(task);					/* Nope, unlock */
		return -1;							/* Not an active task, fail... */
	}
	map = task->map;						/* Get its map */
	vm_map_reference_swap(map);				/* Don't let it go away */
	task_unlock(task);						/* Unleash the task */

	/* Find the memory permissions. */
	nestingDepth=999999;					/* Limit recursion */

	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	address = (mach_vm_address_t)vaddr;
	sizeOfRegion = (mach_vm_size_t)4;

	ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
	if (ret != KERN_SUCCESS) {				/* Leave if it didn't work */
		vm_map_deallocate(map);				/* Drop reference on map */
		return (-1);
	}

/*
 *	We need to check if there could be a problem if the dtrace probes are being removed and the code is being
 *	executed at the same time.  This sequence may leave us with no-execute turned on temporarily when we execute
 *	through it.
 */

	if (!(info.protection & VM_PROT_WRITE)) {
		/* Save the original protection values for restoration later */
		reprotect = info.protection;

		if (info.max_protection & VM_PROT_WRITE) {
			/* The memory is not currently writable, but can be made writable. */
			ret = mach_vm_protect(map, (mach_vm_offset_t)vaddr, (mach_vm_size_t)4, 0, reprotect | VM_PROT_WRITE);
		}
		else {
			/*
			 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
			 *
			 * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails.
			 */
			ret = mach_vm_protect(map, (mach_vm_offset_t)vaddr, (mach_vm_size_t)4, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
		}

		if (ret != KERN_SUCCESS) {
			vm_map_deallocate(map);			/* Drop reference on map */
			return (-1);
		}

	}
	else {
		/* The memory was already writable. */
		reprotect = VM_PROT_NONE;
	}

	instr = inst;							/* Place instruction in local memory */
	ret = vm_map_write_user(map, &instr, (vm_map_address_t)vaddr, (vm_size_t)4);	/* Write the instruction */
	if (ret != KERN_SUCCESS) {				/* Leave if it didn't work */

		if (reprotect != VM_PROT_NONE) {
			ret = mach_vm_protect (map, (mach_vm_offset_t)vaddr, (mach_vm_size_t)4, 0, reprotect);
		}

		vm_map_deallocate(map);				/* Drop reference on map */
		return (-1);
	}

	paddr = (addr64_t)pmap_find_phys(map->pmap, vaddr) << 12;	/* Find the physical address of the patched address */
	if(!paddr) {							/* Is address mapped now? */
		vm_map_deallocate(map);				/* Drop reference on map */
		return 0;							/* Leave... */
	}
	paddr = paddr | (vaddr & 4095);			/* Construct physical address */
	invalidate_icache64(paddr, 4, 1);		/* Flush out the instruction cache here */

	if (reprotect != VM_PROT_NONE) {
		ret = mach_vm_protect(map, (mach_vm_offset_t)vaddr, (mach_vm_size_t)4, 0, reprotect);
	}

	vm_map_deallocate(map);

	return (0);
}
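
/*
 * Usage sketch (hypothetical, for illustration only - there is no caller in
 * this file): a kernel debugger or dtrace-style facility could plant and later
 * remove a breakpoint in a user task by saving the original instruction word
 * and patching in one of the trap encodings defined above, e.g.:
 *
 *	uint32_t saved_instr;
 *
 *	(void)copyin(vaddr, &saved_instr, sizeof(saved_instr));	// remember the original word
 *	if (patchInst(task, vaddr, BREAK_TO_KDP0) != 0)			// plant "tw 31,r0,r0"
 *		return KERN_FAILURE;
 *	...
 *	(void)patchInst(task, vaddr, saved_instr);				// restore on removal
 *
 * The copyin() above assumes the caller is running in the context of "task";
 * otherwise the original word would have to be fetched through the task's map.
 */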