/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>

#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <stdarg.h>
#if !MACH_KDP
#include <kdp/kdp_udp.h>
#endif

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <uuid/uuid.h>

unsigned int	halt_in_debugger = 0;
unsigned int	switch_debugger = 0;
unsigned int	current_debugger = 0;
unsigned int	active_debugger = 0;
unsigned int	debug_mode = 0;
unsigned int	disable_debug_output = TRUE;
unsigned int	systemLogDiags = FALSE;
unsigned int	panicDebugging = FALSE;
unsigned int	logPanicDataToScreen = FALSE;

int mach_assert = 1;

const char		*panicstr = (char *) 0;
decl_simple_lock_data(,panic_lock)
int			paniccpu;
volatile int		panicwait;
volatile unsigned int	nestedpanic = 0;
unsigned int		panic_is_inited = 0;
unsigned int		return_on_panic = 0;
unsigned long		panic_caller;

#if CONFIG_EMBEDDED
#define DEBUG_BUF_SIZE (PAGE_SIZE)
#else
#define DEBUG_BUF_SIZE (3 * PAGE_SIZE)
#endif

char debug_buf[DEBUG_BUF_SIZE];
char *debug_buf_ptr = debug_buf;
unsigned int debug_buf_size = sizeof(debug_buf);

static char model_name[64];
/* uuid_string_t */ char kernel_uuid[37];

static spl_t panic_prologue(const char *str);
static void panic_epilogue(spl_t s);

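/*
 * Eight 7-bit fields packed into 7 bytes.  packA()/unpackA() below use this
 * layout to squeeze 8 ASCII characters into 7 bytes of the debug buffer and
 * to expand them again.
 */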
struct pasc {
  unsigned a: 7;
  unsigned b: 7;
  unsigned c: 7;
  unsigned d: 7;
  unsigned e: 7;
  unsigned f: 7;
  unsigned g: 7;
  unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;

/* Prevent CPP from breaking the definition below */
#if CONFIG_NO_PANIC_STRINGS
#undef Assert
#endif

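/*
 * Assertion failure handler.  Honors the mach_assert tunable, and
 * temporarily forces return_on_panic so the resulting panic returns here
 * rather than halting the machine.
 */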
void
Assert(
	const char	*file,
	int		line,
	const char	*expression
      )
{
	int saved_return_on_panic;

	if (!mach_assert) {
		return;
	}

	saved_return_on_panic = return_on_panic;
	return_on_panic = 1;

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);

	return_on_panic = saved_return_on_panic;
}

/*
 *	Carefully use the panic_lock.  There's always a chance that
 *	somehow we'll call panic before getting to initialize the
 *	panic_lock -- in this case, we'll assume that the world is
 *	in uniprocessor mode and just avoid using the panic lock.
 */
#define	PANIC_LOCK()							\
MACRO_BEGIN								\
	if (panic_is_inited)						\
		simple_lock(&panic_lock);				\
MACRO_END

#define	PANIC_UNLOCK()							\
MACRO_BEGIN								\
	if (panic_is_inited)						\
		simple_unlock(&panic_lock);				\
MACRO_END

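/*
 * One-time panic machinery setup: capture the running kernel's UUID for
 * panic reports and initialize the panic lock.
 */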
void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid);
	}

	simple_lock_init(&panic_lock, 0);
	panic_is_inited = 1;
	panic_caller = 0;
}

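/*
 * Initialize the debug buffer pointers.  debug_buf and debug_buf_size are
 * statically initialized above, so this only takes effect if
 * debug_buf_size is still zero.
 */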
void
debug_log_init(void)
{
	if (debug_buf_size != 0)
		return;
	debug_buf_ptr = debug_buf;
	debug_buf_size = sizeof(debug_buf);
}

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()	pmCPUHalt(PM_HALT_PANIC)
#define panic_safe()	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE)
#define panic_normal()	pmSafeMode(x86_lcpu(), PM_SAFE_FL_NORMAL)
#else
#define panic_stop()	Halt_system()
#define panic_safe()
#define panic_normal()
#endif

/*
 * Prevent CPP from breaking the definition below,
 * since all clients get a #define to prepend line numbers
 */
#undef panic

void _consume_panic_args(int a __unused, ...)
{
	panic("panic");
}

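/*
 * Common entry work for panic() and panic_context(): dump the trace buffer
 * if tracing is enabled, raise spl and disable preemption, echo the raw
 * panic string on x86, and take the panic lock.  If a panic is already in
 * progress on another cpu, wait for it to print its message before
 * restarting; a second panic on the same cpu is reported as a double panic
 * and the system is halted.  Returns the spl value for panic_epilogue().
 */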
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (kdebug_enable) {
		ml_set_interrupts_enabled(TRUE);
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

#ifndef __arm__	/* xxx show all panic output for now. */
	if (logPanicDataToScreen)
#endif
		disable_debug_output = FALSE;
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until the message has been printed to identify
			 * the cpu that raised the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return (s);
}

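/*
 * Common exit work for panic() and panic_context(): clear panicstr and
 * either resume normal operation (when return_on_panic is set) or halt the
 * system.
 */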
static void
panic_epilogue(spl_t	s)
{
	/*
	 * Release panicstr so that other panics can be handled normally.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();

	if (return_on_panic) {
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}
	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}

extern int enable_timing;

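/*
 * Format and print the panic string on the panicking cpu, drop into the
 * debugger, then halt (or return, if return_on_panic is set) via
 * panic_epilogue().
 */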
void
panic(const char *str, ...)
{
	va_list	listp;
	spl_t	s;

	if (enable_timing)
		enable_timing = 0;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if (!panic_caller)
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	enable_timing = 1;

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");
	panic_epilogue(s);
}

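/*
 * Variant of panic() that passes a reason code and saved machine state
 * through to DebuggerWithContext().
 */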
void
panic_context(unsigned int reason, void *ctx, const char *str, ...)
{
	va_list	listp;
	spl_t	s;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if (!panic_caller)
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	DebuggerWithContext(reason, ctx, "panic");
	panic_epilogue(s);
}

void
log(__unused int level, char *fmt, ...)
{
	va_list	listp;

#ifdef lint
	level++;
#endif /* lint */
#ifdef	MACH_BSD
	disable_preemption();
	va_start(listp, fmt);
	_doprnt(fmt, &listp, conslog_putc, 0);
	va_end(listp);
	enable_preemption();
#endif
}

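/*
 * Append a character to the static debug buffer, silently dropping output
 * once the buffer is full.
 */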
void
debug_putc(char c)
{
	if ((debug_buf_size != 0) &&
	    ((debug_buf_ptr - debug_buf) < (int)debug_buf_size)) {
		*debug_buf_ptr = c;
		debug_buf_ptr++;
	}
}

/* In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8.
 */

int packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int i, j = 0;
	pasc_t pack;

	length = MIN(((length + 7) & ~7), buflen);

	for (i = 0; i < length; i += 8) {
		pack.a = inbuf[i];
		pack.b = inbuf[i+1];
		pack.c = inbuf[i+2];
		pack.d = inbuf[i+3];
		pack.e = inbuf[i+4];
		pack.f = inbuf[i+5];
		pack.g = inbuf[i+6];
		pack.h = inbuf[i+7];
		bcopy((char *) &pack, inbuf + j, 7);
		j += 7;
	}
	return j;
}

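/*
 * Inverse of packA(): expand each group of 7 packed bytes back into 8
 * characters, shifting the remaining buffer contents up by one byte as it
 * goes to make room for the expanded output.
 */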
void unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8) / 7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}

extern void *proc_name_address(void *p);

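/*
 * Best-effort print of the BSD process name for the current thread, using
 * ml_nofault_copy() so a corrupted task or proc structure cannot trigger a
 * nested fault during the panic.
 */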
static void
panic_display_process_name(void) {
	char proc_name[32] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(&ctask->bsd_info)) == sizeof(&ctask->bsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}

unsigned	panic_active(void) {
	return ((panicstr != (char *) 0));
}

void populate_model_name(char *model_string) {
	strlcpy(model_name, model_string, sizeof(model_name));
}

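/*
 * The panic_display_* helpers below read their source data through
 * ml_nofault_copy() where practical, so corrupted globals cannot fault
 * while the panic log is being written.
 */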
static void panic_display_model_name(void) {
#ifndef __arm__
	char tmp_model_name[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name))
		return;

	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

	if (tmp_model_name[0] != 0)
		kdb_printf("System model name: %s\n", tmp_model_name);
#else
	kdb_printf("System model name: %s\n", model_name);
#endif
}

static void panic_display_kernel_uuid(void) {
#ifndef __arm__
	char tmp_kernel_uuid[sizeof(kernel_uuid)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid)) != sizeof(kernel_uuid))
		return;

	if (tmp_kernel_uuid[0] != '\0')
		kdb_printf("Kernel UUID: %s\n", tmp_kernel_uuid);
#else
	kdb_printf("Kernel UUID: %s\n", kernel_uuid);
#endif
}

static void panic_display_kernel_aslr(void) {
#if	defined(__x86_64__) || defined(__arm__)
	if (vm_kernel_slide) {
		kdb_printf("Kernel slide:     0x%016lx\n", vm_kernel_slide);
		kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext);
	}
#endif
}

static void panic_display_uptime(void) {
	uint64_t	uptime;
	absolutetime_to_nanoseconds(mach_absolute_time(), &uptime);

	kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime);
}

extern const char version[];
extern char osversion[];

static volatile uint32_t config_displayed = 0;

#ifdef __arm__
#define	panic_display_pal_info() do { } while(0)
#endif

#ifdef __arm__
extern char firmware_version[32];
extern uint32_t debug_enabled;
#endif

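/*
 * Dump the one-shot system configuration section of the panic log: boot
 * args, OS and kernel versions, kernel UUID and slide, model name, uptime,
 * zone usage, and any kexts implicated in the panic.  Only the first
 * caller prints the configuration block; the process name is printed on
 * every call.
 */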
__private_extern__ void panic_display_system_configuration(void) {
	panic_display_process_name();
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
		char buf[256];
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
		    (osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n", version);
		panic_display_kernel_uuid();
		panic_display_kernel_aslr();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}

extern zone_t		first_zone;
extern unsigned int	num_zones, stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined(__x86_64__)
extern unsigned int	inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t	panic_include_zprint;

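/*
 * When panic_include_zprint is set (e.g. on a zone-exhaustion panic), walk
 * the zone list via ml_nofault_copy() and report zones larger than 1MB,
 * plus kernel stack, page table (x86), and kalloc.large usage.
 */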
__private_extern__ void panic_display_zprint(void)
{
	if (panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if (first_zone != NULL) {
			if (ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					if (zone_copy.cur_size > (1024 * 1024)) {
						kdb_printf("%.20s:%lu\n", zone_copy.zone_name, (uintptr_t)zone_copy.cur_size);
					}

					if (zone_copy.next_zone == NULL) {
						break;
					}

					if (ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n", (uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined(__x86_64__)
		kdb_printf("PageTables:%lu\n", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n", (uintptr_t)kalloc_large_total);
	}
}

#if CONFIG_ZLEAKS
extern boolean_t	panic_include_ztrace;
extern struct ztrace* top_ztrace;
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in
 * the zone allocator.  top_ztrace and panic_include_ztrace come from
 * osfmk/kern/zalloc.c.
 */
__private_extern__ void panic_display_ztrace(void)
{
	if (panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		struct ztrace top_ztrace_copy;

		/* Make sure not to trip another panic if there's something wrong with memory */
		if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) {
				kdb_printf("%p\n", top_ztrace_copy.zt_stack[i]);
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
#endif /* CONFIG_ZLEAKS */

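/*
 * When the kernel debugging protocol (KDP) is not built in, provide stub
 * versions of its interface so that callers elsewhere in the kernel still
 * link and get harmless default results.
 */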
#if !MACH_KDP
static struct ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};

/* XXX ugly forward declares to stop warnings */
void *kdp_get_interface(void);
void kdp_set_ip_and_mac_addresses(struct in_addr *, struct ether_addr *);
void kdp_set_gateway_mac(void *);
void kdp_set_interface(void *);
void kdp_register_send_receive(void *, void *);
void kdp_unregister_send_receive(void *, void *);
void kdp_snapshot_preflight(int, void *, uint32_t, uint32_t);
int kdp_stack_snapshot_geterror(void);
int kdp_stack_snapshot_bytes_traced(void);

void *
kdp_get_interface(void)
{
	return (void *)0;
}

unsigned int
kdp_get_ip_address(void)
{
	return 0;
}

struct ether_addr
kdp_get_mac_addr(void)
{
	return kdp_current_mac_address;
}

void
kdp_set_ip_and_mac_addresses(
	__unused struct in_addr		*ipaddr,
	__unused struct ether_addr	*macaddr)
{}

void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}

void
kdp_set_interface(__unused void *ifp)
{}

void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_snapshot_preflight(__unused int pid, __unused void * tracebuf,
		__unused uint32_t tracebuf_size, __unused uint32_t options)
{}

int
kdp_stack_snapshot_geterror(void)
{
	return -1;
}

int
kdp_stack_snapshot_bytes_traced(void)
{
	return 0;
}

#endif