1/*
2 * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29// NOTE:  This file is only c++ so I can get static initialisers going
30#include <libkern/OSDebug.h>
31#include <IOKit/IOLib.h>
32
33#include <sys/cdefs.h>
34
35#include <stdarg.h>
36#include <mach/mach_types.h>
37#include <mach/kmod.h>
38#include <kern/locks.h>
39
40#include <libkern/libkern.h>	// From bsd's libkern directory
41#include <mach/vm_param.h>
42
43#include <sys/kdebug.h>
44#include <kern/thread.h>
45
46extern int etext;
47__BEGIN_DECLS
48// From osmfk/kern/thread.h but considered to be private
49extern vm_offset_t min_valid_stack_address(void);
50extern vm_offset_t max_valid_stack_address(void);
51
52// From osfmk/kmod.c
53extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);
54
55extern addr64_t kvtophys(vm_offset_t va);
56
57__END_DECLS
58
extern lck_grp_t *IOLockGroup;

// Mutex serializing OSReportWithBacktrace() output so concurrent reports
// are not interleaved.  Allocated via a static initialiser, which is the
// reason this file is compiled as C++ (see note at top of file).
static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
62
63/* Use kernel_debug() to log a backtrace */
64void
65trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) {
66	void *bt[16];
67	const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
68  	unsigned i;
69	int found = 0;
70
71	OSBacktrace(bt, cnt);
72
73	/* find first non-kernel frame */
74  	for (i = 3; i < cnt && bt[i]; i++) {
75 		if (bt[i] > (void*)&etext) {
76			found = 1;
77  			break;
78		}
79	}
80	/*
81	 * if there are non-kernel frames, only log these
82	 * otherwise, log everything but the first two
83	 */
84	if (!found) i=2;
85
86#define safe_bt(a) (uintptr_t)(a<cnt ? bt[a] : 0)
87	kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i+1), 0);
88	kernel_debug(debugid2, safe_bt(i+2), safe_bt(i+3), safe_bt(i+4), safe_bt(i+5), 0);
89}
90
/* Report a message with a 7 entry backtrace - very slow.
 *
 * Formats the printf-style message into a 128-byte buffer (longer
 * messages are truncated), then prints it together with backtrace
 * frames 2..8 and a kmod dump of the same frames.  Frames 0 and 1
 * (this function and its caller) are deliberately skipped.
 * OSBacktrace() zero-fills unused slots, so reading all of bt[2..8]
 * is safe even for shallow stacks.  sOSReportLock serializes
 * concurrent reporters so their output is not interleaved.
 */
void
OSReportWithBacktrace(const char *str, ...)
{
    char buf[128];
    void *bt[9];
    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
    va_list listp;

    // Ignore our own and our caller's stack frames, skipping frames 0 & 1
    (void) OSBacktrace(bt, cnt);

    va_start(listp, str);
    vsnprintf(buf, sizeof(buf), str, listp);
    va_end(listp);

    lck_mtx_lock(sOSReportLock);
    {
        // VM_KERNEL_UNSLIDE removes the KASLR slide so the printed
        // addresses can be matched against symbol files.
        printf("%s\nBacktrace 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", buf,
            (unsigned long) VM_KERNEL_UNSLIDE(bt[2]), (unsigned long) VM_KERNEL_UNSLIDE(bt[3]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[4]), (unsigned long) VM_KERNEL_UNSLIDE(bt[5]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[6]), (unsigned long) VM_KERNEL_UNSLIDE(bt[7]),
            (unsigned long) VM_KERNEL_UNSLIDE(bt[8]));
        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2, TRUE);  /* doUnslide = TRUE */
    }
    lck_mtx_unlock(sOSReportLock);
}
118
// Kernel stack bounds, captured once at static-initialisation time.
// NOTE(review): both are static and unreferenced in the visible portion of
// this file -- presumably vestigial from an earlier backtrace validator;
// confirm before removing.
static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();
121
122
123#if __x86_64__
124#define x86_64_RETURN_OFFSET 8
125static unsigned int
126x86_64_validate_raddr(vm_offset_t raddr)
127{
128	return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
129	    (raddr < VM_MAX_KERNEL_ADDRESS));
130}
131static unsigned int
132x86_64_validate_stackptr(vm_offset_t stackptr)
133{
134	/* Existence and alignment check
135	 */
136	if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr))
137		return 0;
138
139	/* Is a virtual->physical translation present?
140	 */
141	if (!kvtophys(stackptr))
142		return 0;
143
144	/* Check if the return address lies on the same page;
145	 * If not, verify that a translation exists.
146	 */
147	if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
148	    !kvtophys(stackptr + x86_64_RETURN_OFFSET))
149		return 0;
150	return 1;
151}
152#endif
153
154void
155OSPrintBacktrace(void)
156{
157	void * btbuf[20];
158	int tmp = OSBacktrace(btbuf, 20);
159	int i;
160	for(i=0;i<tmp;i++)
161	{
162		kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
163	}
164}
165
/* Walk the %rbp frame-pointer chain and store up to maxAddrs return
 * addresses into bt[].  Returns the number of valid frames captured;
 * the unused tail of bt[] is zero-filled.  bt[0] is the return address
 * of OSBacktrace's own caller (the walk starts from the current %rbp,
 * which already points at this function's frame).
 */
unsigned OSBacktrace(void **bt, unsigned maxAddrs)
{
    unsigned frame;

#if   __x86_64__
/* Reject any single frame-to-frame hop larger than half the kernel
 * stack -- a jump that big means the chain is almost certainly corrupt. */
#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
    vm_offset_t stackptr, stackptr_prev, raddr;
    unsigned frame_index = 0;
/* Obtain current frame pointer */

    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));

    if (!x86_64_validate_stackptr(stackptr))
	    goto pad;

    /* The return address sits x86_64_RETURN_OFFSET (8) bytes above the
     * saved frame pointer. */
    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

    if (!x86_64_validate_raddr(raddr))
	    goto pad;

    bt[frame_index++] = (void *) raddr;

    for ( ; frame_index < maxAddrs; frame_index++) {
	    /* Follow the saved-%rbp link up to the caller's frame. */
	    stackptr_prev = stackptr;
	    stackptr = *((vm_offset_t *) stackptr_prev);

	    if (!x86_64_validate_stackptr(stackptr))
		    break;
	/* Stack grows downwards */
	    if (stackptr < stackptr_prev)
		    break;

	    if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE)
		    break;

	    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));

	    if (!x86_64_validate_raddr(raddr))
		    break;

	    bt[frame_index] = (void *) raddr;
    }
pad:
    frame = frame_index;

    /* Zero-fill the unused tail so callers may read all maxAddrs entries
     * unconditionally (OSReportWithBacktrace depends on this). */
    for ( ; frame_index < maxAddrs; frame_index++)
	    bt[frame_index] = (void *) 0;
#else
#error arch
#endif
    return frame;
}
218