1/*
2 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29// NOTE:  This file is only c++ so I can get static initialisers going
30#include <libkern/OSDebug.h>
31#include <IOKit/IOLib.h>
32
33#include <sys/cdefs.h>
34
35#include <stdarg.h>
36#include <mach/mach_types.h>
37#include <mach/kmod.h>
38#include <kern/locks.h>
39
40#include <libkern/libkern.h>	// From bsd's libkern directory
41#include <mach/vm_param.h>
42
43#include <sys/kdebug.h>
44#include <kern/thread.h>
45
46#if defined(__arm__) || defined(__aarch64__)
47extern "C" {
48extern int copyinframe(vm_address_t fp, uint32_t *frame);
49}
50#endif
51
52extern int etext;
53__BEGIN_DECLS
// From osfmk/kern/thread.h but considered to be private
55extern vm_offset_t min_valid_stack_address(void);
56extern vm_offset_t max_valid_stack_address(void);
57
58// From osfmk/kmod.c
59extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);
60
61extern addr64_t kvtophys(vm_offset_t va);
62
63__END_DECLS
64
extern lck_grp_t *IOLockGroup;

// Serializes OSReportWithBacktrace() output; allocated at static-init time
// (this file is compiled as C++ so this initializer runs — see note at top).
static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
68
69/* Use kernel_debug() to log a backtrace */
70void
71trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) {
72	void *bt[16];
73	const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
74  	unsigned i;
75	int found = 0;
76
77	OSBacktrace(bt, cnt);
78
79	/* find first non-kernel frame */
80  	for (i = 3; i < cnt && bt[i]; i++) {
81 		if (bt[i] > (void*)&etext) {
82			found = 1;
83  			break;
84		}
85	}
86	/*
87	 * if there are non-kernel frames, only log these
88	 * otherwise, log everything but the first two
89	 */
90	if (!found) i=2;
91
92#define safe_bt(a) (uintptr_t)(a<cnt ? bt[a] : 0)
93	kernel_debug(debugid, data, size, safe_bt(i), safe_bt(i+1), 0);
94	kernel_debug(debugid2, safe_bt(i+2), safe_bt(i+3), safe_bt(i+4), safe_bt(i+5), 0);
95}
96
97/* Report a message with a 4 entry backtrace - very slow */
98void
99OSReportWithBacktrace(const char *str, ...)
100{
101    char buf[128];
102    void *bt[9];
103    const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
104    va_list listp;
105
106    // Ignore the our and our callers stackframes, skipping frames 0 & 1
107    (void) OSBacktrace(bt, cnt);
108
109    va_start(listp, str);
110    vsnprintf(buf, sizeof(buf), str, listp);
111    va_end(listp);
112
113    lck_mtx_lock(sOSReportLock);
114    {
115        printf("%s\nBacktrace %p %p %p %p %p %p %p\n",
116            buf, bt[2], bt[3], bt[4], bt[5], bt[6], bt[7], bt[8]);
117        kmod_dump_log((vm_offset_t *) &bt[2], cnt - 2);
118    }
119    lck_mtx_unlock(sOSReportLock);
120}
121
// Stack bounds cached once at static-initialization time.
// NOTE(review): neither is referenced elsewhere in this file — confirm
// whether they are still needed or are leftovers.
static vm_offset_t minstackaddr = min_valid_stack_address();
static vm_offset_t maxstackaddr = max_valid_stack_address();
124
125#if __i386__
126#define i386_RETURN_OFFSET 4
127
128static unsigned int
129i386_validate_stackptr(vm_offset_t stackptr)
130{
131	/* Existence and alignment check
132	 */
133	if (!stackptr || (stackptr & 0x3))
134		return 0;
135
136	/* Is a virtual->physical translation present?
137	 */
138	if (!kvtophys(stackptr))
139		return 0;
140
141	/* Check if the return address lies on the same page;
142	 * If not, verify that a translation exists.
143	 */
144	if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
145	    !kvtophys(stackptr + i386_RETURN_OFFSET))
146		return 0;
147	return 1;
148}
149
150static unsigned int
151i386_validate_raddr(vm_offset_t raddr)
152{
153	return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
154	    (raddr < VM_MAX_KERNEL_ADDRESS));
155}
156#endif
157
158#if __x86_64__
159#define x86_64_RETURN_OFFSET 8
160static unsigned int
161x86_64_validate_raddr(vm_offset_t raddr)
162{
163	return ((raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
164	    (raddr < VM_MAX_KERNEL_ADDRESS));
165}
166static unsigned int
167x86_64_validate_stackptr(vm_offset_t stackptr)
168{
169	/* Existence and alignment check
170	 */
171	if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr))
172		return 0;
173
174	/* Is a virtual->physical translation present?
175	 */
176	if (!kvtophys(stackptr))
177		return 0;
178
179	/* Check if the return address lies on the same page;
180	 * If not, verify that a translation exists.
181	 */
182	if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
183	    !kvtophys(stackptr + x86_64_RETURN_OFFSET))
184		return 0;
185	return 1;
186}
187#endif
188
189void
190OSPrintBacktrace(void)
191{
192	void * btbuf[20];
193	int tmp = OSBacktrace(btbuf, 20);
194	int i;
195	for(i=0;i<tmp;i++)
196	{
197		kprintf("bt[%.2d] = %p\n", i, btbuf[i]);
198	}
199}
200
201unsigned OSBacktrace(void **bt, unsigned maxAddrs)
202{
203    unsigned frame;
204
205#if __i386__
206#define SANE_i386_FRAME_SIZE (kernel_stack_size >> 1)
207    vm_offset_t stackptr, stackptr_prev, raddr;
208    unsigned frame_index = 0;
209/* Obtain current frame pointer */
210    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
211
212    if (!i386_validate_stackptr(stackptr))
213	    goto pad;
214
215    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));
216
217    if (!i386_validate_raddr(raddr))
218	    goto pad;
219
220    bt[frame_index++] = (void *) raddr;
221
222    for ( ; frame_index < maxAddrs; frame_index++) {
223	    stackptr_prev = stackptr;
224	    stackptr = *((vm_offset_t *) stackptr_prev);
225
226	    if (!i386_validate_stackptr(stackptr))
227		    break;
228	/* Stack grows downwards */
229	    if (stackptr < stackptr_prev)
230		    break;
231
232	    if ((stackptr - stackptr_prev) > SANE_i386_FRAME_SIZE)
233		    break;
234
235	    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));
236
237	    if (!i386_validate_raddr(raddr))
238		    break;
239
240	    bt[frame_index] = (void *) raddr;
241    }
242pad:
243    frame = frame_index;
244
245    for ( ; frame_index < maxAddrs; frame_index++)
246	    bt[frame_index] = (void *) 0;
247#elif __x86_64__
248#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
249    vm_offset_t stackptr, stackptr_prev, raddr;
250    unsigned frame_index = 0;
251/* Obtain current frame pointer */
252
253    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
254
255    if (!x86_64_validate_stackptr(stackptr))
256	    goto pad;
257
258    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));
259
260    if (!x86_64_validate_raddr(raddr))
261	    goto pad;
262
263    bt[frame_index++] = (void *) raddr;
264
265    for ( ; frame_index < maxAddrs; frame_index++) {
266	    stackptr_prev = stackptr;
267	    stackptr = *((vm_offset_t *) stackptr_prev);
268
269	    if (!x86_64_validate_stackptr(stackptr))
270		    break;
271	/* Stack grows downwards */
272	    if (stackptr < stackptr_prev)
273		    break;
274
275	    if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE)
276		    break;
277
278	    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));
279
280	    if (!x86_64_validate_raddr(raddr))
281		    break;
282
283	    bt[frame_index] = (void *) raddr;
284    }
285pad:
286    frame = frame_index;
287
288    for ( ; frame_index < maxAddrs; frame_index++)
289	    bt[frame_index] = (void *) 0;
290#elif __arm__
291    uint32_t i = 0;
292    uint32_t fp = 0;
293    uint32_t frameb[2];
294
295    /* Get the frame pointer from the current thread */
296    __asm__ __volatile("mov %0, r7" : "=r" (fp));
297
298    /* Crawl up the stack recording the link value of each frame */
299    do {
300        /* Check boundaries */
301        if ((fp == 0) || ((fp & 3) != 0) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_ADDRESS))
302            break;
303
304        /* Safeley read frame */
305        if (copyinframe(fp, frameb) != 0)
306            break;
307
308        /* No need to use copyin as this is always a kernel address, see check above */
309        bt[i] = (void*)frameb[1]; /* link register */
310        fp = frameb[0];
311    } while (i++ < maxAddrs);
312
313    frame = i;
314#elif __aarch64__
315#warning "TODO: arm64 OSBacktrace"
316    panic("OSBacktrace is not implemented!\n");
317#else
318#error "Unsupported architecture"
319#endif
320    return frame;
321}
322