/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_kdp.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>
#include <i386/mp.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
#include <libkern/OSAtomic.h>

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <kern/kalloc.h>

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr);
extern void machine_trace_thread_clear_validation_cache(void);

void		print_saved_state(void *);
void		kdp_call(void);
int		kdp_getc(void);
boolean_t	kdp_call_kdb(void);
void		kdp_getstate(x86_thread_state64_t *);
void		kdp_setstate(x86_thread_state64_t *);
void		kdp_print_phys(int);

int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);

int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);

unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);

static void	kdp_callouts(kdp_event_t event);

void
kdp_exception(
    unsigned char	*pkt,
    int	*len,
    unsigned short	*remote_port,
    unsigned int	exception,
    unsigned int	code,
    unsigned int	subcode
)
{
    kdp_exception_t	*rq = (kdp_exception_t *)pkt;

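    /* Build the exception notification in place in the supplied packet buffer */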
    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}

boolean_t
kdp_exception_ack(
    unsigned char	*pkt,
    int			len
)
{
    kdp_exception_ack_t	*rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof (*rq))
	return(FALSE);

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
	return(FALSE);

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
	kdp.exception_ack_needed = FALSE;
	kdp.exception_seq++;
    }
    return(TRUE);
}

void
kdp_getstate(
    x86_thread_state64_t	*state
)
{
    x86_saved_state64_t	*saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8  = saved_state->r8;
    state->r9  = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}


void
kdp_setstate(
    x86_thread_state64_t	*state
)
{
    x86_saved_state64_t		*saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;
    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8  = state->r8;
    saved_state->r9  = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}


kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
)
{
    static x86_float_state64_t  null_fpstate;

    switch (flavor) {

    case x86_THREAD_STATE64:
	dprintf(("kdp_readregs THREAD_STATE64\n"));
	kdp_getstate((x86_thread_state64_t *)data);
	*size = sizeof (x86_thread_state64_t);
	return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
	dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
	*(x86_float_state64_t *)data = null_fpstate;
	*size = sizeof (x86_float_state64_t);
	return KDPERR_NO_ERROR;

    default:
	dprintf(("kdp_readregs bad flavor %d\n", flavor));
	*size = 0;
	return KDPERR_BADFLAVOR;
    }
}

kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
)
{
    switch (flavor) {

    case x86_THREAD_STATE64:
	dprintf(("kdp_writeregs THREAD_STATE64\n"));
	kdp_setstate((x86_thread_state64_t *)data);
	return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
	dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
	return KDPERR_NO_ERROR;

    default:
	dprintf(("kdp_writeregs bad flavor %d\n", flavor));
	return KDPERR_BADFLAVOR;
    }
}



void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int			i;

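    /* Build a bitmask of the processors that are actually present */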
    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
	if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}

void
kdp_panic(
    const char		*msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}


void
kdp_machine_reboot(void)
{
	printf("Attempting system restart...");
	/* Call the platform-specific restart */
	if (PE_halt_restart)
		(*PE_halt_restart)(kPERestartCPU);
	/* If we reach this point, the platform restart failed; give up and halt */
	halt_all_cpus(TRUE);
}

int
kdp_intr_disbl(void)
{
	return splhigh();
}

void
kdp_intr_enbl(int s)
{
	splx(s);
}

int
kdp_getc(void)
{
	return cnmaygetc();
}

void
kdp_us_spin(int usec)
{
    delay(usec/100);
}

void
print_saved_state(void *state)
{
    x86_saved_state64_t		*saved_state;

    saved_state = state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}

void
kdp_sync_cache(void)
{
	return;	/* No op here. */
}

void
kdp_call(void)
{
	__asm__ volatile ("int	$3");	/* Let the processor do the work */
}


typedef struct _cframe_t {
    struct _cframe_t	*prev;
    unsigned		caller;
    unsigned		args[0];
} cframe_t;

extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;

void
kdp_print_phys(int src)
{
	unsigned int   *iptr;
	int             i;

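	/* Map the requested physical page through the debugger's scratch PTE;
	 * 0x63 sets the present, writable, accessed and dirty bits */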
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((uintptr_t) DADDR2);
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	*(int *) DMAP2 = 0;
}

boolean_t
kdp_i386_trap(
    unsigned int	trapno,
    x86_saved_state64_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		trapno, saved_state->isf.rip, saved_state->cr2);
	if (!kdp.is_conn)
	    return FALSE;
    }

    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

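    /* If the single-step trace flag (EFL_TF) is set, re-enable preemption
     * here; it is disabled again on exit below */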
    if (saved_state->isf.rflags & EFL_TF) {
	    enable_preemption_no_check();
    }

    switch (trapno) {

    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;

    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;

    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;

    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;

    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;

    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;

    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= (unsigned int)saved_state->isf.err;
	break;

    case T_PAGE_FAULT:
	exception = EXC_BAD_ACCESS;
	code = result;
	subcode = (unsigned int)va;
	break;

    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;

    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;

    default:
	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
	    current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
	    saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single-step bit is set, disable kernel preemption */
    if (saved_state->isf.rflags & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}

boolean_t
kdp_call_kdb(
	void)
{
	return(FALSE);
}

void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
)
{
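	/* 0xCC is the single-byte x86 breakpoint instruction, 'int3' */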
	bytes[0] = 0xcc;
	*size = 1;
}

extern pmap_t kdp_pmap;

#define RETURN_OFFSET 4	/* offset of the return address above the saved frame pointer */

int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);
	vm_offset_t kern_virt_addr = 0;

	if (user_p) {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);
		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		stacklimit = 0xffffffff;
		kdp_pmap = thread->task->map->pmap;
	}
	else
		panic("32-bit trace attempted on 64-bit kernel");

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		if ((tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

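		/* Frame pointers must be strictly increasing */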
		if (stackptr <= prevsp) {
			break;
		}

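		/* Frame pointer must stay below the stack limit */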
		if (stackptr > stacklimit) {
			break;
		}

		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET);

		if (!kern_virt_addr) {
			break;
		}

		*tracebuf = *(uint32_t *)kern_virt_addr;
		tracebuf++;

		prevsp = stackptr;
		kern_virt_addr = machine_trace_thread_get_kva(stackptr);

		if (!kern_virt_addr) {
			*tracebuf++ = 0;
			break;
		}

		stackptr = *(uint32_t *)kern_virt_addr;
	}

	machine_trace_thread_clear_validation_cache();
	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}


#define RETURN_OFFSET64	8
/* Routine to encapsulate the 64-bit address read hack */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}

int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;
	addr64_t stackptr = 0;
	int	 framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);
	vm_offset_t kern_virt_addr = 0;

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_rip;
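		/* Remove the kernel slide (KASLR) so the PC matches the unslid symbol addresses */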
		init_rip = VM_KERNEL_UNSLIDE(init_rip);
		kdp_pmap = 0;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

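		/* Invalid frame, or hit fence */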
		if (!stackptr || (stackptr == fence)) {
			break;
		}

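		/* Unaligned frame */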
		if (stackptr & 0x0000007) {
			break;
		}

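		/* Frame pointers must be strictly increasing */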
		if (stackptr <= prevsp) {
			break;
		}

		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64);

		if (!kern_virt_addr) {
			break;
		}

		*tracebuf = *(uint64_t *)kern_virt_addr;
		if (!user_p)
			*tracebuf = VM_KERNEL_UNSLIDE(*tracebuf);

		tracebuf++;

		prevsp = stackptr;
		kern_virt_addr = machine_trace_thread_get_kva(stackptr);

		if (!kern_virt_addr) {
			*tracebuf++ = 0;
			break;
		}

		stackptr = *(uint64_t *)kern_virt_addr;
	}

	machine_trace_thread_clear_validation_cache();
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}

static struct kdp_callout {
	struct kdp_callout	*callout_next;
	kdp_callout_fn_t	callout_fn;
	void			*callout_arg;
} *kdp_callout_list = NULL;


/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(
	kdp_callout_fn_t	fn,
	void			*arg)
{
	struct kdp_callout	*kcp;
	struct kdp_callout	*list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL)
		panic("kdp_register_callout() kalloc failed");

	kcp->callout_fn  = fn;
	kcp->callout_arg = arg;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
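
/*
 * Example (hypothetical client code, for illustration only): a subsystem
 * that needs to quiesce hardware while the debugger owns the machine
 * might register a callout like this. The callout is invoked with the
 * registered argument and the kdp event on debugger entry and exit.
 *
 *	static void
 *	my_kdp_hook(void *arg, kdp_event_t event)
 *	{
 *		if (event == KDP_EVENT_ENTER) {
 *			// quiesce the device
 *		} else if (event == KDP_EVENT_EXIT) {
 *			// resume the device
 *		}
 *	}
 *
 *	kdp_register_callout(my_kdp_hook, NULL);
 */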

/*
 * Called at exception/panic time when entering or exiting kdp.
 * We are single-threaded at this time and so we don't use locks.
 */
static void
kdp_callouts(kdp_event_t event)
{
	struct kdp_callout	*kcp = kdp_callout_list;

	while (kcp) {
		kcp->callout_fn(kcp->callout_arg, event);
		kcp = kcp->callout_next;
	}
}

void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}