1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach_kdp.h>
30#include <mach/mach_types.h>
31#include <mach/machine.h>
32#include <mach/exception_types.h>
33#include <kern/cpu_data.h>
34#include <i386/trap.h>
35#include <i386/mp.h>
36#include <kdp/kdp_internal.h>
37#include <kdp/kdp_callout.h>
38#include <mach-o/loader.h>
39#include <mach-o/nlist.h>
40#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41#include <kern/machine.h> /* for halt_all_cpus */
42#include <libkern/OSAtomic.h>
43
44#include <kern/thread.h>
45#include <i386/thread.h>
46#include <vm/vm_map.h>
47#include <i386/pmap.h>
48#include <kern/kalloc.h>
49
50#define KDP_TEST_HARNESS 0
51#if KDP_TEST_HARNESS
52#define dprintf(x) printf x
53#else
54#define dprintf(x)
55#endif
56
57extern cpu_type_t cpuid_cputype(void);
58extern cpu_subtype_t cpuid_cpusubtype(void);
59
60void		print_saved_state(void *);
61void		kdp_call(void);
62int		kdp_getc(void);
63boolean_t	kdp_call_kdb(void);
64void		kdp_getstate(i386_thread_state_t *);
65void		kdp_setstate(i386_thread_state_t *);
66void		kdp_print_phys(int);
67
68int
69machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
70
71int
72machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
73
74unsigned
75machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
76
77static void	kdp_callouts(kdp_event_t event);
78
/*
 * Compose a KDP_EXCEPTION notification packet, in place, in the supplied
 * buffer and direct it at the remote debugger's exception port.
 *
 *   pkt         - packet buffer; the request is built directly into it
 *   len         - out: total length in bytes of the composed packet
 *   remote_port - out: port the packet should be sent to (kdp.exception_port)
 *   exception, code, subcode - Mach exception triple being reported
 *
 * Side effects: sets kdp.exception_ack_needed so the transport retries
 * until kdp_exception_ack() observes a matching reply.
 */
void
kdp_exception(
    unsigned char	*pkt,
    int	*len,
    unsigned short	*remote_port,
    unsigned int	exception,
    unsigned int	code,
    unsigned int	subcode
)
{
    kdp_exception_t	*rq = (kdp_exception_t *)pkt;

    /* Fixed header; seq ties the eventual ack back to this request. */
    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    /* A single exception record describing this event (CPU reported as 0). */
    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    /* rq aliases pkt (same pointer), so this copy is effectively a no-op
     * kept for uniformity with other packet builders. */
    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
112
113boolean_t
114kdp_exception_ack(
115    unsigned char	*pkt,
116    int			len
117)
118{
119    kdp_exception_ack_t	*rq = (kdp_exception_ack_t *)pkt;
120
121    if (((unsigned int) len) < sizeof (*rq))
122	return(FALSE);
123
124    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
125    	return(FALSE);
126
127    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
128
129    if (rq->hdr.seq == kdp.exception_seq) {
130	kdp.exception_ack_needed = FALSE;
131	kdp.exception_seq++;
132    }
133    return(TRUE);
134}
135
/*
 * Capture the interrupted context's registers for the debugger.
 * Copies from the trap frame saved in kdp.saved_state into a
 * x86_thread_state32_t, starting from an all-zero template so every
 * field not explicitly set below reads as 0.
 */
void
kdp_getstate(
    x86_thread_state32_t	*state
)
{
    static x86_thread_state32_t	null_state;
    x86_saved_state32_t	*saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    /* Zero everything, then fill in the general-purpose registers. */
    *state = null_state;
    state->eax = saved_state->eax;
    state->ebx = saved_state->ebx;
    state->ecx = saved_state->ecx;
    state->edx = saved_state->edx;
    state->edi = saved_state->edi;
    state->esi = saved_state->esi;
    state->ebp = saved_state->ebp;

    if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
	    /* In 64-bit CPU mode the frame carries the true kernel ESP.
	     * Otherwise no ESP was pushed for a same-privilege trap, so
	     * reconstruct it as the address just past the 32-bit
	     * saved-state area inside the frame. */
	    if (cpu_mode_is64bit())
		    state->esp = (uint32_t) saved_state->uesp;
	    else
		    state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
        state->ss = KERNEL_DS;
    } else {
	/* User-mode trap: ESP/SS were pushed by the CPU; report them as-is. */
    	state->esp = saved_state->uesp;
    	state->ss = saved_state->ss;
    }

    state->eflags = saved_state->efl;
    state->eip = saved_state->eip;
    state->cs = saved_state->cs;
    state->ds = saved_state->ds;
    state->es = saved_state->es;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
174
175
176void
177kdp_setstate(
178    x86_thread_state32_t	*state
179)
180{
181    x86_saved_state32_t		*saved_state;
182
183    saved_state = (x86_saved_state32_t *)kdp.saved_state;
184
185    saved_state->eax = state->eax;
186    saved_state->ebx = state->ebx;
187    saved_state->ecx = state->ecx;
188    saved_state->edx = state->edx;
189    saved_state->edi = state->edi;
190    saved_state->esi = state->esi;
191    saved_state->ebp = state->ebp;
192    saved_state->efl = state->eflags;
193#if	0
194    saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
195    saved_state->frame.eflags |=  ( EFL_IF | EFL_SET );
196#endif
197    saved_state->eip = state->eip;
198}
199
200
201kdp_error_t
202kdp_machine_read_regs(
203    __unused unsigned int cpu,
204    __unused unsigned int flavor,
205    char *data,
206    __unused int *size
207)
208{
209    static x86_float_state32_t  null_fpstate;
210
211    switch (flavor) {
212
213    case x86_THREAD_STATE32:
214	dprintf(("kdp_readregs THREAD_STATE\n"));
215	kdp_getstate((x86_thread_state32_t *)data);
216	*size = sizeof (x86_thread_state32_t);
217	return KDPERR_NO_ERROR;
218
219    case x86_FLOAT_STATE32:
220	dprintf(("kdp_readregs THREAD_FPSTATE\n"));
221	*(x86_float_state32_t *)data = null_fpstate;
222	*size = sizeof (x86_float_state32_t);
223	return KDPERR_NO_ERROR;
224
225    default:
226	dprintf(("kdp_readregs bad flavor %d\n", flavor));
227	*size = 0;
228	return KDPERR_BADFLAVOR;
229    }
230}
231
232kdp_error_t
233kdp_machine_write_regs(
234    __unused unsigned int cpu,
235    unsigned int flavor,
236    char *data,
237    __unused int *size
238)
239{
240    switch (flavor) {
241
242    case x86_THREAD_STATE32:
243	dprintf(("kdp_writeregs THREAD_STATE\n"));
244	kdp_setstate((x86_thread_state32_t *)data);
245	return KDPERR_NO_ERROR;
246
247    case x86_FLOAT_STATE32:
248	dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
249	return KDPERR_NO_ERROR;
250
251    default:
252	dprintf(("kdp_writeregs bad flavor %d\n"));
253	return KDPERR_BADFLAVOR;
254    }
255}
256
257
258
259void
260kdp_machine_hostinfo(
261    kdp_hostinfo_t *hostinfo
262)
263{
264    int			i;
265
266    hostinfo->cpus_mask = 0;
267
268    for (i = 0; i < machine_info.max_cpus; i++) {
269	if (cpu_data_ptr[i] == NULL)
270            continue;
271
272        hostinfo->cpus_mask |= (1 << i);
273    }
274
275    hostinfo->cpu_type = cpuid_cputype();
276    hostinfo->cpu_subtype = cpuid_cpusubtype();
277}
278
/*
 * Last-resort failure path for the debugger stub itself: log the message
 * to the serial console and halt this CPU.  Note this executes a single
 * HLT (it does not loop); if an interrupt wakes the CPU the function
 * simply returns to its caller.
 */
void
kdp_panic(
    const char		*msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
287
288
289void
290kdp_machine_reboot(void)
291{
292	printf("Attempting system restart...");
293	kprintf("Attempting system restart...");
294	/* Call the platform specific restart*/
295	if (PE_halt_restart)
296		(*PE_halt_restart)(kPERestartCPU);
297	/* If we do reach this, give up */
298	halt_all_cpus(TRUE);
299}
300
/*
 * Block all interrupts on this CPU for the duration of a debugger
 * session; returns the previous interrupt level for kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
	int previous_level = splhigh();
	return previous_level;
}
306
/*
 * Restore the interrupt level previously saved by kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
312
/*
 * Poll the console driver for a character on behalf of the debugger.
 */
int
kdp_getc(void)
{
	int ch = cnmaygetc();
	return ch;
}
318
/*
 * Busy-wait on behalf of the debugger transport.
 * NOTE(review): the microsecond argument is divided by 100 before being
 * handed to delay() — presumably delay()'s unit differs from plain
 * microseconds here; confirm against delay()'s definition, otherwise
 * this spins for 1/100th of the requested time.
 */
void
kdp_us_spin(int usec)
{
    delay(usec/100);
}
324
325void print_saved_state(void *state)
326{
327    x86_saved_state32_t		*saved_state;
328
329    saved_state = state;
330
331	kprintf("pc = 0x%x\n", saved_state->eip);
332	kprintf("cr2= 0x%x\n", saved_state->cr2);
333	kprintf("rp = TODO FIXME\n");
334	kprintf("sp = %p\n", saved_state);
335
336}
337
/*
 * Cache synchronization hook required by the machine-independent KDP
 * layer; nothing to do on this architecture.
 */
void
kdp_sync_cache(void)
{
}
343
/*
 * Enter the debugger voluntarily by executing a software breakpoint,
 * which vectors through the kernel trap handler into kdp_i386_trap().
 */
void
kdp_call(void)
{
	__asm__ volatile ("int	$3");	/* Let the processor do the work */
}
349
350
/*
 * Minimal view of an IA-32 stack frame as laid out by the standard
 * function prologue.
 */
typedef struct _cframe_t {
    struct _cframe_t	*prev;		/* saved frame pointer (caller's EBP) */
    unsigned		caller;		/* return address */
    unsigned		args[0];	/* caller-pushed arguments (GNU zero-length array) */
} cframe_t;
356
357#include <i386/pmap.h>
358extern pt_entry_t *DMAP2;
359extern caddr_t DADDR2;
360
/*
 * Dump 100 32-bit words of physical memory starting at (the page
 * containing) `src` to the serial console.  Works by pointing the
 * debugger's reserved mapping window (DMAP2/DADDR2) at the target
 * physical page, reading through the virtual alias, then tearing the
 * mapping down again.
 *
 * NOTE(review): 0x63 is written into the PTE along with the page-aligned
 * physical address — presumably present/writable plus accessed/dirty
 * bits; confirm against the pmap PTE bit definitions.  Also note the
 * newline is emitted when (i % 8) == 0, i.e. after the 1st, 9th, ...
 * word rather than after every full row of 8.
 */
void
kdp_print_phys(int src)
{
	unsigned int   *iptr;
	int             i;

	/* Map the target physical page at DADDR2 and flush the stale TLB entry. */
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((u_int) DADDR2);
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	/* Invalidate the window mapping again. */
	*(int *) DMAP2 = 0;

}
379
/*
 * Kernel trap entry point into the debugger stub.
 *
 * Translates an i386 trap number (plus the saved state, a kern_return_t
 * result for page faults, and the faulting VA) into a Mach exception
 * triple and hands it to kdp_raise_exception(), which converses with the
 * remote debugger.  All other CPUs are corralled via mp_kdp_enter() for
 * the duration, and registered callouts are notified on entry and exit.
 *
 * Returns TRUE when the trap was delivered to the debugger; FALSE only
 * for an unexpected trap while no debugger is connected (the caller then
 * handles it as an ordinary kernel trap/panic).
 */
boolean_t
kdp_i386_trap(
    unsigned int		trapno,
    x86_saved_state32_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    /* Only INT3/debug traps are "expected"; anything else is logged, and
     * bounced back to the normal trap path if no debugger is attached. */
    if (trapno != T_INT3 && trapno != T_DEBUG) {
    	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, EIP: 0x%x, CR2: 0x%x\n",
		trapno, saved_state->eip, saved_state->cr2);
	if (!kdp.is_conn)
	    return FALSE;
    }

    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    /* Single-step trap (trace flag set): re-enable preemption for the
     * debugger session; it is disabled again after the session below. */
    if (saved_state->efl & EFL_TF) {
	    enable_preemption_no_check();
    }

    /* Map the x86 trap number to a Mach (exception, code, subcode) triple. */
    switch (trapno) {

    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;

    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;

    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;

    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;

    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= saved_state->err;	/* hardware error code */
	break;

    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= saved_state->err;
	break;

    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= saved_state->err;
	break;

    case T_PAGE_FAULT:
	/* code carries the VM result, subcode the faulting address. */
    	exception = EXC_BAD_ACCESS;
	code = result;
	subcode = va;
	break;

    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;

    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;

    default:
	/* Unknown trap: report it as a bad instruction, trapno as code. */
    	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    /* Blocks here conversing with the remote debugger. */
    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->efl & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}
480
481boolean_t
482kdp_call_kdb(
483        void)
484{
485        return(FALSE);
486}
487
/*
 * Report the machine's breakpoint instruction to the KDP core:
 * the single-byte INT3 opcode (0xCC) on x86.
 *
 *   bytes - out: receives the instruction bytes (at least 1 byte)
 *   size  - out: number of instruction bytes written
 */
void
kdp_machine_get_breakinsn(uint8_t *bytes, uint32_t *size)
{
	bytes[0] = 0xcc;	/* INT3 */
	*size = 1;
}
497
498extern pmap_t kdp_pmap;
499
500#define RETURN_OFFSET 4
501int
502machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
503{
504	uint32_t *tracebuf = (uint32_t *)tracepos;
505	uint32_t fence = 0;
506	uint32_t stackptr = 0;
507	uint32_t stacklimit = 0xfc000000;
508	int framecount = 0;
509	uint32_t init_eip = 0;
510	uint32_t prevsp = 0;
511	uint32_t framesize = 2 * sizeof(vm_offset_t);
512
513	if (user_p) {
514	        x86_saved_state32_t	*iss32;
515
516		iss32 = USER_REGS32(thread);
517
518			init_eip = iss32->eip;
519			stackptr = iss32->ebp;
520
521		/* This bound isn't useful, but it doesn't hinder us*/
522		stacklimit = 0xffffffff;
523		kdp_pmap = thread->task->map->pmap;
524	}
525	else {
526		/*Examine the i386_saved_state at the base of the kernel stack*/
527		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
528		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
529	}
530
531	*tracebuf++ = init_eip;
532
533	for (framecount = 0; framecount < nframes; framecount++) {
534
535		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
536			tracebuf--;
537			break;
538		}
539
540		*tracebuf++ = stackptr;
541/* Invalid frame, or hit fence */
542		if (!stackptr || (stackptr == fence)) {
543			break;
544		}
545
546		/* Unaligned frame */
547		if (stackptr & 0x0000003) {
548			break;
549		}
550
551		if (stackptr > stacklimit) {
552			break;
553		}
554
555		if (stackptr <= prevsp) {
556			break;
557		}
558
559		if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
560			break;
561		}
562		tracebuf++;
563
564		prevsp = stackptr;
565		if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
566			*tracebuf++ = 0;
567			break;
568		}
569	}
570
571	kdp_pmap = 0;
572
573	return (uint32_t) (((char *) tracebuf) - tracepos);
574}
575
576#define RETURN_OFFSET64	8
577/* Routine to encapsulate the 64-bit address read hack*/
/* Routine to encapsulate the 64-bit address read hack*/
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	/* Narrowing wrapper: forwards to the KDP VM reader, which takes
	 * full 64-bit source addresses; returns the byte count read. */
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
583
/*
 * 64-bit counterpart of machine_trace_thread(): walk a thread's RBP
 * chain and record the initial RIP followed by (frame pointer, return
 * address) pairs of 64-bit words into the caller's buffer.
 *
 * NOTE(review): unlike the 32-bit walker there is no kernel-stack branch
 * here — when user_p is FALSE, init_rip and stackptr stay 0 and the walk
 * terminates immediately after emitting the zero PC/frame; confirm this
 * is the intended behavior for 64-bit kernel threads.
 *
 * Returns the number of bytes written into the buffer.
 */
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;
	addr64_t stackptr = 0;
	uint64_t stacklimit = 0xfc000000;
	int framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		/* User addresses span the whole 64-bit range. */
		stacklimit = 0xffffffffffffffffULL;
		/* Route kdp reads through the target task's address space. */
		kdp_pmap = thread->task->map->pmap;
	}

	/* First word of the trace is the starting PC. */
	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop while there is still room for a few more frames. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		/* Invalid frame, or hit fence. */
		if (!stackptr || (stackptr == fence)){
			break;
		}

		/* Unaligned frame. */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Frame pointers must strictly ascend. */
		if (stackptr <= prevsp) {
			break;
		}

		/* Return address lives RETURN_OFFSET64 bytes above the saved RBP. */
		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		/* Follow the saved RBP to the previous frame. */
		prevsp = stackptr;
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Reset the pmap override set above for user walks. */
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
647
/*
 * Singly-linked list of registered debugger-event callouts.  Entries are
 * pushed onto the head with a lock-free CAS (see kdp_register_callout)
 * and are never removed.
 */
static struct kdp_callout {
	struct kdp_callout	*callout_next;	/* next registration, or NULL */
	kdp_callout_fn_t	callout_fn;	/* client function to invoke */
	void			*callout_arg;	/* opaque argument for callout_fn */
} *kdp_callout_list = NULL;
653
654
655/*
656 * Called from kernel context to register a kdp event callout.
657 */
/*
 * Register a function to be invoked when the debugger is entered or
 * exited (see kdp_callouts()).  Entries are heap-allocated and pushed
 * onto the head of kdp_callout_list without locks: the node is fully
 * initialized before the CAS publishes it, and the loop retries if a
 * concurrent registration moved the head.  Panics if allocation fails.
 */
void
kdp_register_callout(
	kdp_callout_fn_t	fn,
	void			*arg)
{
	struct kdp_callout	*kcp;
	struct kdp_callout	*list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL)
		panic("kdp_register_callout() kalloc failed");

	kcp->callout_fn  = fn;
	kcp->callout_arg = arg;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
679
680/*
 * Called at exception/panic time when entering or exiting kdp.
682 * We are single-threaded at this time and so we don't use locks.
683 */
684static void
685kdp_callouts(kdp_event_t event)
686{
687	struct kdp_callout	*kcp = kdp_callout_list;
688
689	while (kcp) {
690		kcp->callout_fn(kcp->callout_arg, event);
691		kcp = kcp->callout_next;
692	}
693}
694
/*
 * Drop into the debugger from machine-dependent code by executing the
 * one-byte INT3 breakpoint instruction.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}
700