/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h> /* pmap_pde */
#include <i386/mp.h>
#include <i386/misc_protos.h>
#include <i386/pio.h>
#include <i386/proc_reg.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <mach/vm_map.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <machine/pal_routines.h>
#include <libkern/kernel_mach_header.h>

// #define KDP_VM_READ_DEBUG 1
// #define KDP_VM_WRITE_DEBUG 1

/*
 * A (potentially valid) physical address is not a kernel address,
 * i.e. it's a user address.
 */
#define IS_PHYS_ADDR(addr)		IS_USERADDR64_CANONICAL(addr)
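
/*
 * Illustrative example (values chosen for exposition only): when the
 * debugger sets kdp_trans_off, addresses in the user-canonical half of
 * the address space are taken to be physical, e.g.
 *
 *	IS_PHYS_ADDR(0x0000000080000000ULL)	evaluates true  (read raw)
 *	IS_PHYS_ADDR(0xffffff8000200000ULL)	evaluates false (translated)
 */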

boolean_t kdp_read_io;
boolean_t kdp_trans_off;

addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
											   vm_map_offset_t end,
											   void *context);
int kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
										  vm_map_offset_t end,
										  void *context);

pmap_t kdp_pmap = 0;

/*
 * Translate a virtual address through the given pmap; returns the
 * physical address, or 0 if no page is mapped at that address.
 */
addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t    pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);
	if (!pp)
		return 0;

	pa = ((addr64_t)pp << PAGE_SHIFT) | (va & PAGE_MASK);

	return (pa);
}
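
/*
 * Worked example (illustrative values, 4K pages): if pmap_find_phys()
 * returns page number 0x1234 and va carries page offset 0xabc, then
 *
 *	pa = ((addr64_t)0x1234 << 12) | 0xabc = 0x1234abc
 */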

mach_vm_size_t
kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src = PAL_KDP_ADDR((addr64_t)src);
	addr64_t cur_virt_dst = PAL_KDP_ADDR((addr64_t)(intptr_t)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;
	pmap_t src_pmap = kernel_pmap;

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_vm_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	if (kdp_trans_off && IS_PHYS_ADDR(src)) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, KDP_CURRENT_LCPU);
		return ret;
	}

	/* If a different pmap has been specified with kdp_pmap, use it to
	 * translate the source (cur_virt_src); otherwise, the source is
	 * translated using the kernel_pmap.
	 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
		if (!(cur_phys_src = kdp_vtophys(src_pmap, cur_virt_src)))
			goto exit;

		/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE)
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
				goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy */
		if (EFAULT == ml_copy_phys(cur_phys_src,
					   cur_phys_dst,
					   (vm_size_t)cnt))
			goto exit;
		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}
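
/*
 * Example of the per-page clamp above (illustrative numbers, 4K pages):
 * with cur_phys_src = 0x1ff8 and cur_phys_dst = 0x3000,
 *
 *	cnt_src = 0x1000 - 0xff8 = 0x8
 *	cnt_dst = 0x1000 - 0x0   = 0x1000
 *	cnt     = MIN(cnt_src, cnt_dst), further clamped to resid
 *
 * so a single ml_copy_phys() never crosses a page boundary on either side.
 */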

mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst,
		      uint16_t lcpu)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t    len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_dst, cur_phys_src;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst);
	}

#ifdef KDP_VM_READ_DEBUG
	printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len);
#endif

	cur_virt_dst = (addr64_t)(intptr_t)dst;
	cur_phys_src = (addr64_t)src;

	while (resid != 0) {
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Get length left on page */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Do a physical copy; use ml_copy_phys() in the event this is
		 * a short read with potential side effects.
		 */
		if (EFAULT == ml_copy_phys(cur_phys_src,
					   cur_phys_dst,
					   (vm_size_t)cnt))
			goto exit;
		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Write len bytes from the kernel-virtual buffer src to the virtual
 * destination dst, one physical page at a time.
 */
mach_vm_size_t
kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = PAL_KDP_ADDR((addr64_t)(intptr_t)src);
	cur_virt_dst = PAL_KDP_ADDR((addr64_t)dst);

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Copy the chunk over */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * Write len bytes from the kernel-virtual buffer src to the physical
 * destination rq->address, one physical page at a time.
 */
mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src,
		       uint16_t lcpu)
{
	mach_vm_address_t dst = rq->address;
	mach_vm_size_t    len = rq->nbytes;
	addr64_t cur_virt_src;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (mach_vm_size_t)
			kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src);
	}

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]);
#endif

	cur_virt_src = (addr64_t)(intptr_t)src;
	cur_phys_dst = (addr64_t)dst;

	resid = (unsigned)len;

	while (resid != 0) {
		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Copy as many bytes as possible without crossing a page */
		cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		/* Copy the chunk over */
		if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt))
			goto exit;

		cur_virt_src += cnt;
		cur_phys_dst += cnt;
		resid -= cnt;
	}

exit:
	return (len - resid);
}

int
kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data);
	}

	switch (size) {
	case 1:
		*((uint8_t *) data)  = inb(addr);
		break;
	case 2:
		*((uint16_t *) data) = inw(addr);
		break;
	case 4:
		*((uint32_t *) data) = inl(addr);
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}
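
/*
 * Usage sketch (illustrative only -- in practice the request arrives
 * over the KDP wire protocol): read the PS/2 controller status port
 * 0x64 on the current logical CPU.
 */
#if 0
	kdp_readioport_req_t rq;
	uint8_t status;

	rq.address = 0x64;	/* I/O port number */
	rq.nbytes  = 1;		/* must be 1, 2 or 4 */
	if (kdp_machine_ioport_read(&rq, (caddr_t)&status, KDP_CURRENT_LCPU)
	    == KDPERR_NO_ERROR) {
		/* status now holds the result of inb(0x64) */
	}
#endif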

int
kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint16_t addr = rq->address;
	uint16_t size = rq->nbytes;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data);
	}

	switch (size) {
	case 1:
		outb(addr, *((uint8_t *) data));
		break;
	case 2:
		outw(addr, *((uint16_t *) data));
		break;
	case 4:
		outl(addr, *((uint32_t *) data));
		break;
	default:
		return KDPERR_BADFLAVOR;
	}

	return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr    = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_read, rq, data);
	}

	*value = rdmsr64(msr);
	return KDPERR_NO_ERROR;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
	uint64_t *value = (uint64_t *) data;
	uint32_t msr    = rq->address;

	if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) {
		return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_msr64_write, rq, data);
	}

	wrmsr64(msr, *value);
	return KDPERR_NO_ERROR;
}
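
/*
 * Usage sketch (illustrative only): read the time-stamp counter MSR
 * (IA32_TIME_STAMP_COUNTER, 0x10) on the current logical CPU; the
 * 64-bit value is returned through the data buffer.
 */
#if 0
	kdp_readmsr64_req_t rq;
	uint64_t tsc;

	rq.address = 0x10;	/* MSR number */
	if (kdp_machine_msr64_read(&rq, (caddr_t)&tsc, KDP_CURRENT_LCPU)
	    == KDPERR_NO_ERROR) {
		/* tsc now holds rdmsr64(0x10) */
	}
#endif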

int
pmap_traverse_present_mappings(pmap_t pmap,
							   vm_map_offset_t start,
							   vm_map_offset_t end,
							   pmap_traverse_callback callback,
							   void *context)
{
	int ret = KERN_SUCCESS;
	vm_map_offset_t vcurstart, vcur;
	boolean_t lastvavalid = FALSE;

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return (KERN_INVALID_ARGUMENT);
	}

	if (start & PAGE_MASK_64) {
		return (KERN_INVALID_ARGUMENT);
	}

	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
		ppnum_t ppn = pmap_find_phys(pmap, vcur);

		if (ppn != 0 && !pmap_valid_page(ppn)) {
			/* not something we want */
			ppn = 0;
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

			/* Try to skip by 2MB if possible */
			if (((vcur & PDMASK) == 0) && cpu_64bit) {
				pd_entry_t *pde;

				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vcur += NBPD;
						continue;
					}
				}
			}
		}

		vcur += PAGE_SIZE_64;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}
	return (ret);
}

struct kern_dump_preflight_context {
	uint32_t	region_count;
	uint64_t	dumpable_bytes;
};

struct kern_dump_send_context {
	uint64_t	hoffset;
	uint64_t	foffset;
	uint64_t	header_size;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
										   vm_map_offset_t end,
										   void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	int ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return (ret);
}

int
kern_dump_pmap_traverse_send_callback(vm_map_offset_t start,
									  vm_map_offset_t end,
									  void *context)
{
	struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
	int ret = KERN_SUCCESS;
	kernel_segment_command_t sc;
	vm_size_t size = (vm_size_t)(end - start);

	if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
		return (KERN_NO_SPACE);
	}

	/*
	 *	Fill in segment command structure.
	 */

	sc.cmd = LC_SEGMENT_KERNEL;
	sc.cmdsize = sizeof(kernel_segment_command_t);
	sc.segname[0] = 0;
	sc.vmaddr = (vm_address_t)start;
	sc.vmsize = size;
	sc.fileoff = (vm_address_t)kdc->foffset;
	sc.filesize = size;
	sc.maxprot = VM_PROT_READ;
	sc.initprot = VM_PROT_READ;
	sc.nsects = 0;
	sc.flags = 0;

	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(kdc->hoffset), &kdc->hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(kernel_segment_command_t), (caddr_t) &sc)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->hoffset += sizeof(kernel_segment_command_t);

	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(kdc->foffset), &kdc->foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, (unsigned int)size, (caddr_t)(uintptr_t)start)) < 0)	{
		printf ("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	kdc->foffset += size;

out:
	return (ret);
}
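
/*
 * Resulting core file layout (sketch): the MH_CORE header and its load
 * commands occupy the first round_page(header_size) bytes of the file;
 * each region's bytes follow at the file offset recorded in its segment
 * command.
 *
 *	+-------------+------------------+-----------+-------------------+
 *	| mach header | segment commands | LC_THREAD | page-aligned data |
 *	+-------------+------------------+-----------+-------------------+
 */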

int
kern_dump(void)
{
	int			ret;
	struct kern_dump_preflight_context kdc_preflight;
	struct kern_dump_send_context kdc_send;
	uint32_t	segment_count;
	size_t		command_size = 0, header_size = 0, tstate_size = 0;
	uint64_t	hoffset = 0, foffset = 0;
	kernel_mach_header_t	mh;

	kdc_preflight.region_count = 0;
	kdc_preflight.dumpable_bytes = 0;

	ret = pmap_traverse_present_mappings(kernel_pmap,
										 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
										 VM_MAX_KERNEL_ADDRESS,
										 kern_dump_pmap_traverse_preflight_callback,
										 &kdc_preflight);
	if (ret) {
		printf("pmap traversal failed: %d\n", ret);
		return (ret);
	}

	printf("Kernel dump region count: %u\n", kdc_preflight.region_count);
	printf("Kernel dump byte count: %llu\n", kdc_preflight.dumpable_bytes);

	segment_count = kdc_preflight.region_count;

	tstate_size = sizeof(struct thread_command) + kern_collectth_state_size();

	command_size = segment_count * sizeof(kernel_segment_command_t) +
				tstate_size;

	header_size = command_size + sizeof(kernel_mach_header_t);
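
	/*
	 * Worked example (illustrative numbers for a 64-bit kernel): with
	 * segment_count = 100,
	 *
	 *	command_size = 100 * sizeof(kernel_segment_command_t)	/* 100 * 72 */
	 *	             + tstate_size
	 *	header_size  = command_size + sizeof(kernel_mach_header_t) /* + 32 */
	 *
	 * and foffset below rounds header_size up to a page so the segment
	 * data starts on a page boundary.
	 */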

	/*
	 *	Set up Mach-O header for currently executing kernel.
	 */
	printf ("Generated Mach-O header size was %lu\n", header_size);

	mh.magic = _mh_execute_header.magic;
	mh.cputype = _mh_execute_header.cputype;
	mh.cpusubtype = _mh_execute_header.cpusubtype;
	mh.filetype = MH_CORE;
	mh.ncmds = segment_count + 1 /* thread */;
	mh.sizeofcmds = (uint32_t)command_size;
	mh.flags = 0;
#if defined(__LP64__)
	mh.reserved = 0;
#endif

	hoffset = 0;	/* offset into header */
	foffset = (uint32_t)round_page(header_size);	/* offset into file */

	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */
	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}
	if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(kernel_mach_header_t), (caddr_t) &mh)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", ret);
		goto out;
	}

	hoffset += sizeof(kernel_mach_header_t);

	if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

	printf ("Transmitting kernel state, please wait: ");

	kdc_send.hoffset = hoffset;
	kdc_send.foffset = foffset;
	kdc_send.header_size = header_size;

	ret = pmap_traverse_present_mappings(kernel_pmap,
										 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
										 VM_MAX_KERNEL_ADDRESS,
										 kern_dump_pmap_traverse_send_callback,
										 &kdc_send);
	if (ret) {
		kprintf("pmap traversal failed: %d\n", ret);
		return (ret);
	}

	/* Reload mutated offsets */
	hoffset = kdc_send.hoffset;
	foffset = kdc_send.foffset;

	/*
	 * Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 */
	if (tstate_size > 0) {
		char tstate[tstate_size];

		kern_collectth_state (current_thread(), tstate, tstate_size);

		if ((ret = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
			goto out;
		}

		if ((ret = kdp_send_crashdump_data (KDP_DATA, NULL, tstate_size, tstate)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", ret);
			goto out;
		}

		hoffset += tstate_size;
	}

	/* last packet */
	if ((ret = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", ret);
		goto out;
	}

out:
	return (ret);
}

pt_entry_t *debugger_ptep;
vm_map_offset_t debugger_window_kva;

/* Establish a pagetable window that can be remapped on demand.
 * This is utilized by the debugger to address regions outside
 * the physical map.
 */

void
kdp_machine_init(void)
{
	if (debug_boot_arg == 0)
		return;

	vm_map_entry_t e;
	kern_return_t kr = vm_map_find_space(kernel_map,
	    &debugger_window_kva,
	    PAGE_SIZE, 0,
	    VM_MAKE_TAG(VM_MEMORY_IOKIT), &e);

	if (kr != KERN_SUCCESS) {
		panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr);
	}

	vm_map_unlock(kernel_map);

	debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);

	if (debugger_ptep == NULL) {
		pmap_expand(kernel_pmap, debugger_window_kva, PMAP_EXPAND_OPTIONS_NONE);
		debugger_ptep = pmap_pte(kernel_pmap, debugger_window_kva);
	}
}
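
/*
 * Remap sketch (illustrative; assumes pmap_store_pte()/invlpg() as the
 * remap primitives -- the debugger's actual remap path may differ):
 * point the window at an arbitrary physical page and read through it.
 */
#if 0
	ppnum_t target_ppn = 0;		/* hypothetical page of interest */
	uint64_t value;

	pmap_store_pte(debugger_ptep,
	    ptoa_64(target_ppn) | INTEL_PTE_VALID);
	invlpg((uintptr_t)debugger_window_kva);	/* flush the stale TLB entry */
	value = *(volatile uint64_t *)debugger_window_kva;
#endif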