/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <libsa/types.h>

#include <vm/vm_map.h>
#include <i386/pmap.h>

#include <kdp/kdp_core.h>
#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <mach/thread_status.h>
#include <i386/thread.h>

#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

unsigned kdp_vm_read( caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write( caddr_t, caddr_t, unsigned);

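/* Flags consulted by kdp_vm_read(): kdp_trans_off skips virtual-to-physical
 * translation of the source address, kdp_read_io allows reads from physical
 * pages that pmap_valid_page() does not recognize as managed RAM, and
 * kdp_src_high32 supplies the upper 32 bits of the 64-bit source address.
 */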
boolean_t kdp_read_io;
boolean_t kdp_trans_off;
uint32_t kdp_src_high32;
extern pmap_paddr_t avail_start, avail_end;

extern void bcopy_phys(addr64_t from, addr64_t to, int size);
static addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

pmap_t kdp_pmap = 0;

unsigned int not_in_kdp = 1; /* Cleared when we begin to access vm functions in kdp */

extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;

int	kern_dump(void);
int	kdp_dump_trap(int type, x86_saved_state32_t *regs);

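/* The set of thread-state flavors recorded for each LC_THREAD command in the
 * core file; only the 32-bit x86 general register state is captured.
 */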
typedef struct {
	int	flavor;			/* the number for this flavor */
	mach_msg_type_number_t	count;	/* count of ints in this flavor */
} mythread_state_flavor_t;

static mythread_state_flavor_t thread_flavor_array [] = {
	{x86_THREAD_STATE32, x86_THREAD_STATE32_COUNT}
};

static int kdp_mynum_flavors = 1;
static int MAX_TSTATE_FLAVORS = 1;

typedef struct {
	vm_offset_t header;
	int  hoffset;
	mythread_state_flavor_t *flavors;
	int tstate_size;
} tir_t;

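/* Staging buffer for the Mach-O header and load commands built by kern_dump(). */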
char command_buffer[512];

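/* Translate a virtual address through the given pmap, returning the 64-bit
 * physical address, or 0 if no valid mapping exists.
 */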
static addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t    pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);
	if (!pp) return 0;

	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);
	return (pa);
}

/*
 * kdp_vm_read: copy len bytes from the source virtual address into the
 * kernel virtual destination, translating and copying one page (or less)
 * at a time; returns the number of bytes actually copied.
 */
unsigned kdp_vm_read(
	caddr_t src,
	caddr_t dst,
	unsigned len)
{
	addr64_t cur_virt_src = (addr64_t)((unsigned int)src | (((uint64_t)kdp_src_high32) << 32));
	addr64_t cur_virt_dst = (addr64_t)((unsigned int)dst);
	addr64_t cur_phys_dst, cur_phys_src;
	unsigned resid = len;
	unsigned cnt = 0;
	pmap_t src_pmap = kernel_pmap;

/* If a different pmap has been specified with kdp_pmap, use it to translate the
 * source (cur_virt_src); otherwise, the source is translated using the
 * kernel_pmap.
 */
	if (kdp_pmap)
		src_pmap = kdp_pmap;

	while (resid != 0) {
/* Translate, unless kdp_trans_off is set */
		if (!kdp_trans_off) {
			if (!(cur_phys_src = kdp_vtophys(src_pmap,
				    cur_virt_src)))
				goto exit;
		}
		else
			cur_phys_src = cur_virt_src;

/* Always translate the destination buffer using the kernel_pmap */
		if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)))
			goto exit;

		/* Validate physical page numbers unless kdp_read_io is set */
		if (kdp_read_io == FALSE)
			if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src)))
				goto exit;

/* Get length left on page */
		cnt = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (cnt > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			cnt = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (cnt > resid)
			cnt = resid;

/* Do a physical copy */
		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

/*
 * kdp_vm_write: copy len bytes between two kernel virtual addresses,
 * translating both through the kernel_pmap and copying one page (or less)
 * at a time; returns the number of bytes actually copied.
 */
unsigned kdp_vm_write(
        caddr_t src,
        caddr_t dst,
        unsigned len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	unsigned resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]);
#endif

	cur_virt_src = (addr64_t)((unsigned int)src);
	cur_virt_dst = (addr64_t)((unsigned int)dst);

	resid = len;

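	/* Copy page by page through physical addresses; both source and
	 * destination are kernel virtual addresses translated via kernel_pmap.
	 */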
	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
			goto exit;

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
			goto exit;

		/* Bytes remaining on the source and destination pages */
		cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);

		if (cnt_src > cnt_dst)
			cnt = cnt_dst;
		else
			cnt = cnt_src;
		if (cnt > resid)
			cnt = resid;

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);		/* Copy stuff over */

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return (len - resid);
}

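/* Append an LC_THREAD command for the given thread to the core file header
 * under construction; t->hoffset is advanced past the data written.
 */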
static void
kern_collectth_state(thread_t thread, tir_t *t)
{
	vm_offset_t	header;
	int hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command	*tc;
	/*
	 *	Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = sizeof(struct thread_command) + t->tstate_size;
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < kdp_mynum_flavors; i++) {
		*(mythread_state_flavor_t *)(header+hoffset) =
		    flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		/* Locate and obtain the non-volatile register context
		 * for this kernel thread. This should ideally be
		 * encapsulated in machine_thread_get_kern_state()
		 * but that routine appears to have been co-opted
		 * by CHUD to obtain pre-interrupt state.
		 */
		if (flavors[i].flavor == x86_THREAD_STATE32) {
			x86_thread_state32_t *tstate = (x86_thread_state32_t *) (header + hoffset);
			vm_offset_t kstack;
			bzero(tstate, x86_THREAD_STATE32_COUNT * sizeof(int));
			if ((kstack = thread->kernel_stack) != 0) {
				struct x86_kernel_state32 *iks = STACK_IKS(kstack);
				tstate->ebx = iks->k_ebx;
				tstate->esp = iks->k_esp;
				tstate->ebp = iks->k_ebp;
				tstate->edi = iks->k_edi;
				tstate->esi = iks->k_esi;
				tstate->eip = iks->k_eip;
			}
		}
		else if (machine_thread_get_kern_state(thread,
			flavors[i].flavor, (thread_state_t) (header+hoffset),
			&flavors[i].count) != KERN_SUCCESS)
			printf ("Failure in machine_thread_get_kern_state()\n");
		hoffset += flavors[i].count*sizeof(int);
	}

	t->hoffset = hoffset;
}

/* Intended to be called from the kernel trap handler if an unrecoverable fault
 * occurs during a crashdump (which shouldn't happen since we validate mappings
 * and so on). This should be reworked to attempt some form of recovery.
 */
int
kdp_dump_trap(
	int type,
	__unused x86_saved_state32_t	*saved_state)
{
	printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type);
	kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
	abort_panic_transfer();
	kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
	kdp_flag &= ~PANIC_CORE_ON_NMI;
	kdp_flag &= ~PANIC_LOG_DUMP;

	kdp_reset();

	kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
	return (0);
}

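/* Generate a Mach-O (MH_CORE) image of the kernel's address space and
 * transmit it via the KDP crashdump protocol, one load command and one
 * region of data at a time.
 */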
int
kern_dump(void)
{
	vm_map_t	map;
	unsigned int	thread_count, segment_count;
	unsigned int	command_size = 0, header_size = 0, tstate_size = 0;
	unsigned int	hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
	unsigned int	max_header_size = 0;
	vm_offset_t	header;
	struct mach_header	*mh;
	struct segment_command	*sc;
	vm_size_t	size;
	vm_prot_t	prot = 0;
	vm_prot_t	maxprot = 0;
	vm_inherit_t	inherit = 0;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t	nflavors;
	vm_size_t	i;
	uint32_t	nesting_depth = 0;
	kern_return_t	kret = 0;
	struct vm_region_submap_info_64	vbr;
	mach_msg_type_number_t	vbrcount = 0;
	tir_t tir1;

	int error = 0;
	int panic_error = 0;
	unsigned int txstart = 0;
	unsigned int mach_section_count = 4;
	unsigned int num_sects_txed = 0;

	map = kernel_map;

	not_in_kdp = 0; /* Signal vm functions not to acquire locks */

	thread_count = 1;
	segment_count = get_vmmap_entries(map);

	printf("Kernel map has %d entries\n", segment_count);

	nflavors = kdp_mynum_flavors;
	bcopy((char *)thread_flavor_array, (char *)flavors, sizeof(thread_flavor_array));

	for (i = 0; i < nflavors; i++)
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));

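	/* One LC_SEGMENT per kernel map entry, plus one for each of the four
	 * statically transmitted kernel sections (text, data, prelink and
	 * link edit), plus an LC_THREAD with register state for each thread.
	 */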
	command_size = (segment_count + mach_section_count) *
	    sizeof(struct segment_command) +
	    thread_count * sizeof(struct thread_command) +
	    tstate_size * thread_count;

	header_size = command_size + sizeof(struct mach_header);
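	/* The header and each load command are staged in the static
	 * command_buffer and transmitted as they are generated; the complete
	 * set of load commands is never resident in memory at once.
	 */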
	header = (vm_offset_t) command_buffer;

	/*
	 *	Set up Mach-O header for currently executing 32 bit kernel.
	 */
	printf ("Generated Mach-O header size was %d\n", header_size);

	mh = (struct mach_header *) header;
	mh->magic = MH_MAGIC;
	mh->cputype = cpu_type();
	mh->cpusubtype = cpu_subtype();
	mh->filetype = MH_CORE;
	mh->ncmds = segment_count + thread_count + mach_section_count;
	mh->sizeofcmds = command_size;
	mh->flags = 0;

	hoffset = sizeof(struct mach_header);	/* offset into header */
	foffset = round_page_32(header_size);	/* offset into file */
	/* Pad so that at least four more segment commands can fit between
	 * the computed header and the start of the segment data.
	 */
	if ((foffset - header_size) < (4*sizeof(struct segment_command))) {
		foffset += ((4*sizeof(struct segment_command)) - (foffset-header_size));
	}

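	/* Segment data begins at foffset in the core file; load commands must
	 * not be emitted past this point.
	 */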
	max_header_size = foffset;

	vmoffset = VM_MIN_ADDRESS;		/* offset into VM */

	/* Transmit the Mach-O MH_CORE header, and seek forward past the
	 * area reserved for the segment and thread commands
	 * to begin data transmission
	 */

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(nfoffset), &nfoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct mach_header), (caddr_t) mh)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
	printf ("Transmitting kernel state, please wait: ");

	while ((segment_count > 0) || (kret == KERN_SUCCESS)) {
		/* Check if we've transmitted all the kernel sections */
		if (num_sects_txed == mach_section_count) {

			while (1) {

				/*
				 *	Get region information for next region.
				 */

				vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
				if ((kret = vm_region_recurse_64(map,
					    &vmoffset, &size, &nesting_depth,
					    (vm_region_recurse_info_t)&vbr,
					    &vbrcount)) != KERN_SUCCESS) {
					break;
				}

				if (vbr.is_submap) {
					nesting_depth++;
					continue;
				} else {
					break;
				}
			}

			if (kret != KERN_SUCCESS)
				break;

			prot = vbr.protection;
			maxprot = vbr.max_protection;
			inherit = vbr.inheritance;
		}
		else
		{
			switch (num_sects_txed) {
			case 0:
				/* Transmit the kernel text section */
				vmoffset = sectTEXTB;
				size = sectSizeTEXT;
				break;
			case 1:
				vmoffset = sectDATAB;
				size = sectSizeDATA;
				break;
			case 2:
				vmoffset = sectPRELINKB;
				size = sectSizePRELINK;
				break;
			case 3:
				vmoffset = sectLINKB;
				size = sectSizeLINK;
				break;
			}
			num_sects_txed++;
		}
		/*
		 *	Fill in segment command structure.
		 */

		if (hoffset > max_header_size)
			break;
		sc = (struct segment_command *) (header);
		sc->cmd = LC_SEGMENT;
		sc->cmdsize = sizeof(struct segment_command);
		sc->segname[0] = 0;
		sc->vmaddr = vmoffset;
		sc->vmsize = size;
		sc->fileoff = foffset;
		sc->filesize = size;
		sc->maxprot = maxprot;
		sc->initprot = prot;
		sc->nsects = 0;

		if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
			printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, sizeof(struct segment_command), (caddr_t) sc)) < 0) {
			printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
			error = panic_error;
			goto out;
		}

		/* Do not transmit memory tagged VM_MEMORY_IOKIT - instead,
		 * seek past that region on the server - this creates a
		 * hole in the file.
		 */

		if (vbr.user_tag != VM_MEMORY_IOKIT) {

			if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(foffset), &foffset)) < 0) {
				printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}

			txstart = vmoffset;

			if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, size, (caddr_t) txstart)) < 0) {
				printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
				error = panic_error;
				goto out;
			}
		}

		hoffset += sizeof(struct segment_command);
		foffset += size;
		vmoffset += size;
		segment_count--;
	}
	tir1.header = header;
	tir1.hoffset = 0;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;

	/* Now send out the LC_THREAD load command, with the thread information
	 * for the current activation.
	 * Note that the corefile can contain LC_SEGMENT commands with file
	 * offsets that point past the edge of the corefile, in the event that
	 * the last N VM regions were all I/O mapped or otherwise
	 * non-transferable memory, not followed by a normal VM region;
	 * i.e. there will be no hole that reaches to the end of the core file.
	 */
	kern_collectth_state (current_thread(), &tir1);

	if ((panic_error = kdp_send_crashdump_pkt (KDP_SEEK, NULL, sizeof(hoffset), &hoffset)) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	if ((panic_error = kdp_send_crashdump_data (KDP_DATA, NULL, tir1.hoffset, (caddr_t) header)) < 0) {
		printf ("kdp_send_crashdump_data failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}

	/* last packet */
	if ((panic_error = kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0))) < 0) {
		printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
		error = panic_error;
		goto out;
	}
out:
	return (error);
}