minidump_machdep.c revision 215133
/*-
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/minidump_machdep.c 215133 2010-11-11 18:35:28Z avg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define	SIZEOF_METADATA		(64*1024)

#define	MD_ALIGN(x)	(((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define	DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))

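/*
 * Physical address of the kernel's page directory pointer (PDP) page,
 * defined in pmap.c; the kernel map walk below starts from it.
 */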
extern uint64_t KPDPphys;

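/*
 * Bitmap of physical pages to include in the minidump: one bit per 4KB
 * page, set via dump_add_page() and cleared via dump_drop_page().
 */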
uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress;

CTASSERT(sizeof(*vm_page_dump) == 8);

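/*
 * Return non-zero if the physical address lies inside one of the
 * dump_avail[] ranges, i.e. it is RAM that may be written to the dump.
 */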
static int
is_dumpable(vm_paddr_t pa)
{
	int i;

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

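/* Convert a page count to megabytes, rounding up (256 4KB pages per MB). */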
#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

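/*
 * Write out any partially accumulated block buffered at dump_va and
 * advance the media offset.
 */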
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_write(di, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}

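/*
 * Write a buffer to the dump device.  Takes either a kernel virtual
 * address in ptr or a physical address in pa, never both; physical
 * pages are mapped a page at a time with pmap_kenter_temporary() and
 * accumulated into fragments of up to maxdumpsz bytes before being
 * flushed by blk_flush().
 */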
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if (pa != 0 && (pa % PAGE_SIZE) != 0) {
		printf("address not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 24) {
			printf(" %ld", PG2MB(progress >> PAGE_SHIFT));
			counter &= (1<<24) - 1;
		}
		if (ptr) {
			error = dump_write(di, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
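			/*
			 * Physical pages: map each one into the temporary
			 * KVA window with pmap_kenter_temporary() and let
			 * the fragment grow; it is flushed once a full
			 * maxdumpsz block has accumulated.
			 */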
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pd_entry_t fakepd[NPDEPG];

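/*
 * Write a minidump: only pages marked in vm_page_dump are saved.  The
 * layout on the dump device is a kerneldump header, the minidump header
 * page, the kernel message buffer, the page bitmap, one page of page
 * directory entries for each 1GB of kernel map, the data pages
 * themselves, and a trailing copy of the kerneldump header.
 */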
void
minidumpsys(struct dumperinfo *di)
{
	uint64_t dumpsize;
	uint32_t pmapsize;
	vm_offset_t va;
	int error;
	uint64_t bits;
	uint64_t *pdp, *pd, *pt, pa;
	int i, j, k, n, bit;
	int retry_count;
	struct minidumphdr mdhdr;

	retry_count = 0;
 retry:
	retry_count++;
	counter = 0;
	/* Walk page table pages, set bits in vm_page_dump */
	pmapsize = 0;
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
	    kernel_vm_end); ) {
		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 1GB of space.
		 */
		pmapsize += PAGE_SIZE;
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		if ((pdp[i] & PG_V) == 0) {
			va += NBPDP;
			continue;
		}

		/*
		 * 1GB page is represented as 512 2MB pages in a dump.
		 */
		if ((pdp[i] & PG_PS) != 0) {
			va += NBPDP;
			pa = pdp[i] & PG_PS_FRAME;
			for (n = 0; n < NPDEPG * NPTEPG; n++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		for (n = 0; n < NPDEPG; n++, va += NBPDR) {
			j = (va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1);

			if ((pd[j] & PG_V) == 0)
				continue;

			if ((pd[j] & PG_PS) != 0) {
				/* This is an entire 2M page. */
				pa = pd[j] & PG_PS_FRAME;
				for (k = 0; k < NPTEPG; k++) {
					if (is_dumpable(pa))
						dump_add_page(pa);
					pa += PAGE_SIZE;
				}
				continue;
			}

			pa = pd[j] & PG_FRAME;
			/* set bit for this PTE page */
			if (is_dumpable(pa))
				dump_add_page(pa);
			/* and for each valid page in this 2MB block */
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & PG_V) == 0)
					continue;
				pa = pt[k] & PG_FRAME;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/* Calculate dump size. */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
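	/*
	 * Each 64-bit word of the bitmap covers 64 pages, so a set bit at
	 * position "bit" in word "i" corresponds to physical address
	 * (i * 64 + bit) * PAGE_SIZE.
	 */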
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
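	/* One more page for the minidump header written via fakepd below. */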
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = E2BIG;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION, dumpsize, di->blocksize);

	printf("Physical memory: %ju MB\n", ptoa((uintmax_t)physmem) / 1048576);
	printf("Dumping %llu MB:", (unsigned long long)dumpsize >> 20);

	/* Dump leader */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&fakepd, sizeof(fakepd));
	bcopy(&mdhdr, &fakepd, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(fakepd, sizeof(fakepd));
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
	    kernel_vm_end); va += NBPDP) {
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);

		/* We always write a page, even if it is zero */
		if ((pdp[i] & PG_V) == 0) {
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}

		/* 1GB page is represented as 512 2MB pages in a dump */
		if ((pdp[i] & PG_PS) != 0) {
			/* A PG_PS PDPE and a PG_PS PDE have identical layout in this case */
			fakepd[0] = pdp[i];
			for (j = 1; j < NPDEPG; j++)
				fakepd[j] = fakepd[j - 1] + NBPDR;
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(fakepd, sizeof(fakepd));
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		error = blk_write(di, (char *)pd, 0, PAGE_SIZE);
		if (error)
			goto fail;
		error = blk_flush(di);
		if (error)
			goto fail;
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dump_write(di, NULL, 0, 0, 0);
	printf("\nDump complete\n");
	return;

 fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG)
		printf("Dump failed. Partition too small.\n");
	else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
}

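/*
 * Mark the physical page containing pa for inclusion in the minidump.
 */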
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

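/*
 * Remove the physical page containing pa from the minidump bitmap.
 */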
void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}
437