minidump_machdep.c revision 257216
/*-
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/minidump_machdep.c 257216 2013-10-27 16:31:12Z kib $");

#include "opt_pmap.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/watchdog.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define	SIZEOF_METADATA		(64*1024)

#define	MD_ALIGN(x)	(((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define	DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))

uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress, dumpsize;

CTASSERT(sizeof(*vm_page_dump) == 8);

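/*
 * A page is dumpable if its vm_page does not have PG_NODUMP set or, for
 * pages without a vm_page, if it falls inside one of the dump_avail[]
 * ranges.
 */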
static int
is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);
	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_write(di, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}

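/*
 * Track which 10% progress buckets have already been reported, so that
 * each range is printed at most once.
 */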
static struct {
	int min_per;
	int max_per;
	int visited;
} progress_track[10] = {
	{  0,  10, 0},
	{ 10,  20, 0},
	{ 20,  30, 0},
	{ 30,  40, 0},
	{ 40,  50, 0},
	{ 50,  60, 0},
	{ 60,  70, 0},
	{ 70,  80, 0},
	{ 80,  90, 0},
	{ 90, 100, 0}
};

static void
report_progress(size_t progress, size_t dumpsize)
{
	int sofar, i;

	sofar = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (sofar < progress_track[i].min_per ||
		    sofar > progress_track[i].max_per)
			continue;
		if (progress_track[i].visited)
			return;
		progress_track[i].visited = 1;
		printf("..%d%%", sofar);
		return;
	}
}

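/*
 * Write a block to the dump device.  Exactly one of "ptr" (a kernel
 * virtual address) or "pa" (a physical address) may be supplied; in the
 * physical case, pages are mapped with pmap_kenter_temporary() and
 * accumulated into fragments of up to maxdumpsz bytes, which are pushed
 * out by blk_flush().
 */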
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %#jx\n", (uintmax_t)pa);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 24) {
			report_progress(progress, dumpsize);
			counter &= (1<<24) - 1;
		}

		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_write(di, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pd_entry_t fakepd[NPDEPG];

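/*
 * Write the minidump: walk the kernel page tables and mark every
 * dumpable page in vm_page_dump, then emit the kernel dump header, the
 * minidump header, the message buffer, the page bitmap, one page of
 * page directory entries per 1GB of KVA, and finally the marked
 * physical pages themselves.
 */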
void
minidumpsys(struct dumperinfo *di)
{
	uint32_t pmapsize;
	vm_offset_t va;
	int error;
	uint64_t bits;
	uint64_t *pml4, *pdp, *pd, *pt, pa;
	int i, ii, j, k, n, bit;
	int retry_count;
	struct minidumphdr mdhdr;

	retry_count = 0;
 retry:
	retry_count++;
	counter = 0;
	for (i = 0; i < nitems(progress_track); i++)
		progress_track[i].visited = 0;
	/* Walk page table pages, set bits in vm_page_dump */
	pmapsize = 0;
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
	    kernel_vm_end); ) {
		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 1GB of space
		 */
		pmapsize += PAGE_SIZE;
		ii = (va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		if ((pdp[i] & PG_V) == 0) {
			va += NBPDP;
			continue;
		}

		/*
		 * 1GB page is represented as 512 2MB pages in a dump.
		 */
		if ((pdp[i] & PG_PS) != 0) {
			va += NBPDP;
			pa = pdp[i] & PG_PS_FRAME;
			for (n = 0; n < NPDEPG * NPTEPG; n++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		for (n = 0; n < NPDEPG; n++, va += NBPDR) {
			j = (va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1);

			if ((pd[j] & PG_V) == 0)
				continue;

			if ((pd[j] & PG_PS) != 0) {
				/* This is an entire 2M page. */
				pa = pd[j] & PG_PS_FRAME;
				for (k = 0; k < NPTEPG; k++) {
					if (is_dumpable(pa))
						dump_add_page(pa);
					pa += PAGE_SIZE;
				}
				continue;
			}

			pa = pd[j] & PG_FRAME;
			/* set bit for this PTE page */
			if (is_dumpable(pa))
				dump_add_page(pa);
			/* and for each valid page in this 2MB block */
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & PG_V) == 0)
					continue;
				pa = pt[k] & PG_FRAME;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/*
	 * Calculate dump size: one page of page directory entries per 1GB
	 * of KVA (pmapsize), the message buffer, the page bitmap, every
	 * page still marked dumpable, plus one page for the minidump
	 * header.
	 */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = E2BIG;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION, dumpsize, di->blocksize);

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump leader */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&fakepd, sizeof(fakepd));
	bcopy(&mdhdr, &fakepd, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(fakepd, sizeof(fakepd));
	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
	    kernel_vm_end); va += NBPDP) {
		ii = (va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1);
		pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii;
		pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);

		/* We always write a page, even if it is zero */
		if ((pdp[i] & PG_V) == 0) {
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}

		/* 1GB page is represented as 512 2MB pages in a dump */
		if ((pdp[i] & PG_PS) != 0) {
			/* PDPE and PDP have identical layout in this case */
			fakepd[0] = pdp[i];
			for (j = 1; j < NPDEPG; j++)
				fakepd[j] = fakepd[j - 1] + NBPDR;
			error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakepd in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(fakepd, sizeof(fakepd));
			continue;
		}

		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		error = blk_write(di, (char *)pd, 0, PAGE_SIZE);
		if (error)
			goto fail;
		error = blk_flush(di);
		if (error)
			goto fail;
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
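	/*
	 * Each set bit in vm_page_dump stands for one physical page; its
	 * address is reconstructed from the bitmap word index and bit
	 * position, and the page is written out.
	 */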
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dump_write(di, NULL, 0, 0, 0);
	printf("\nDump complete\n");
	return;

 fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG)
		printf("Dump failed. Partition too small.\n");
	else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
}

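/*
 * vm_page_dump is an array of 64-bit words: bit N of the bitmap
 * corresponds to physical page N, so a page's word index is its page
 * frame number divided by 64 and its bit is the remainder.
 */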
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}