minidump_machdep.c (302408) → minidump_machdep.c (331017)
1/*-
2 * Copyright (c) 2006 Peter Wemm
3 * Copyright (c) 2015 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * This software was developed by Andrew Turner under
7 * sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include <sys/cdefs.h>
 33__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/minidump_machdep.c 297446 2016-03-31 11:07:24Z andrew $");	(deleted)
 33__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/minidump_machdep.c 331017 2018-03-15 19:08:33Z kevans $");	(added)
34
35#include "opt_watchdog.h"
36
37#include "opt_watchdog.h"
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/conf.h>
42#include <sys/cons.h>
43#include <sys/kernel.h>
44#include <sys/kerneldump.h>
45#include <sys/msgbuf.h>
46#include <sys/watchdog.h>
 47#include <sys/vmmeter.h>	(added)
47
48#include <vm/vm.h>
49#include <vm/vm_param.h>
50#include <vm/vm_page.h>
51#include <vm/vm_phys.h>
52#include <vm/pmap.h>
53
54#include <machine/md_var.h>
55#include <machine/pte.h>
56#include <machine/minidump.h>
57
58CTASSERT(sizeof(struct kerneldumpheader) == 512);
59
60/*
61 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
62 * is to protect us from metadata and to protect metadata from us.
63 */
64#define SIZEOF_METADATA (64*1024)
65
66uint64_t *vm_page_dump;
67int vm_page_dump_size;
68
69static struct kerneldumpheader kdh;
70static off_t dumplo;
71
72/* Handle chunked writes. */
73static size_t fragsz;
74static void *dump_va;
75static size_t counter, progress, dumpsize;
76
77static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
78
79CTASSERT(sizeof(*vm_page_dump) == 8);
80
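
A note on the globals above: vm_page_dump is a bitmap with one bit per page of physical memory (hence the CTASSERT that each element is 8 bytes, i.e. 64 bits), and vm_page_dump_size is its size in bytes; both are initialized elsewhere in the kernel, not in this file. A minimal userland sketch of the sizing arithmetic, assuming 4 KiB pages and a made-up amount of RAM:

/*
 * Illustrative only: how a one-bit-per-page dump bitmap is sized.
 * SK_PAGE_SHIFT and the RAM span are assumptions for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT   12

int
main(void)
{
        uint64_t phys_span = 4ULL << 30;                /* pretend 4 GiB of RAM */
        uint64_t npages = phys_span >> SK_PAGE_SHIFT;   /* one bit per page */
        uint64_t words = (npages + 63) / 64;            /* whole 64-bit words */

        printf("%ju pages -> %ju bytes of bitmap\n",
            (uintmax_t)npages, (uintmax_t)(words * sizeof(uint64_t)));
        return (0);
}
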
81static int
82is_dumpable(vm_paddr_t pa)
83{
84 vm_page_t m;
85 int i;
86
87 if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
88 return ((m->flags & PG_NODUMP) == 0);
89 for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
90 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
91 return (1);
92 }
93 return (0);
94}
95
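
is_dumpable() first consults the vm_page (PG_NODUMP) and then falls back to dump_avail[], an array of (start, end) physical address pairs terminated by a pair of zeros. A standalone sketch of that fallback walk, with a hypothetical two-range table (the sk_ names are invented for the example):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;

/* Hypothetical table: two RAM ranges, terminated by a 0,0 pair. */
static vm_paddr_t sk_dump_avail[] = {
        0x0000000080000000, 0x00000000c0000000,
        0x0000000100000000, 0x0000000180000000,
        0, 0
};

static int
sk_in_dump_avail(vm_paddr_t pa)
{
        int i;

        /* Same walk as the loop in is_dumpable() above. */
        for (i = 0; sk_dump_avail[i] != 0 || sk_dump_avail[i + 1] != 0; i += 2) {
                if (pa >= sk_dump_avail[i] && pa < sk_dump_avail[i + 1])
                        return (1);
        }
        return (0);
}

int
main(void)
{
        printf("%d %d\n", sk_in_dump_avail(0x80001000), sk_in_dump_avail(0xd0000000));
        return (0);     /* prints "1 0" */
}
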
96static int
97blk_flush(struct dumperinfo *di)
98{
99 int error;
100
101 if (fragsz == 0)
102 return (0);
103
104 error = dump_write(di, dump_va, 0, dumplo, fragsz);
105 dumplo += fragsz;
106 fragsz = 0;
107 return (error);
108}
109
110static struct {
111 int min_per;
112 int max_per;
113 int visited;
114} progress_track[10] = {
115 { 0, 10, 0},
116 { 10, 20, 0},
117 { 20, 30, 0},
118 { 30, 40, 0},
119 { 40, 50, 0},
120 { 50, 60, 0},
121 { 60, 70, 0},
122 { 70, 80, 0},
123 { 80, 90, 0},
124 { 90, 100, 0}
125};
126
127static void
128report_progress(size_t progress, size_t dumpsize)
129{
130 int sofar, i;
131
132 sofar = 100 - ((progress * 100) / dumpsize);
133 for (i = 0; i < nitems(progress_track); i++) {
134 if (sofar < progress_track[i].min_per ||
135 sofar > progress_track[i].max_per)
136 continue;
137 if (progress_track[i].visited)
138 return;
139 progress_track[i].visited = 1;
140 printf("..%d%%", sofar);
141 return;
142 }
143}
144
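
report_progress() converts the bytes still to be written into a percent-complete figure and prints each 10% bucket at most once, so the console shows a compact "..N%" trail rather than a line per call. A self-contained sketch of the same bucketing (sk_ names are invented for the example):

#include <stddef.h>
#include <stdio.h>

static struct { int min_per, max_per, visited; } sk_track[10] = {
        { 0, 10, 0}, {10, 20, 0}, {20, 30, 0}, {30, 40, 0}, {40, 50, 0},
        {50, 60, 0}, {60, 70, 0}, {70, 80, 0}, {80, 90, 0}, {90, 100, 0}
};

static void
sk_report(size_t remaining, size_t total)
{
        int sofar, i;

        sofar = 100 - (int)((remaining * 100) / total);
        for (i = 0; i < 10; i++) {
                if (sofar < sk_track[i].min_per || sofar > sk_track[i].max_per)
                        continue;
                if (!sk_track[i].visited) {
                        sk_track[i].visited = 1;
                        printf("..%d%%", sofar);
                }
                return;
        }
}

int
main(void)
{
        size_t total = 1000, left;

        for (left = total; left > 0; left -= 50)
                sk_report(left, total);
        printf("\n");
        return (0);
}
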
145static int
146blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
147{
148 size_t len;
149 int error, c;
150 u_int maxdumpsz;
151
152 maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
153 if (maxdumpsz == 0) /* seatbelt */
154 maxdumpsz = PAGE_SIZE;
155 error = 0;
156 if ((sz % PAGE_SIZE) != 0) {
157 printf("size not page aligned\n");
158 return (EINVAL);
159 }
160 if (ptr != NULL && pa != 0) {
161 printf("cant have both va and pa!\n");
162 return (EINVAL);
163 }
164 if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
165 printf("address not page aligned %p\n", ptr);
166 return (EINVAL);
167 }
168 if (ptr != NULL) {
169 /*
170 * If we're doing a virtual dump, flush any
171 * pre-existing pa pages.
172 */
173 error = blk_flush(di);
174 if (error)
175 return (error);
176 }
177 while (sz) {
178 len = maxdumpsz - fragsz;
179 if (len > sz)
180 len = sz;
181 counter += len;
182 progress -= len;
183 if (counter >> 22) {
184 report_progress(progress, dumpsize);
185 counter &= (1 << 22) - 1;
186 }
187
188 wdog_kern_pat(WD_LASTVAL);
189
190 if (ptr) {
191 error = dump_write(di, ptr, 0, dumplo, len);
192 if (error)
193 return (error);
194 dumplo += len;
195 ptr += len;
196 sz -= len;
197 } else {
198 dump_va = (void *)PHYS_TO_DMAP(pa);
199 fragsz += len;
200 pa += len;
201 sz -= len;
202 error = blk_flush(di);
203 if (error)
204 return (error);
205 }
206
207 /* Check for user abort. */
208 c = cncheckc();
209 if (c == 0x03)
210 return (ECANCELED);
211 if (c != -1)
212 printf(" (CTRL-C to abort) ");
213 }
214
215 return (0);
216}
217
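
The loop in blk_write() above caps each transfer at maxdumpsz bytes and carries partial physical-page runs in fragsz until blk_flush() pushes them to the device. A userland sketch of just that chunking arithmetic; the page size and MAXDUMPPGS value are assumptions here, not taken from the kernel headers:

#include <stddef.h>
#include <stdio.h>

#define SK_PAGE_SIZE    4096u
#define SK_MAXDUMPPGS   16u     /* assumed per-I/O page cap */

int
main(void)
{
        size_t maxdumpsz = SK_MAXDUMPPGS * SK_PAGE_SIZE;
        size_t fragsz = 0, sz = 37 * SK_PAGE_SIZE;      /* pretend a 37-page write */
        size_t len;
        int writes = 0;

        while (sz > 0) {
                len = maxdumpsz - fragsz;               /* same cap as blk_write() */
                if (len > sz)
                        len = sz;
                fragsz += len;
                sz -= len;
                if (fragsz == maxdumpsz || sz == 0) {   /* stand-in for blk_flush() */
                        writes++;
                        fragsz = 0;
                }
        }
        printf("%d device writes\n", writes);           /* 3 for this example */
        return (0);
}
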
218int
219minidumpsys(struct dumperinfo *di)
220{
221 pd_entry_t *l0, *l1, *l2;
222 pt_entry_t *l3;
223 uint32_t pmapsize;
224 vm_offset_t va;
225 vm_paddr_t pa;
226 int error;
227 uint64_t bits;
228 int i, bit;
229 int retry_count;
230 struct minidumphdr mdhdr;
231
232 retry_count = 0;
233 retry:
234 retry_count++;
235 error = 0;
236 pmapsize = 0;
237 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
238 pmapsize += PAGE_SIZE;
239 if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3))
240 continue;
241
242 /* We should always be using the l2 table for kvm */
243 if (l2 == NULL)
244 continue;
245
246 if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
247 pa = *l2 & ~ATTR_MASK;
248 for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
249 if (is_dumpable(pa))
250 dump_add_page(pa);
251 }
252 } else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
253 for (i = 0; i < Ln_ENTRIES; i++) {
254 if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
255 continue;
256 pa = l3[i] & ~ATTR_MASK;
257 if (is_dumpable(pa))
258 dump_add_page(pa);
259 }
260 }
261 }
262
263 /* Calculate dump size. */
264 dumpsize = pmapsize;
265 dumpsize += round_page(msgbufp->msg_size);
266 dumpsize += round_page(vm_page_dump_size);
267 for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
268 bits = vm_page_dump[i];
269 while (bits) {
270 bit = ffsl(bits) - 1;
271 pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
272 bit) * PAGE_SIZE;
273 /* Clear out undumpable pages now if needed */
274 if (is_dumpable(pa))
275 dumpsize += PAGE_SIZE;
276 else
277 dump_drop_page(pa);
278 bits &= ~(1ul << bit);
279 }
280 }
281 dumpsize += PAGE_SIZE;
282
283 /* Determine dump offset on device. */
284 if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
285 error = E2BIG;
286 goto fail;
287 }
288 dumplo = di->mediaoffset + di->mediasize - dumpsize;
289 dumplo -= sizeof(kdh) * 2;
290 progress = dumpsize;
291
292 /* Initialize mdhdr */
293 bzero(&mdhdr, sizeof(mdhdr));
294 strcpy(mdhdr.magic, MINIDUMP_MAGIC);
295 mdhdr.version = MINIDUMP_VERSION;
296 mdhdr.msgbufsize = msgbufp->msg_size;
297 mdhdr.bitmapsize = vm_page_dump_size;
298 mdhdr.pmapsize = pmapsize;
299 mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
300 mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
301 mdhdr.dmapbase = DMAP_MIN_ADDRESS;
302 mdhdr.dmapend = DMAP_MAX_ADDRESS;
303
304 mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
305 dumpsize, di->blocksize);
306
307 printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
308 ptoa((uintmax_t)physmem) / 1048576);
309
310 /* Dump leader */
311 error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
312 if (error)
313 goto fail;
314 dumplo += sizeof(kdh);
315
316 /* Dump my header */
317 bzero(&tmpbuffer, sizeof(tmpbuffer));
318 bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
319 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
320 if (error)
321 goto fail;
322
323 /* Dump msgbuf up front */
324 error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
325 round_page(msgbufp->msg_size));
326 if (error)
327 goto fail;
328
329 /* Dump bitmap */
330 error = blk_write(di, (char *)vm_page_dump, 0,
331 round_page(vm_page_dump_size));
332 if (error)
333 goto fail;
334
335 /* Dump kernel page directory pages */
336 bzero(&tmpbuffer, sizeof(tmpbuffer));
337 for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
338 if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) {
339 /* We always write a page, even if it is zero */
340 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
341 if (error)
342 goto fail;
343 /* flush, in case we reuse tmpbuffer in the same block*/
344 error = blk_flush(di);
345 if (error)
346 goto fail;
347 } else if (l2 == NULL) {
348 pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);
349
350 /* Generate fake l3 entries based upon the l1 entry */
351 for (i = 0; i < Ln_ENTRIES; i++) {
352 tmpbuffer[i] = pa + (i * PAGE_SIZE) |
353 ATTR_DEFAULT | L3_PAGE;
354 }
355 /* We always write a page, even if it is zero */
356 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
357 if (error)
358 goto fail;
359 /* flush, in case we reuse tmpbuffer in the same block*/
360 error = blk_flush(di);
361 if (error)
362 goto fail;
363 bzero(&tmpbuffer, sizeof(tmpbuffer));
364 } else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
365 /* TODO: Handle an invalid L2 entry */
366 pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);
367
368 /* Generate fake l3 entries based upon the l1 entry */
369 for (i = 0; i < Ln_ENTRIES; i++) {
370 tmpbuffer[i] = pa + (i * PAGE_SIZE) |
371 ATTR_DEFAULT | L3_PAGE;
372 }
373 /* We always write a page, even if it is zero */
374 error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
375 if (error)
376 goto fail;
377 /* flush, in case we reuse fakepd in the same block */
378 error = blk_flush(di);
379 if (error)
380 goto fail;
381 bzero(&tmpbuffer, sizeof(tmpbuffer));
382 continue;
383 } else {
384 pa = *l2 & ~ATTR_MASK;
385
386 /* We always write a page, even if it is zero */
387 error = blk_write(di, NULL, pa, PAGE_SIZE);
388 if (error)
389 goto fail;
390 }
391 }
392
393 /* Dump memory chunks */
394 /* XXX cluster it up and use blk_dump() */
395 for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
396 bits = vm_page_dump[i];
397 while (bits) {
398 bit = ffsl(bits) - 1;
399 pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
400 bit) * PAGE_SIZE;
401 error = blk_write(di, 0, pa, PAGE_SIZE);
402 if (error)
403 goto fail;
404 bits &= ~(1ul << bit);
405 }
406 }
407
408 error = blk_flush(di);
409 if (error)
410 goto fail;
411
412 /* Dump trailer */
413 error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
414 if (error)
415 goto fail;
416 dumplo += sizeof(kdh);
417
418 /* Signal completion, signoff and exit stage left. */
419 dump_write(di, NULL, 0, 0, 0);
420 printf("\nDump complete\n");
421 return (0);
422
423 fail:
424 if (error < 0)
425 error = -error;
426
427 printf("\n");
428 if (error == ENOSPC) {
429 printf("Dump map grown while dumping. ");
430 if (retry_count < 5) {
431 printf("Retrying...\n");
432 goto retry;
433 }
434 printf("Dump failed.\n");
435 }
436 else if (error == ECANCELED)
437 printf("Dump aborted\n");
438 else if (error == E2BIG)
439 printf("Dump failed. Partition too small.\n");
440 else
441 printf("** DUMP FAILED (ERROR %d) **\n", error);
442 return (error);
443}
444
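
Read as a whole, minidumpsys() lays the minidump out on the device in a fixed order: the kernel dump header (leader), one page holding struct minidumphdr, the message buffer, the page bitmap, the page-table pages, the dumped physical pages, and the trailing kernel dump header. A consumer of the image walks it in the same order using the sizes recorded in mdhdr. A hedged sketch of the offset arithmetic, relative to the start of the dump area, with made-up sizes standing in for msgbufp->msg_size, vm_page_dump_size and pmapsize:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SIZE    4096ull

static uint64_t
sk_roundpg(uint64_t x)
{
        return ((x + SK_PAGE_SIZE - 1) & ~(SK_PAGE_SIZE - 1));
}

int
main(void)
{
        /* Hypothetical sizes; the kernel uses the real ones at dump time. */
        uint64_t kdhsz = 512, msgbuf = 96 * 1024, bitmap = 128 * 1024;
        uint64_t pmapsz = 2 * 1024 * 1024, off = 0;

        printf("leader kdh      @ %ju\n", (uintmax_t)off); off += kdhsz;
        printf("minidump header @ %ju\n", (uintmax_t)off); off += SK_PAGE_SIZE;
        printf("msgbuf          @ %ju\n", (uintmax_t)off); off += sk_roundpg(msgbuf);
        printf("page bitmap     @ %ju\n", (uintmax_t)off); off += sk_roundpg(bitmap);
        printf("page tables     @ %ju\n", (uintmax_t)off); off += pmapsz;
        printf("page data       @ %ju\n", (uintmax_t)off);
        return (0);
}
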
445void
446dump_add_page(vm_paddr_t pa)
447{
448 int idx, bit;
449
450 pa >>= PAGE_SHIFT;
451 idx = pa >> 6; /* 2^6 = 64 */
452 bit = pa & 63;
453 atomic_set_long(&vm_page_dump[idx], 1ul << bit);
454}
455
456void
457dump_drop_page(vm_paddr_t pa)
458{
459 int idx, bit;
460
461 pa >>= PAGE_SHIFT;
462 idx = pa >> 6; /* 2^6 = 64 */
463 bit = pa & 63;
464 atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
465}
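
dump_add_page() and dump_drop_page() map a physical address to a (word, bit) position in vm_page_dump: shift out the page offset, then the low six bits of the page frame number select the bit and the remaining bits select the 64-bit word. A standalone check of that arithmetic, assuming a 12-bit page shift:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT   12      /* assumed 4 KiB pages */

int
main(void)
{
        uint64_t pa = 0x123456000ull;
        uint64_t pfn = pa >> SK_PAGE_SHIFT;     /* page frame number */
        uint64_t idx = pfn >> 6;                /* which 64-bit word (2^6 = 64) */
        uint64_t bit = pfn & 63;                /* which bit in that word */

        printf("pa %#jx -> vm_page_dump[%ju] bit %ju\n",
            (uintmax_t)pa, (uintmax_t)idx, (uintmax_t)bit);
        return (0);
}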