#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/pte.h>
#include <machine/minidump.h>

/* The on-disk dump header must be exactly one 512-byte disk sector. */
CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define SIZEOF_METADATA (64*1024)

/*
 * Bitmap with one bit per physical page; pages whose bit is set are
 * written to the minidump.  Bits are set by dump_add_page() and cleared
 * by dump_drop_page() below.
 */
uint64_t *vm_page_dump;
int vm_page_dump_size;		/* size of the bitmap, in bytes */

static struct kerneldumpheader kdh;
static off_t dumplo;		/* current write offset on the dump device */

/* Handle chunked writes. */
static size_t fragsz;		/* bytes queued at dump_va, not yet flushed */
static void *dump_va;		/* VA of the pending chunk (direct map) */
/*
 * counter:  bytes written since the last progress report.
 * progress: bytes remaining to write (counts down from dumpsize).
 * dumpsize: total size of the dump, in bytes.
 */
static size_t counter, progress, dumpsize;

/* One page of scratch, used for the minidump header and fake L3 tables. */
static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];

/* The bitmap code below assumes 64-bit words. */
CTASSERT(sizeof(*vm_page_dump) == 8);

/*
 * Return non-zero if the physical page at 'pa' should be included in the
 * dump: either it is backed by a vm_page that is not marked PG_NODUMP,
 * or it falls inside one of the dump_avail[] ranges (start/end pairs,
 * terminated by a 0,0 pair).
 */
static int
is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);
	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

/*
 * Write any chunk queued at dump_va to the device at dumplo and advance
 * dumplo past it.  No-op when nothing is queued.  Returns 0 or an errno
 * from dump_write().
 */
static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_write(di, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}

/*
 * One entry per decile of completion; 'visited' ensures each decile is
 * reported at most once (important across the ENOSPC retry path, which
 * restarts the dump without resetting this table).
 */
static struct {
	int min_per;
	int max_per;
	int visited;
} progress_track[10] = {
	{  0,  10, 0},
	{ 10,  20, 0},
	{ 20,  30, 0},
	{ 30,  40, 0},
	{ 40,  50, 0},
	{ 50,  60, 0},
	{ 60,  70, 0},
	{ 70,  80, 0},
	{ 80,  90, 0},
	{ 90, 100, 0}
};

/*
 * Print "..N%" the first time the dump crosses into a new decile.
 * 'progress' counts down from 'dumpsize', so percent-complete is
 * 100 - remaining-percent.
 */
static void
report_progress(size_t progress, size_t dumpsize)
{
	int sofar, i;

	sofar = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (sofar < progress_track[i].min_per ||
		    sofar > progress_track[i].max_per)
			continue;
		if (progress_track[i].visited)
			return;
		progress_track[i].visited = 1;
		printf("..%d%%", sofar);
		return;
	}
}

/*
 * Write 'sz' bytes (page-multiple) to the dump device, from either a
 * kernel virtual address 'ptr' or a physical address 'pa' (accessed via
 * the direct map) -- exactly one of the two may be used per call.
 * Physical writes are staged through dump_va/fragsz and flushed each
 * iteration; virtual writes go straight to the device, flushing any
 * staged physical chunk first.  Also drives progress reporting, pats
 * the watchdog, and polls the console for a Ctrl-C abort.
 * Returns 0, a dump_write() errno, EINVAL on bad arguments, or
 * ECANCELED on user abort.
 */
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("cant have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %p\n", ptr);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any
		 * pre-existing pa pages.
		 */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		/* Report progress roughly every 4MB (1 << 22 bytes). */
		if (counter >> 22) {
			report_progress(progress, dumpsize);
			counter &= (1 << 22) - 1;
		}

		/* Keep the watchdog from firing during a long dump. */
		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_write(di, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			/* Stage the physical chunk and flush immediately. */
			dump_va = (void *)PHYS_TO_DMAP(pa);
			fragsz += len;
			pa += len;
			sz -= len;
			error = blk_flush(di);
			if (error)
				return (error);
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

/*
 * Write a minidump of the kernel to the dump device 'di'.
 *
 * Phases:
 *   1. Walk the kernel map in L2_SIZE steps, marking every mapped,
 *      dumpable physical page in the vm_page_dump bitmap.
 *   2. Compute the total dump size (page tables + msgbuf + bitmap +
 *      one header page + all marked pages) and position dumplo near
 *      the end of the device.
 *   3. Write: leader kerneldumpheader, minidump header page, msgbuf,
 *      bitmap, one synthesized L3 page table page per L2 region, the
 *      marked physical pages, and the trailer kerneldumpheader.
 *
 * If the device reports ENOSPC (the set of pages grew while dumping),
 * the whole dump is retried up to 5 times.  Returns 0 on success or a
 * positive errno on failure.
 */
int
minidumpsys(struct dumperinfo *di)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	uint32_t pmapsize;
	vm_offset_t va;
	vm_paddr_t pa;
	int error;
	uint64_t bits;
	int i, bit;
	int retry_count;
	struct minidumphdr mdhdr;

	retry_count = 0;
 retry:
	retry_count++;
	error = 0;
	pmapsize = 0;
	/* Phase 1: mark every dumpable kernel-mapped page in the bitmap. */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		/* One (possibly fake) L3 table page is dumped per L2 step. */
		pmapsize += PAGE_SIZE;
		if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3))
			continue;

		/* We should always be using the l2 table for kvm */
		if (l2 == NULL)
			continue;

		/*
		 * NOTE(review): regions mapped by an L1 block (l2 == NULL)
		 * are skipped here but get fake L3 entries written in the
		 * page-table phase below -- confirm their data pages are
		 * meant to be excluded from the bitmap.
		 */
		if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
			/* 2MB block mapping: mark each page it covers. */
			pa = *l2 & ~ATTR_MASK;
			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		} else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
			/* Walk the L3 table and mark each mapped page. */
			for (i = 0; i < Ln_ENTRIES; i++) {
				if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
					continue;
				pa = l3[i] & ~ATTR_MASK;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/* Calculate dump size. */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = ffsl(bits) - 1;
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			    bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa))
				dumpsize += PAGE_SIZE;
			else
				dump_drop_page(pa);
			bits &= ~(1ul << bit);
		}
	}
	/* One extra page for the minidump header written below. */
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = E2BIG;
		goto fail;
	}
	/* Place the dump at the end of the device, trailer kdh last. */
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
	    dumpsize, di->blocksize);

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump leader */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) {
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block*/
			error = blk_flush(di);
			if (error)
				goto fail;
		} else if (l2 == NULL) {
			/* L1 block mapping: no L2/L3 tables exist. */
			pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);

			/* Generate fake l3 entries based upon the l1 entry */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
				    ATTR_DEFAULT | L3_PAGE;
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block*/
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
		} else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
			/* TODO: Handle an invalid L2 entry */
			pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);

			/* Generate fake l3 entries based upon the l2 entry */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
				    ATTR_DEFAULT | L3_PAGE;
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
			continue;
		} else {
			/* A real L3 table exists; dump its page directly. */
			pa = *l2 & ~ATTR_MASK;

			/* We always write a page, even if it is zero */
			error = blk_write(di, NULL, pa, PAGE_SIZE);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = ffsl(bits) - 1;
			/* Word index * 64 bits/word + bit index = page number. */
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			    bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dump_write(di, NULL, 0, 0, 0);
	printf("\nDump complete\n");
	return (0);

 fail:
	/* Normalize negative error codes to positive errnos. */
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		/* The page set grew during the dump; start over. */
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	}
	else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG)
		printf("Dump failed. Partition too small.\n");
	else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}

/*
 * Atomically set the bitmap bit for the physical page containing 'pa',
 * marking it for inclusion in the minidump.
 */
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

/*
 * Atomically clear the bitmap bit for the physical page containing 'pa',
 * excluding it from the minidump.
 */
void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}
|