/*	$NetBSD: x86_xpmap.c,v 1.51 2013/11/08 02:23:52 christos Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
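/*
 * Overview (summary of the code below): this file provides the Xen PV
 * MMU glue for NetBSD/x86.  Page-table updates are batched in per-CPU
 * queues and pushed to the hypervisor with HYPERVISOR_mmu_update();
 * the MMUEXT operations (pin/unpin, base-pointer switch, TLB
 * flush/invlpg) are wrapped by the xpq_queue_* and
 * xen_*_invlpg/tlbflush functions; and xen_pmap_bootstrap() /
 * xen_bootstrap_tables() build the kernel's own page tables to
 * replace the bootstrap tables handed to us by Xen.
 */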
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.51 2013/11/08 02:23:52 christos Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
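/*
 * Usage sketch (see xen_mcast_invlpg() below): kcpuset_export_u32()
 * fills in xcpum_km[], and the same storage is then handed to the
 * hypervisor through &xcpum_xm as the vcpumask argument of the
 * MMUEXT_*_MULTI operations.
 */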
void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif /* i386 */
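/*
 * Each CPU accumulates MMU updates in its own XPQUEUE_SIZE-entry
 * queue.  A queue is pushed to the hypervisor in a single
 * HYPERVISOR_mmu_update() call, either when it fills up (see
 * xpq_increment_idx()) or when a caller needs the pending updates to
 * take effect and calls xpq_flush_queue() explicitly.
 */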
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf(" 0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				    xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
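/*
 * All the MMUEXT wrappers below call xpq_flush_queue() first, so any
 * PTE updates still sitting in the local queue are applied before the
 * extended operation runs.
 */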
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		panic("xpq_flush_cache, err %d", err);
	}
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}

	return;
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}
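/*
 * The shootdown operations below are synchronous: the hypervisor
 * flushes the other virtual CPUs on our behalf, so the kernel does
 * not have to send IPIs itself.
 */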
/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, kc);
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs ? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables.
 * first_avail is the first vaddr we can use after
 * we get rid of Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of Xen bootstrap tables
 */

/* How many PDP do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
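/*
 * Sizing note: each L2 entry maps NBPD_L2 bytes of VA.  The loop in
 * xen_pmap_bootstrap() grows "count" until the kernel image, the
 * extra bootstrap pages and the new table pages themselves all fit in
 * the VA range covered by "count" L2 entries; since the table space
 * itself depends on count, this iterates to a fixed point.
 */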
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need.
	 * First, everything mapped before the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap pages. We may need more L2 pages than
	 * the final table will, as the bootstrap tables are installed
	 * above the final ones.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables, so move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
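/*
 * The two xen_bootstrap_tables() calls above are a two-step dance:
 * the tables Xen handed us at pt_base are pinned and in use, so we
 * first build temporary tables in the free space after them, switch
 * to those, and then rebuild the final tables back at init_tables,
 * reclaiming the space the Xen-provided tables occupied.
 */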
/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;
	extern char *early_zerop; /* from pmap.c */

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
		early_zerop = (char *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* per-cpu L4 PD */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 PD */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install level 3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long)pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */
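	/*
	 * The level-1 loop below maps everything from KERNTEXTOFF to
	 * map_end.  Kernel text and the old and new table pages are
	 * entered read-only; the shared info, console and xenstore
	 * pages are wired to the machine frames advertised in
	 * start_info; everything else is mapped read/write.
	 */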
	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will
				 * re-add this page to uvm after making
				 * it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif /* i386 */
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}
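	/*
	 * The recursive slot (PDIR_SLOT_PTE) maps the page-table pages
	 * themselves into the VA space; this is what the pmap relies on
	 * to edit PTEs once the bootstrap is done.
	 */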
	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
	bt_cpu_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif
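	/*
	 * Xen will refuse to pin a table while a writable mapping of
	 * any of its pages exists, which is why each table page was
	 * marked read-only above before the PGD is pinned below.
	 */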
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %" PRIxVADDR "\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* save the address of the real per-cpu L4 pgd page */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t)pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		    "*pte %#" PRIxPADDR "\n",
		    addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */