/*	$NetBSD: x86_xpmap.c,v 1.46 2012/06/30 22:50:37 jym Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.46 2012/06/30 22:50:37 jym Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
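/*
 * MMU updates are batched: the xpq_queue_*() functions below append
 * mmu_update_t entries to a per-CPU array, and the whole batch is
 * submitted in a single HYPERVISOR_mmu_update hypercall when the array
 * fills up or when xpq_flush_queue() is called explicitly. A typical
 * (purely illustrative) sequence is:
 *
 *	xpq_queue_pte_update(ma, npte);	 (queued, no hypercall yet)
 *	...
 *	xpq_flush_queue();		 (one hypercall for the batch)
 */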
void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif /* i386 */
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		printf("xpq_flush_queue: %d entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (CPU_INFO_FOREACH(cii, ci)) {
			xpq_queue = xpq_queue_array[ci->ci_cpuid];
			xpq_idx = xpq_idx_array[ci->ci_cpuid];
			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
			for (i = 0; i < xpq_idx; i++) {
				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
				    xpq_queue[i].ptr, xpq_queue[i].val);
			}
#ifdef __x86_64__
			for (i = 0; i < PDIR_SLOT_PTE; i++) {
				if (ci->ci_kpm_pdir[i] == 0)
					continue;
				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
				    i, ci->ci_kpm_pdir[i]);
			}
#endif
		}
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[curcpu()->ci_cpuid] = 0;
}
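/*
 * Advance the per-CPU queue index after an entry has been written;
 * flush the whole queue to the hypervisor once it fills up.
 */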
static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
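/*
 * The operations below go through HYPERVISOR_mmuext_op instead of the
 * update queue. Each one flushes the queue first, so that pending PTE
 * updates are applied before the extended operation (base pointer
 * switch, table pin/unpin, LDT load, TLB flush) takes effect.
 */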
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_flush_queue();

	XENPRINTK2(("xpq_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		panic("xpq_flush_cache, err %d", err);
	}
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}
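/*
 * Remote TLB invalidation. The mcast variants convert a kcpuset_t into
 * the vcpumask format the hypervisor expects; the bcast variants target
 * every vCPU of the domain. Xen performs the flush as part of the
 * hypercall, which is why the flush entry points below are documented
 * as synchronous calls.
 */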
void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	u_long xcpumask = 0;
	mmuext_op_t op;

	kcpuset_copybits(kc, &xcpumask, sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}

	return;
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	u_long xcpumask = 0;
	mmuext_op_t op;

	kcpuset_copybits(kc, &xcpumask, sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, kc);
	}

	return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs ? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
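/*
 * For PAE, the count of 6 above breaks down as: the L3 page itself, the
 * 4 contiguous L2 pages it maps, and the shadow page for the kernel's
 * L2 (L3[3]); xen_bootstrap_tables() below allocates the shadow as the
 * fifth page of the L2 "superpage".
 */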
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * install bootstrap pages. We may need more L2 pages here than
	 * the final table will have, as the bootstrap tables are
	 * installed after the final tables.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough space to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
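/*
 * A worked example for the sizing loop above (illustrative numbers
 * only): on i386 without PAE, one PTE page maps 4MB (L2_SHIFT = 22).
 * With count = 2 the loop asks whether mapsize bytes of kernel plus
 * (count + l2_4_count) page-table pages, starting at KERNTEXTOFF,
 * still fit below KERNBASE + 8MB; if not, count becomes 3 (12MB) and
 * the test is repeated until the mapped window is large enough.
 */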
/*
 * Build a new table and switch to it.
 * old_count is the number of old tables (including the PGD, PDTPE and PDE);
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;
	extern char *early_zerop; /* from pmap.c */

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  early_zerop
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
		early_zerop = (char *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* per-cpu L4 PD */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 PD */
	bt_pgd = (pd_entry_t *) avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

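	/*
	 * The level 1 loop below wires the whole KERNTEXTOFF..map_end
	 * range one PTE page at a time: kernel text and the old and new
	 * page tables are entered read-only, the console and xenstore
	 * rings and the shared info page get their machine frames wired
	 * in directly, and everything else is mapped read/write.
	 */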
	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map the bootstrap gdt R/O. Later, we
				 * will re-add this page to uvm after
				 * making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif /* i386 */
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

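	/*
	 * The recursive entries installed below point a PD slot
	 * (PDIR_SLOT_PTE) back at the PD itself, making page-table pages
	 * addressable as ordinary virtual memory. Under Xen these
	 * entries must be read-only: the hypervisor refuses writable
	 * mappings of live page tables.
	 */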
	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t) cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
	bt_cpu_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %" PRIxVADDR "\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(PAE)
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* save the address of the real per-cpu L4 pgd page */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t) bt_cpu_pgd - KERNBASE);
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		    "*pte %#" PRIxPADDR "\n",
		    addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assumes vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}
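/*
 * 64-bit PV guests run with separate kernel- and user-mode page table
 * bases; xen_set_user_pgd() installs the user one through
 * MMUEXT_NEW_USER_BASEPTR, leaving the kernel base untouched.
 */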
#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */