/*	$NetBSD: x86_xpmap.c,v 1.33 2011/08/21 10:00:13 jym Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
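/*
 * Overview of this file:
 * - the pseudo-physical <-> machine address translation array
 *   (xpmap_phys_to_machine_mapping, initialized from the mfn_list
 *   Xen hands us in the start_info page);
 * - a queue of batched MMU updates (xpq_queue_*), flushed to the
 *   hypervisor with a single HYPERVISOR_mmu_update() call;
 * - the bootstrap code that replaces the page tables Xen gives us
 *   at boot with the kernel's own tables (xen_pmap_bootstrap()).
 */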
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.33 2011/08/21 10:00:13 jym Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/simplelock.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	/* The pages backing the LDT must be mapped read-only. */
	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_lock();
	xpq_queue_set_ldt(base, entries);
	xpq_queue_unlock();
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

#ifdef MULTIPROCESSOR
static struct simplelock xpq_lock = SIMPLELOCK_INITIALIZER;

void
xpq_queue_lock(void)
{
	simple_lock(&xpq_lock);
}

void
xpq_queue_unlock(void)
{
	simple_unlock(&xpq_lock);
}

bool
xpq_queue_locked(void)
{
	return simple_lock_held(&xpq_lock);
}
#endif /* MULTIPROCESSOR */
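/*
 * Typical use of the update queue, following the pattern of
 * xen_set_ldt() above (a sketch; "ma" is the machine address of the
 * PTE to update and "npte" the new PTE value, both hypothetical):
 *
 *	s = splvm();
 *	xpq_queue_lock();
 *	xpq_queue_pte_update(ma, npte);
 *	...
 *	xpq_flush_queue();
 *	xpq_queue_unlock();
 *	splx(s);
 */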
/* Must be called with xpq_lock held */
void
xpq_flush_queue(void)
{
	int i, ok, ret;

	KASSERT(xpq_queue_locked());
	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		printf("xpq_flush_queue: %d entries (%d successful)\n",
		    xpq_idx, ok);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			    xpq_queue[i].ptr, xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx = 0;
}

/* Must be called with xpq_lock held */
static inline void
xpq_increment_idx(void)
{

	KASSERT(xpq_queue_locked());
	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	KASSERT(xpq_queue_locked());
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	KASSERT(xpq_queue_locked());
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}
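/*
 * Note on ordering: the MMUEXT operations below are issued with a
 * separate hypercall and thus bypass the xpq_queue batch, so each
 * caller first drains the queue with xpq_flush_queue().  Otherwise a
 * TLB flush or %cr3 switch could be seen by the hypervisor before the
 * PTE updates still sitting in the queue.
 */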
panic("xpq_flush_cache, err %d", err); 348 } 349 xpq_queue_unlock(); 350 splx(s); /* XXX: removeme */ 351} 352 353void 354xpq_queue_invlpg(vaddr_t va) 355{ 356 struct mmuext_op op; 357 KASSERT(xpq_queue_locked()); 358 xpq_flush_queue(); 359 360 XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va)); 361 op.cmd = MMUEXT_INVLPG_LOCAL; 362 op.arg1.linear_addr = (va & ~PAGE_MASK); 363 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) 364 panic("xpq_queue_invlpg"); 365} 366 367void 368xen_mcast_invlpg(vaddr_t va, uint32_t cpumask) 369{ 370 mmuext_op_t op; 371 372 KASSERT(xpq_queue_locked()); 373 374 /* Flush pending page updates */ 375 xpq_flush_queue(); 376 377 op.cmd = MMUEXT_INVLPG_MULTI; 378 op.arg1.linear_addr = va; 379 op.arg2.vcpumask = &cpumask; 380 381 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) { 382 panic("xpq_queue_invlpg_all"); 383 } 384 385 return; 386} 387 388void 389xen_bcast_invlpg(vaddr_t va) 390{ 391 mmuext_op_t op; 392 393 /* Flush pending page updates */ 394 KASSERT(xpq_queue_locked()); 395 xpq_flush_queue(); 396 397 op.cmd = MMUEXT_INVLPG_ALL; 398 op.arg1.linear_addr = va; 399 400 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) { 401 panic("xpq_queue_invlpg_all"); 402 } 403 404 return; 405} 406 407/* This is a synchronous call. */ 408void 409xen_mcast_tlbflush(uint32_t cpumask) 410{ 411 mmuext_op_t op; 412 413 /* Flush pending page updates */ 414 KASSERT(xpq_queue_locked()); 415 xpq_flush_queue(); 416 417 op.cmd = MMUEXT_TLB_FLUSH_MULTI; 418 op.arg2.vcpumask = &cpumask; 419 420 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) { 421 panic("xpq_queue_invlpg_all"); 422 } 423 424 return; 425} 426 427/* This is a synchronous call. */ 428void 429xen_bcast_tlbflush(void) 430{ 431 mmuext_op_t op; 432 433 /* Flush pending page updates */ 434 KASSERT(xpq_queue_locked()); 435 xpq_flush_queue(); 436 437 op.cmd = MMUEXT_TLB_FLUSH_ALL; 438 439 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) { 440 panic("xpq_queue_invlpg_all"); 441 } 442 443 return; 444} 445 446/* This is a synchronous call. */ 447void 448xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask) 449{ 450 KASSERT(eva > sva); 451 452 /* Flush pending page updates */ 453 KASSERT(xpq_queue_locked()); 454 xpq_flush_queue(); 455 456 /* Align to nearest page boundary */ 457 sva &= ~PAGE_MASK; 458 eva &= ~PAGE_MASK; 459 460 for ( ; sva <= eva; sva += PAGE_SIZE) { 461 xen_mcast_invlpg(sva, cpumask); 462 } 463 464 return; 465} 466 467/* This is a synchronous call. 
/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	/* Round both addresses down to the page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables;
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen bootstrap pagetables.
 */
vaddr_t xen_pmap_bootstrap(void);

/* How many PDP pages do we need? */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page.  We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
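/*
 * The PAE count breaks down as: 1 L3 page + 4 L2 pages (the
 * "superpage") + 1 shadow page for L3[3] = 6.  In the non-PAE case
 * one page per level above the PTEs is enough, hence PTP_LEVELS - 1
 * (1 page on i386, 3 on amd64).
 */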
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not
	 * been counted here.  It's not a big issue to allocate one more
	 * L2 as pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif
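	/*
	 * At this point the while loop above has ensured that
	 *   KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE
	 *       <= KERNBASE + (count << L2_SHIFT)
	 * i.e. count L2 entries are enough to map everything we need,
	 * including the new page table pages themselves.
	 */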
	/*
	 * Install bootstrap pages.  We may need more L2 pages than the
	 * final table will have, as it's installed after the final table.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_lock();
	xpq_queue_tlb_flush();
	xpq_queue_unlock();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is the # of old tables (including PGD, PDTPE and PDE);
 * new_count is the # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	xpq_queue_lock();

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * We always set atdevbase, as it's used by init386 to find the
	 * first available VA.  map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));
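	/*
	 * Sketch of the layout built below, starting at new_pgd
	 * (amd64 case; sizes in pages):
	 *   PGD (1) | PDTPE (1) | PDE (1) | new_count PTE pages |
	 *   UAREA + dummy user PGD | HYPERVISOR_shared_info |
	 *   ISA I/O mem (dom0, final tables only)
	 */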
	/*
	 * Create bootstrap page tables.  What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * Enter the L2 pages in the L3.  The real L2 kernel PD will be
	 * the last one (so that pde[L2_SLOT_KERN] always points to the
	 * shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * it itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i ++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}
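	/*
	 * Each pass through the loop above fills one PTE page, covering
	 * one L2 VA range (NBPD_L2 bytes), and hooks it into the L2.
	 * The PTE pages themselves are mapped RO, as Xen requires for
	 * any page that is part of a live page table.
	 */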
	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD.  Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow.  But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
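	/*
	 * Pinning tells Xen to validate the page as a page table of the
	 * given level and keep it typed that way, so it can later be
	 * loaded into %cr3; the recursive PDIR_SLOT_PTE entry above is
	 * the usual x86 self-mapping trick that makes all PTEs
	 * addressable as ordinary virtual memory.
	 */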
	/* Save the phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)new_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		    "*pte %#" PRIxPADDR "\n",
		    addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "pte++"
		 * here.
		 */
		pte++;
	}
	xpq_flush_queue();
	xpq_queue_unlock();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assuming vaddr = paddr + KERNBASE
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	KASSERT(xpq_queue_locked());
	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */