/*	$NetBSD: x86_xpmap.c,v 1.78 2018/07/26 15:46:09 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.78 2018/07/26 15:46:09 maxv Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#ifdef XENDEBUG
#define	__PRINTK(x) printk x
#else
#define	__PRINTK(x)
#endif

/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);

volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
unsigned long *xpmap_phys_to_machine_mapping __read_mostly;
kmutex_t pte_lock __cacheline_aligned;
vaddr_t xen_dummy_page;
pt_entry_t xpmap_pg_nx __read_mostly;

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];

void xen_failsafe_handler(void);

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
	uint32_t xcpum_km[2];
#else
	uint32_t xcpum_km[1];
#endif
	u_long xcpum_xm;
} xcpumask_t;
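
/*
 * Illustrative note (editorial, not new functionality): callers export a
 * kcpuset into the uint32_t view of the union, then hand the overlapping
 * u_long view to the hypervisor, as done in xen_mcast_invlpg() and
 * xen_mcast_tlbflush() below:
 *
 *	xcpumask_t xcpumask;
 *	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));
 *	op.arg2.vcpumask = &xcpumask.xcpum_xm;
 *
 * This aliasing is only correct on a little-endian machine, which x86 is.
 */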
void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm(); /* XXXSMP */
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

void
xpq_flush_queue(void)
{
	mmu_update_t *xpq_queue;
	int done = 0, ret;
	size_t xpq_idx;

	xpq_idx = curcpu()->ci_xpq_idx;
	xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];

retry:
	ret = HYPERVISOR_mmu_update(xpq_queue, xpq_idx, &done, DOMID_SELF);

	if (ret < 0 && xpq_idx != 0) {
		printf("xpq_flush_queue: %zu entries (%d successful) on "
		    "cpu%d (%ld)\n",
		    xpq_idx, done, curcpu()->ci_index, curcpu()->ci_cpuid);

		if (done != 0) {
			xpq_queue += done;
			xpq_idx -= done;
			done = 0;
			goto retry;
		}

		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	curcpu()->ci_xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++curcpu()->ci_xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}
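
/*
 * Usage note (editorial): the two queueing functions below only record
 * updates in the per-cpu queue; nothing is guaranteed to reach the
 * hypervisor until the queue fills up or is flushed explicitly. A caller
 * that needs a mapping to be visible immediately must therefore flush,
 * e.g.:
 *
 *	xpq_queue_pte_update(ma, npte);
 *	xpq_flush_queue();
 */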
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	size_t xpq_idx = curcpu()->ci_xpq_idx;

	xpq_queue[xpq_idx].ptr = ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
}

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = lvl;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_UNPIN_TABLE;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xpq_flush_cache(void)
{
	int s = splvm(); /* XXXSMP */

	xpq_flush_queue();

	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_COPY_PAGE;
	op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
	op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

void
xen_pagezero(paddr_t pa)
{
	mmuext_op_t op;

	op.cmd = MMUEXT_CLEAR_PAGE;
	op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic(__func__);
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
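
/*
 * Editorial note: l2_4_count is the number of page table pages the
 * bootstrap tables use above level 1, i.e. everything except the PTE
 * pages themselves: 4 on amd64 (two L4s, one L3, one L2), 6 with PAE
 * (one L3, four L2s plus the L2 shadow), and 1 otherwise (the single L2).
 */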
/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new
 * page tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;
	u_int descs[4];

	xen_init_features();

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Set the NX/XD bit, if available. descs[3] = %edx. */
	x86_cpuid(0x80000001, descs);
	xpmap_pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

	/* Space after the Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;

	/*
	 * After the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
	mapsize += PAGE_SIZE;
#endif
	mapsize += PAGE_SIZE;
	mapsize += PAGE_SIZE;
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

#ifdef i386
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages here than the
	 * final table will have, since the bootstrap tables are installed
	 * after the final tables.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out PROC0 UAREA and DUMMY PAGE. */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
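
/*
 * A worked example of the L2 accounting above (editorial, illustrative;
 * numbers assume i386 without PAE, where PAGE_SIZE is 4KB and an L2 page
 * maps 4MB): each increment of "count" extends the window mapped from
 * KERNBASE by NBPD_L2 bytes, but also grows the area that must be mapped
 * by one table page. The loop therefore stops at the first "count" whose
 * window covers the kernel image, the preallocated areas, and the
 * (count + l2_4_count) table pages appended after them.
 */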
/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, map_end;
	int i;
	extern char __rodata_start;
	extern char __data_start;
	extern char __kernel_end;
	extern char *early_zerop; /* from pmap.c */
#ifdef i386
	extern union descriptor tmpgdt[];
#endif

	/*
	 * Layout of RW area after the kernel image:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / GDT page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     early_zerop
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
	if (final) {
		map_end += UPAGES * PAGE_SIZE;
		xen_dummy_page = (vaddr_t)map_end;
		map_end += PAGE_SIZE;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += PAGE_SIZE;
		early_zerop = (char *)map_end;
		map_end += PAGE_SIZE;
	}

	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final) {
		atdevbase = map_end;
#ifdef DOM0OPS
		if (xendomain_is_dom0()) {
			/* ISA I/O mem */
			map_end += IOM_SIZE;
		}
#endif
	}

	__PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	bt_pgd = (pd_entry_t *)new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
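
	/*
	 * Editorial note on the amd64 block below: two L4 pages are set up,
	 * the per-cpu L4 actually used by cpu0 (bt_cpu_pgd, saved as
	 * ci_kpm_pdir when final is true) and pmap_kernel()'s "shadow" L4
	 * (bt_pgd, whose physical address ends up in PDPpaddr). Kernel
	 * entries are installed in both, as seen at pl4_pi(KERNTEXTOFF).
	 */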
#ifdef __x86_64__
	/* Per-cpu L4 */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 */
	bt_pgd = (pd_entry_t *)avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install L3 */
	pdtpe = (pd_entry_t *)avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;

	/* Level 2 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long)pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
#elif defined(PAE)
	pdtpe = bt_pgd;

	/*
	 * Our PAE-style level 2, 5 contiguous pages (4 L2 + 1 shadow).
	 *                  +-----------------+----------------+---------+
	 * Physical layout: | 3 * USERLAND L2 | L2 KERN SHADOW | L2 KERN |
	 *                  +-----------------+----------------+---------+
	 * However, we enter pdtpe[3] into L2 KERN, and not L2 KERN SHADOW.
	 * This way, pde[L2_SLOT_KERN] always points to the shadow.
	 */
	pde = (pd_entry_t *)avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;

	/*
	 * Link L2 pages in L3, with a special case for L2 KERN. Xen doesn't
	 * want RW permissions in L3 entries, it'll add them itself.
	 */
	addr = ((u_long)pde) - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_V;
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_V;
#else
	pdtpe = bt_pgd;
	pde = bt_pgd;
#endif

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *)avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
				pte[pl1_pi(page)] |= xpmap_pg_nx;
			}
#endif

			pte[pl1_pi(page)] |= PG_V;
			if (page < (vaddr_t)&__rodata_start) {
				/* Map the kernel text RX. */
				pte[pl1_pi(page)] |= PG_RO;
			} else if (page >= (vaddr_t)&__rodata_start &&
			    page < (vaddr_t)&__data_start) {
				/* Map the kernel rodata R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= old_pgd &&
			    page < old_pgd + (old_count * PAGE_SIZE)) {
				/* Map the old page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* Map the new page tables R. */
				pte[pl1_pi(page)] |= PG_RO | xpmap_pg_nx;
#ifdef i386
			} else if (page == (vaddr_t)tmpgdt) {
				/*
				 * Map bootstrap gdt R/O. Later, we will re-add
				 * this page to uvm after making it writable.
				 */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
#endif
			} else if (page >= (vaddr_t)&__data_start &&
			    page < (vaddr_t)&__kernel_end) {
				/* Map the kernel data+bss RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			} else {
				/* Map the page RW. */
				pte[pl1_pi(page)] |= PG_RW | xpmap_pg_nx;
			}

			page += PAGE_SIZE;
		}

		addr = ((u_long)pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_RW | PG_V;

		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t)pte);
	}
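
	/*
	 * Summary of the protection map established above (editorial):
	 * kernel text is RX, rodata is RO+NX, both the old and the new
	 * page tables are RO+NX (Xen requires page table pages to be
	 * mapped read-only), and everything else, including data+bss,
	 * is RW+NX.
	 */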
	/* Install recursive page tables mapping */
#ifdef PAE
	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead, we enter
	 * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
	 * have to enter the shadow after switching %cr3, or Xen will refcount
	 * some PTEs with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_V |
		    xpmap_pg_nx;
	}

	/* Mark tables RO, and pin L2 KERN SHADOW. */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#else
	/* Recursive entry in pmap_kernel(). */
	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#ifdef __x86_64__
	/* Recursive entry in higher-level per-cpu PD. */
	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
	    | PG_RO | PG_V | xpmap_pg_nx;
#endif

	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t)pde);
#endif

#if defined(__x86_64__) || defined(PAE)
	xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#ifdef __x86_64__
	xen_bt_set_readonly(new_pgd);
#endif

	/* Pin the PGD */
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(PAE)
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
	if (final) {
		/* Save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* Now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* Save the address of the real per-cpu L4 page. */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
	}
#endif
	__USE(pdtpe);
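
	/*
	 * Editorial note: Xen keeps page-table pages typed and read-only
	 * while they are pinned or referenced as page tables, so before the
	 * old tables' memory can be reused, the code below must unpin the
	 * old PGD and flip the old tables' mappings back to RW.
	 */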
	/*
	 * Now we can safely reclaim the space taken by the old tables.
	 */

	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

	/* Mark old tables RW */
	page = old_pgd;
	addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
	pte = (pd_entry_t *)((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long)pte) - KERNBASE);
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
 */
static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_V | xpmap_pg_nx;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm(); /* XXXSMP */

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */