/*	$NetBSD: x86_xpmap.c,v 1.68 2016/12/16 19:52:22 maxv Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.68 2016/12/16 19:52:22 maxv Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <x86/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef XENDEBUG
/* #define XENDEBUG_SYNC */

#ifdef XENDEBUG
#define XENPRINTF(x) printf x
#define XENPRINTK2(x) /* printk x */
static char XBUF[256];
#else
#define XENPRINTF(x)
#define XENPRINTK2(x)
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;
kmutex_t pte_lock;
vaddr_t xen_dummy_page;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
        HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, size_t, size_t, bool);

vaddr_t xen_locore(void);

/*
 * kcpuset internally uses an array of uint32_t while xen uses an array of
 * u_long. As we're little-endian we can cast one to the other.
 */
typedef union {
#ifdef _LP64
        uint32_t xcpum_km[2];
#else
        uint32_t xcpum_km[1];
#endif
        u_long xcpum_xm;
} xcpumask_t;

void
xen_failsafe_handler(void)
{

        panic("xen_failsafe_handler called!\n");
}

void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
        vaddr_t va;
        vaddr_t end;
        pt_entry_t *ptp;
        int s;

#ifdef __x86_64__
        end = base + (entries << 3);
#else
        end = base + entries * sizeof(union descriptor);
#endif

        for (va = base; va < end; va += PAGE_SIZE) {
                KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
                ptp = kvtopte(va);
                XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
                    base, entries, ptp));
                pmap_pte_clearbits(ptp, PG_RW);
        }
        s = splvm();
        xpq_queue_set_ldt(base, entries);
        splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

#ifdef i386
extern union descriptor tmpgdt[];
#endif

void
xpq_flush_queue(void)
{
        int i, ok = 0, ret;

        mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
        int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

        XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
        for (i = 0; i < xpq_idx; i++)
                XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
                    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
        ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

        if (xpq_idx != 0 && ret < 0) {
                struct cpu_info *ci;
                CPU_INFO_ITERATOR cii;

                printf("xpq_flush_queue: %d entries (%d successful) on "
                    "cpu%d (%ld)\n",
                    xpq_idx, ok, curcpu()->ci_index, curcpu()->ci_cpuid);

                if (ok != 0) {
                        xpq_queue += ok;
                        xpq_idx -= ok;
                        ok = 0;
                        goto retry;
                }

                for (CPU_INFO_FOREACH(cii, ci)) {
                        xpq_queue = xpq_queue_array[ci->ci_cpuid];
                        xpq_idx = xpq_idx_array[ci->ci_cpuid];
                        printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
                        for (i = 0; i < xpq_idx; i++) {
                                printf(" 0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
                                    xpq_queue[i].ptr, xpq_queue[i].val);
                        }
#ifdef __x86_64__
                        for (i = 0; i < PDIR_SLOT_PTE; i++) {
                                if (ci->ci_kpm_pdir[i] == 0)
                                        continue;
                                printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
                                    i, ci->ci_kpm_pdir[i]);
                        }
#endif
                }
                panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
        }
        xpq_idx_array[curcpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

        if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
                xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

        mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
        int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

        XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
            "\n", (int64_t)ma, (int64_t)pa));

        xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
        xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

        mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
        int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

        KASSERT((ptr & 3) == 0);
        xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
        xpq_queue[xpq_idx].val = val;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

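/*
 * Switch the current CPU to a new page-table base. pa is the machine
 * address of the new top-level table; any queued updates are flushed
 * before the switch.
 */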
void
xpq_queue_pt_switch(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
            (int64_t)pa, (int64_t)pa));
        op.cmd = MMUEXT_NEW_BASEPTR;
        op.arg1.mfn = pa >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
        struct mmuext_op op;

        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
            lvl + 1, pa));

        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = lvl;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
        struct mmuext_op op;

        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = MMUEXT_UNPIN_TABLE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
        struct mmuext_op op;

        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_set_ldt\n"));
        KASSERT(va == (va & ~PAGE_MASK));
        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = va;
        op.arg2.nr_ents = entries;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
        struct mmuext_op op;

        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_tlb_flush\n"));
        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
        int s = splvm();

        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_flush_cache\n"));
        asm("wbinvd":::"memory");
        splx(s); /* XXX: removeme */
}

void
xpq_queue_invlpg(vaddr_t va)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = (va & ~PAGE_MASK);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_invlpg");
}

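/* This is a synchronous call. */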
void
xen_mcast_invlpg(vaddr_t va, kcpuset_t *kc)
{
        xcpumask_t xcpumask;
        mmuext_op_t op;

        kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_INVLPG_MULTI;
        op.arg1.linear_addr = va;
        op.arg2.vcpumask = &xcpumask.xcpum_xm;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic("xen_mcast_invlpg");
        }

        return;
}

void
xen_bcast_invlpg(vaddr_t va)
{
        mmuext_op_t op;

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_INVLPG_ALL;
        op.arg1.linear_addr = va;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic("xen_bcast_invlpg");
        }

        return;
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
        xcpumask_t xcpumask;
        mmuext_op_t op;

        kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        op.arg2.vcpumask = &xcpumask.xcpum_xm;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic("xen_mcast_tlbflush");
        }

        return;
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
        mmuext_op_t op;

        /* Flush pending page updates */
        xpq_flush_queue();

        op.cmd = MMUEXT_TLB_FLUSH_ALL;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic("xen_bcast_tlbflush");
        }

        return;
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, kcpuset_t *kc)
{
        KASSERT(eva > sva);

        /* Flush pending page updates */
        xpq_flush_queue();

        /* Align to nearest page boundary */
        sva &= ~PAGE_MASK;
        eva &= ~PAGE_MASK;

        for ( ; sva <= eva; sva += PAGE_SIZE) {
                xen_mcast_invlpg(sva, kc);
        }

        return;
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
        KASSERT(eva > sva);

        /* Flush pending page updates */
        xpq_flush_queue();

        /* Align to nearest page boundary */
        sva &= ~PAGE_MASK;
        eva &= ~PAGE_MASK;

        for ( ; sva <= eva; sva += PAGE_SIZE) {
                xen_bcast_invlpg(sva);
        }

        return;
}

/* Copy a page */
void
xen_copy_page(paddr_t srcpa, paddr_t dstpa)
{
        mmuext_op_t op;

        op.cmd = MMUEXT_COPY_PAGE;
        op.arg1.mfn = xpmap_ptom(dstpa) >> PAGE_SHIFT;
        op.arg2.src_mfn = xpmap_ptom(srcpa) >> PAGE_SHIFT;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic(__func__);
        }
}

/* Zero a physical page */
void
xen_pagezero(paddr_t pa)
{
        mmuext_op_t op;

        op.cmd = MMUEXT_CLEAR_PAGE;
        op.arg1.mfn = xpmap_ptom(pa) >> PAGE_SHIFT;

        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
                panic(__func__);
        }
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
        mmu_update_t op;
        int ok;

        xpq_flush_queue();

        op.ptr = ptr;
        op.val = val;
        if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
                return EFAULT;
        return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
        int i;

        mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
        int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

        XENPRINTK2(("idx: %d\n", xpq_idx));
        for (i = 0; i < xpq_idx; i++) {
                snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
                    xpq_queue[i].ptr, xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            xpq_queue[i].ptr, xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            xpq_queue[i].ptr, xpq_queue[i].val);
                if (++i < xpq_idx)
                        snprintf(XBUF + strlen(XBUF),
                            sizeof(XBUF) - strlen(XBUF),
                            "%" PRIx64 " %08" PRIx64,
                            xpq_queue[i].ptr, xpq_queue[i].val);
                XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
        }
}
#endif


#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

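/*
 * l2_4_count: how many pages the bootstrap page tables use above the
 * PTE level.
 */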
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
 * them mapped by the L3 page. We also need a shadow page for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 *
 * Virtual address space of the kernel when leaving this function:
 * +--------------+------------------+-------------+------------+---------------
 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
 * +--------------+------------------+-------------+------------+---------------
 *
 * ------+-----------------+-------------+
 *  INFO | EARLY ZERO PAGE | ISA I/O MEM |
 * ------+-----------------+-------------+
 *
 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
 *
 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical
 * addresses preallocated.
 */
vaddr_t
xen_locore(void)
{
        size_t count, oldcount, mapsize;
        vaddr_t bootstrap_tables, init_tables;

        xen_init_features();

        memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

        xpmap_phys_to_machine_mapping =
            (unsigned long *)xen_start_info.mfn_list;

        /* Space after Xen bootstrap tables should be free */
        init_tables = xen_start_info.pt_base;
        bootstrap_tables = init_tables +
            (xen_start_info.nr_pt_frames * PAGE_SIZE);

        /*
         * Calculate how much space we need. First, everything mapped before
         * the Xen bootstrap tables.
         */
        mapsize = init_tables - KERNTEXTOFF;
        /* after the tables we'll have:
         *  - UAREA
         *  - dummy user PGD (x86_64)
         *  - HYPERVISOR_shared_info
         *  - early_zerop
         *  - ISA I/O mem (if needed)
         */
        mapsize += UPAGES * PAGE_SIZE;
#ifdef __x86_64__
        mapsize += PAGE_SIZE;
#endif
        mapsize += PAGE_SIZE;
        mapsize += PAGE_SIZE;
#ifdef DOM0OPS
        if (xendomain_is_dom0()) {
                mapsize += IOM_SIZE;
        }
#endif

        /*
         * At this point, mapsize doesn't include the table size.
         */
#ifdef __x86_64__
        count = TABLE_L2_ENTRIES;
#else
        count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

        /*
         * Now compute how many L2 pages we need exactly. This is useful only
         * on i386, since the initial count for amd64 is already enough.
         */
        while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
            KERNBASE + (count << L2_SHIFT)) {
                count++;
        }

#ifndef __x86_64__
        /*
         * One more L2 page: we'll allocate several pages after kva_start
         * in pmap_bootstrap() before pmap_growkernel(), which have not been
         * counted here. It's not a big issue to allocate one more L2 as
         * pmap_growkernel() will be called anyway.
         */
        count++;
        nkptp[1] = count;
#endif

        /*
         * Install bootstrap pages. The bootstrap tables may need more L2
         * pages than the final tables, since they are installed after
         * (above) the final tables.
         */
        oldcount = count;

bootstrap_again:

        /*
         * Xen space we'll reclaim may not be enough for our new page tables,
         * move bootstrap tables if necessary.
         */
        if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
                bootstrap_tables = init_tables +
                    ((count + l2_4_count) * PAGE_SIZE);

        /*
         * Make sure the number of L2 pages we have is enough to map everything
         * from KERNBASE to the bootstrap tables themselves.
         */
        if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
            KERNBASE + (oldcount << L2_SHIFT)) {
                oldcount++;
                goto bootstrap_again;
        }

        /* Create temporary tables */
        xen_bootstrap_tables(init_tables, bootstrap_tables,
            xen_start_info.nr_pt_frames, oldcount, false);

        /* Create final tables */
        xen_bootstrap_tables(bootstrap_tables, init_tables,
            oldcount + l2_4_count, count, true);

        /* Zero out PROC0 UAREA and DUMMY PAGE. */
        memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
            (UPAGES + 1) * PAGE_SIZE);

        /* Finally, flush TLB. */
        xpq_queue_tlb_flush();

        return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE).
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd, size_t old_count,
    size_t new_count, bool final)
{
        pd_entry_t *pdtpe, *pde, *pte;
        pd_entry_t *bt_pgd;
        paddr_t addr;
        vaddr_t page, avail, map_end;
        int i;
        extern char __rodata_start;
        extern char __data_start;
        extern char __kernel_end;
        extern char *early_zerop; /* from pmap.c */
        pt_entry_t pg_nx;
        u_int descs[4];

        /*
         * Set the NX/XD bit, if available. descs[3] = %edx.
         */
        x86_cpuid(0x80000001, descs);
        pg_nx = (descs[3] & CPUID_NOX) ? PG_NX : 0;

        /*
         * Layout of RW area after the kernel image:
         *     xencons_interface (if present)
         *     xenstore_interface (if present)
         *     table pages (new_count + l2_4_count entries)
         * Extra mappings (only when final is true):
         *     UAREA
         *     dummy user PGD (x86_64 only) / GDT page (i386 only)
         *     HYPERVISOR_shared_info
         *     early_zerop
         *     ISA I/O mem (if needed)
         */
        map_end = new_pgd + ((new_count + l2_4_count) * PAGE_SIZE);
        if (final) {
                map_end += UPAGES * PAGE_SIZE;
                xen_dummy_page = (vaddr_t)map_end;
                map_end += PAGE_SIZE;
                HYPERVISOR_shared_info = (shared_info_t *)map_end;
                map_end += PAGE_SIZE;
                early_zerop = (char *)map_end;
                map_end += PAGE_SIZE;
        }

        /*
         * We always set atdevbase, as it's used by init386 to find the first
         * available VA. map_end is updated only if we are dom0, so
         * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
         * this case.
         */
        if (final) {
                atdevbase = map_end;
#ifdef DOM0OPS
                if (xendomain_is_dom0()) {
                        /* ISA I/O mem */
                        map_end += IOM_SIZE;
                }
#endif
        }

        __PRINTK(("xen_bootstrap_tables map_end 0x%lx\n", map_end));
        __PRINTK(("console %#lx ", xen_start_info.console_mfn));
        __PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

        /*
         * Create bootstrap page tables. What we need:
         * - a PGD (level 4)
         * - a PDTPE (level 3)
         * - a PDE (level 2)
         * - some PTEs (level 1)
         */

        bt_pgd = (pd_entry_t *)new_pgd;
        memset(bt_pgd, 0, PAGE_SIZE);
        avail = new_pgd + PAGE_SIZE;

#if PTP_LEVELS > 3
        /* Per-cpu L4 */
        pd_entry_t *bt_cpu_pgd = bt_pgd;
        /* pmap_kernel() "shadow" L4 */
        bt_pgd = (pd_entry_t *)avail;
        memset(bt_pgd, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        /* Install L3 */
        pdtpe = (pd_entry_t *)avail;
        memset(pdtpe, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((u_long)pdtpe) - KERNBASE;
        bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#else
        pdtpe = bt_pgd;
#endif

#if PTP_LEVELS > 2
        /* Level 2 */
        pde = (pd_entry_t *)avail;
        memset(pde, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((u_long)pde) - KERNBASE;
        pdtpe[pl3_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
#elif defined(PAE)
        /* Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
        pde = (pd_entry_t *)avail;
        memset(pde, 0, PAGE_SIZE * 5);
        avail += PAGE_SIZE * 5;
        addr = ((u_long)pde) - KERNBASE;

        /*
         * Enter L2 pages in L3. The real L2 kernel PD will be the last one
         * (so that pde[L2_SLOT_KERN] always points to the shadow).
         */
        for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
                /*
                 * Xen doesn't want RW mappings in L3 entries, it'll add it
                 * itself.
                 */
                pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
        }
        addr += PAGE_SIZE;
        pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
#else
        pde = bt_pgd;
#endif

        /* Level 1 */
        page = KERNTEXTOFF;
        for (i = 0; i < new_count; i++) {
                vaddr_t cur_page = page;

                pte = (pd_entry_t *)avail;
                avail += PAGE_SIZE;

                memset(pte, 0, PAGE_SIZE);
                while (pl2_pi(page) == pl2_pi(cur_page)) {
                        if (page >= map_end) {
                                /* not mapped at all */
                                pte[pl1_pi(page)] = 0;
                                page += PAGE_SIZE;
                                continue;
                        }
                        pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
                        if (page == (vaddr_t)HYPERVISOR_shared_info) {
                                pte[pl1_pi(page)] = xen_start_info.shared_info;
                        }
                        if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
                            == xen_start_info.console.domU.mfn) {
                                xencons_interface = (void *)page;
                                pte[pl1_pi(page)] = xen_start_info.console_mfn;
                                pte[pl1_pi(page)] <<= PAGE_SHIFT;
                        }
                        if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
                            == xen_start_info.store_mfn) {
                                xenstore_interface = (void *)page;
                                pte[pl1_pi(page)] = xen_start_info.store_mfn;
                                pte[pl1_pi(page)] <<= PAGE_SHIFT;
                        }
#ifdef DOM0OPS
                        if (page >= (vaddr_t)atdevbase &&
                            page < (vaddr_t)atdevbase + IOM_SIZE) {
                                pte[pl1_pi(page)] =
                                    IOM_BEGIN + (page - (vaddr_t)atdevbase);
                                pte[pl1_pi(page)] |= pg_nx;
                        }
#endif

                        pte[pl1_pi(page)] |= PG_k | PG_V;
                        if (page < (vaddr_t)&__rodata_start) {
                                /* Map the kernel text RX. */
                                pte[pl1_pi(page)] |= PG_RO;
                        } else if (page >= (vaddr_t)&__rodata_start &&
                            page < (vaddr_t)&__data_start) {
                                /* Map the kernel rodata R. */
                                pte[pl1_pi(page)] |= PG_RO | pg_nx;
                        } else if (page >= old_pgd &&
                            page < old_pgd + (old_count * PAGE_SIZE)) {
                                /* Map the old page tables R. */
                                pte[pl1_pi(page)] |= PG_RO | pg_nx;
                        } else if (page >= new_pgd &&
                            page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
                                /* Map the new page tables R. */
                                pte[pl1_pi(page)] |= PG_RO | pg_nx;
#ifdef i386
                        } else if (page == (vaddr_t)tmpgdt) {
                                /*
                                 * Map bootstrap gdt R/O. Later, we will re-add
                                 * this page to uvm after making it writable.
                                 */
                                pte[pl1_pi(page)] = 0;
                                page += PAGE_SIZE;
                                continue;
#endif
                        } else if (page >= (vaddr_t)&__data_start &&
                            page < (vaddr_t)&__kernel_end) {
                                /* Map the kernel data+bss RW. */
                                pte[pl1_pi(page)] |= PG_RW | pg_nx;
                        } else {
                                /* Map the page RW. */
                                pte[pl1_pi(page)] |= PG_RW | pg_nx;
                        }

                        page += PAGE_SIZE;
                }

                addr = ((u_long)pte) - KERNBASE;
                pde[pl2_pi(cur_page)] =
                    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

                /* Mark readonly */
                xen_bt_set_readonly((vaddr_t)pte);
        }

        /* Install recursive page tables mapping */
#ifdef PAE
        /*
         * We need a shadow page for the kernel's L2 page.
         * The real L2 kernel PD will be the last one (so that
         * pde[L2_SLOT_KERN] always points to the shadow).
         */
        memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
        cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
        cpu_info_primary.ci_kpm_pdirpa =
            (vaddr_t)cpu_info_primary.ci_kpm_pdir - KERNBASE;

        /*
         * We don't enter a recursive entry from the L3 PD. Instead, we enter
         * the first 4 L2 pages, which includes the kernel's L2 shadow. But we
         * have to enter the shadow after switching %cr3, or Xen will refcount
         * some PTEs with the wrong type.
         */
        addr = (u_long)pde - KERNBASE;
        for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
                pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V |
                    pg_nx;
        }
#if 0
        addr += PAGE_SIZE; /* point to shadow L2 */
        pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
#endif
        /* Mark tables RO, and pin the kernel's shadow as L2 */
        addr = (u_long)pde - KERNBASE;
        for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
                xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
#if 0
                if (i == 2 || i == 3)
                        continue;
                xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
        }
        if (final) {
                addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
                xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
        }
#if 0
        addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
        xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */

        /* Recursive entry in pmap_kernel(). */
        bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
            | PG_k | PG_RO | PG_V | pg_nx;
#ifdef __x86_64__
        /* Recursive entry in higher-level per-cpu PD. */
        bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
            | PG_k | PG_RO | PG_V | pg_nx;
#endif

        /* Mark tables RO */
        xen_bt_set_readonly((vaddr_t)pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
        xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
        xen_bt_set_readonly(new_pgd);
#endif

        /* Pin the PGD */
#ifdef __x86_64__
        xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
        xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
        xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

        /* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
        PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
        PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

        /* Switch to new tables */
        xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));

#ifdef PAE
        if (final) {
                /* Save the address of the L3 page */
                cpu_info_primary.ci_pae_l3_pdir = pdtpe;
                cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

                /* Now enter the kernel's PTE mappings */
                addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
                xpq_queue_pte_update(
                    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
                    xpmap_ptom_masked(addr) | PG_k | PG_V);
                xpq_flush_queue();
        }
#elif defined(__x86_64__)
        if (final) {
                /* Save the address of the real per-cpu L4 page. */
                cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
                cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
        }
#endif
        __USE(pdtpe);

        /*
         * Now we can safely reclaim the space taken by the old tables.
         */

        /* Unpin old PGD */
        xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));

        /* Mark old tables RW */
        page = old_pgd;
        addr = xpmap_mtop((paddr_t)pde[pl2_pi(page)] & PG_FRAME);
        pte = (pd_entry_t *)((u_long)addr + KERNBASE);
        pte += pl1_pi(page);
        while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
                addr = xpmap_ptom(((u_long)pte) - KERNBASE);
                xpq_queue_pte_update(addr, *pte | PG_RW);
                page += PAGE_SIZE;
                /*
                 * Our PTEs are contiguous so it's safe to just "++" here.
                 */
                pte++;
        }
        xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
        pt_entry_t entry;

        entry = xpmap_ptom_masked(page - KERNBASE);
        entry |= PG_k | PG_V;

        HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
        struct mmuext_op op;
        int s = splvm();

        xpq_flush_queue();
        op.cmd = MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_set_user_pgd: failed to install new user page"
                    " directory %#" PRIxPADDR, page);
        splx(s);
}
#endif /* __x86_64__ */