/*	$NetBSD: x86_xpmap.c,v 1.4 2008/01/11 20:00:52 bouyer Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.4 2008/01/11 20:00:52 bouyer Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define XENPRINTF(x) printf x
#define XENPRINTK(x) printk x
#define XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define XENPRINTF(x)
#define XENPRINTK(x)
#define XENPRINTK2(x)
#endif
#define PRINTF(x) printf x
#define PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3, so kernel pages need PG_u. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
union start_info_union start_info_union;
paddr_t *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#ifdef XEN3
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
#else
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count))
#endif

void
xen_failsafe_handler(void)
{

        panic("xen_failsafe_handler called!\n");
}


void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
        vaddr_t va;
        vaddr_t end;
        pt_entry_t *ptp;
        int s;

#ifdef __x86_64__
        end = base + (entries << 3);
#else
        end = base + entries * sizeof(union descriptor);
#endif

        for (va = base; va < end; va += PAGE_SIZE) {
                KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
                ptp = kvtopte(va);
                XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
                    entries, ptp));
                pmap_pte_clearbits(ptp, PG_RW);
        }
        s = splvm();
        xpq_queue_set_ldt(base, entries);
        xpq_flush_queue();
        splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

void
xpq_flush_queue(void)
{
        int i, ok;

        XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
        for (i = 0; i < xpq_idx; i++)
                XENPRINTK2(("%d: %p %08x\n", i, (u_int)xpq_queue[i].ptr,
                    (u_int)xpq_queue[i].val));
        if (xpq_idx != 0 &&
            HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
                printf("xpq_flush_queue: %d entries\n", xpq_idx);
                for (i = 0; i < xpq_idx; i++)
                        printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
                            (u_int64_t)xpq_queue[i].ptr,
                            (u_int64_t)xpq_queue[i].val);
                panic("HYPERVISOR_mmu_update failed\n");
        }
        xpq_idx = 0;
}
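/*
 * Typical use of the update queue (illustrative sketch; ptep and npte
 * are hypothetical names, not part of this file): callers batch page
 * table writes and pay for one hypercall instead of one trap per
 * update, queueing at raised spl and flushing once:
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(ptep, npte);	(queued, not yet applied)
 *	xpq_flush_queue();			(one hypercall applies all)
 *	splx(s);
 *
 * xen_set_ldt() above follows exactly this pattern.
 */
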
static inline void
xpq_increment_idx(void)
{

        xpq_idx++;
        if (__predict_false(xpq_idx == XPQUEUE_SIZE))
                xpq_flush_queue();
}

void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
        XENPRINTK2(("xpq_queue_machphys_update ma=%p pa=%p\n",
            (void *)ma, (void *)pa));
        xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
        xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

void
xpq_queue_pde_update(pd_entry_t *ptr, pd_entry_t val)
{

        KASSERT(((paddr_t)ptr & 3) == 0);
        xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
        xpq_queue[xpq_idx].val = val;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

void
xpq_queue_pte_update(pt_entry_t *ptr, pt_entry_t val)
{

        KASSERT(((paddr_t)ptr & 3) == 0);
        xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
        xpq_queue[xpq_idx].val = val;
        xpq_increment_idx();
#ifdef XENDEBUG_SYNC
        xpq_flush_queue();
#endif
}

#ifdef XEN3
void
xpq_queue_pt_switch(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pt_switch: %p\n", (void *)pa));
        op.cmd = MMUEXT_NEW_BASEPTR;
        op.arg1.mfn = pa >> PAGE_SHIFT;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_pin_table: %p\n", (void *)pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;

#ifdef __x86_64__
        op.cmd = MMUEXT_PIN_L4_TABLE;
#else
        op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_unpin_table: %p\n", (void *)pa));
        op.arg1.mfn = pa >> PAGE_SHIFT;
        op.cmd = MMUEXT_UNPIN_TABLE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_set_ldt\n"));
        KASSERT(va == (va & ~PAGE_MASK));
        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = va;
        op.arg2.nr_ents = entries;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_tlb_flush\n"));
        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
        struct mmuext_op op;
        int s = splvm();
        xpq_flush_queue();

        XENPRINTK2(("xpq_flush_cache\n"));
        op.cmd = MMUEXT_FLUSH_CACHE;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_flush_cache");
        splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
        struct mmuext_op op;
        xpq_flush_queue();

        XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = (va & ~PAGE_MASK);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xpq_queue_invlpg");
}
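/*
 * Note: the mmuext operations above take machine frame numbers (MFNs),
 * not pseudo-physical ones, so callers translate first.  For example,
 * xen_bootstrap_tables() below pins the new PGD with
 *
 *	xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
 *
 * where xpmap_ptom_masked() (from the Xen pmap headers) translates a
 * pseudo-physical address to a machine address through the
 * xpmap_phys_to_machine_mapping[] table declared at the top of this file.
 */
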
int
xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
{
        mmu_update_t op;
        int ok;
        xpq_flush_queue();

        op.ptr = (paddr_t)ptr;
        op.val = val;
        if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
                return EFAULT;
        return (0);
}
#else /* XEN3 */
void
xpq_queue_pt_switch(paddr_t pa)
{

        XENPRINTK2(("xpq_queue_pt_switch: %p\n", (void *)pa));
        xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
        xpq_increment_idx();
}

void
xpq_queue_pin_table(paddr_t pa)
{

        XENPRINTK2(("xpq_queue_pin_table: %p\n", (void *)pa));
        xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
        xpq_increment_idx();
}

void
xpq_queue_unpin_table(paddr_t pa)
{

        XENPRINTK2(("xpq_queue_unpin_table: %p\n", (void *)pa));
        xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
        xpq_increment_idx();
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{

        XENPRINTK2(("xpq_queue_set_ldt\n"));
        KASSERT(va == (va & ~PAGE_MASK));
        xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
        xpq_queue[xpq_idx].val = MMUEXT_SET_LDT |
            (entries << MMUEXT_CMD_SHIFT);
        xpq_increment_idx();
}

void
xpq_queue_tlb_flush(void)
{

        XENPRINTK2(("xpq_queue_tlb_flush\n"));
        xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
        xpq_increment_idx();
}

void
xpq_flush_cache(void)
{
        int s = splvm();

        XENPRINTK2(("xpq_flush_cache\n"));
        xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
        xpq_increment_idx();
        xpq_flush_queue();
        splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{

        XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
        xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
        xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
        xpq_increment_idx();
}

int
xpq_update_foreign(pt_entry_t *ptr, pt_entry_t val, int dom)
{
        mmu_update_t xpq_up[3];

        xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
        xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
        xpq_up[1].ptr = (paddr_t)ptr;
        xpq_up[1].val = val;
        if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
                return EFAULT;
        return (0);
}
#endif /* XEN3 */

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
        int i;

        XENPRINTK2(("idx: %d\n", xpq_idx));
        for (i = 0; i < xpq_idx; i++) {
                sprintf(XBUF, "%x %08x ", (u_int)xpq_queue[i].ptr,
                    (u_int)xpq_queue[i].val);
                if (++i < xpq_idx)
                        sprintf(XBUF + strlen(XBUF), "%x %08x ",
                            (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
                if (++i < xpq_idx)
                        sprintf(XBUF + strlen(XBUF), "%x %08x ",
                            (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
                if (++i < xpq_idx)
                        sprintf(XBUF + strlen(XBUF), "%x %08x ",
                            (u_int)xpq_queue[i].ptr, (u_int)xpq_queue[i].val);
                XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
        }
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs ? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif
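/*
 * Sizing sketch (illustrative): one L2 entry maps NBPD_L2 bytes of VA,
 * so the i386 branch of xen_pmap_bootstrap() below starts from
 *
 *	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
 *
 * i.e. the number of L2 windows needed to cover mapsize bytes, and then
 * keeps incrementing count until the mapped area plus the count L1 pages
 * and the l2_4_count upper-level pages all fit below what those count
 * entries can map.
 */
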
/*
 * Construct and switch to new pagetables.  The returned value
 * (first_avail) is the first vaddr we can use after we get rid of the
 * Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

vaddr_t
xen_pmap_bootstrap(void)
{
        int count, oldcount;
        long mapsize;
        const int l2_4_count = PTP_LEVELS - 1;
        vaddr_t bootstrap_tables, init_tables;

        xpmap_phys_to_machine_mapping = (paddr_t *)xen_start_info.mfn_list;
        init_tables = xen_start_info.pt_base;
        __PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

        /* Space after the Xen bootstrap tables should be free */
        bootstrap_tables = xen_start_info.pt_base +
            (xen_start_info.nr_pt_frames * PAGE_SIZE);

        /*
         * Calculate how much space we need:
         * first, everything mapped before the Xen bootstrap tables
         */
        mapsize = init_tables - KERNTEXTOFF;
        /* after the tables we'll have:
         *  - UAREA
         *  - dummy user PGD (x86_64)
         *  - HYPERVISOR_shared_info
         *  - ISA I/O mem (if needed)
         */
        mapsize += UPAGES * NBPG;
#ifdef __x86_64__
        mapsize += NBPG;
#endif
        mapsize += NBPG;

#ifdef DOM0OPS
        if (xen_start_info.flags & SIF_INITDOMAIN) {
                /* space for ISA I/O mem */
                mapsize += IOM_SIZE;
        }
#endif
        /* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
        count = TABLE_L2_ENTRIES;
#else
        count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

        /* now compute how many L2 pages we need exactly */
        XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n",
            mapsize, count));
        while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
            ((long)count << L2_SHIFT) + KERNBASE) {
                count++;
        }
#ifndef __x86_64__
        nkptp[1] = count;
#endif

        /*
         * Install bootstrap pages.  We may need more L2 pages than the
         * final table will have, as it's installed after the final table.
         */
        oldcount = count;

bootstrap_again:
        XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
        /*
         * The Xen space we'll reclaim may not be enough for our new page
         * tables; move the bootstrap tables if necessary.
         */
        if (bootstrap_tables < init_tables +
            ((count + l2_4_count) * PAGE_SIZE))
                bootstrap_tables = init_tables +
                    ((count + l2_4_count) * PAGE_SIZE);
        /* make sure we have enough to map the bootstrap_tables */
        if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
            ((long)oldcount << L2_SHIFT) + KERNBASE) {
                oldcount++;
                goto bootstrap_again;
        }

        /* Create temporary tables */
        xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
            xen_start_info.nr_pt_frames, oldcount, 0);

        /* Create final tables */
        xen_bootstrap_tables(bootstrap_tables, init_tables,
            oldcount + l2_4_count, count, 1);

        /* zero out free space after tables */
        memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
            (UPAGES + 1) * NBPG);
        return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new table and switch to it.
 * old_count is the number of old tables (including the PGD, PDTPE and PDE);
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */

static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
    int old_count, int new_count, int final)
{
        pd_entry_t *pdtpe, *pde, *pte;
        pd_entry_t *cur_pgd, *bt_pgd;
        paddr_t addr, page;
        vaddr_t avail, text_end, map_end;
        int i;
        extern char __data_start;

        __PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
            old_pgd, new_pgd, old_count, new_count));
        text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
        /*
         * size of R/W area after kernel text:
         *     xencons_interface (if present)
         *     xenstore_interface (if present)
         *     table pages (new_count + (PTP_LEVELS - 1) entries)
         * extra mappings (only when final is true):
         *     UAREA
         *     dummy user PGD (x86_64 only)/gdt page (i386 only)
         *     HYPERVISOR_shared_info
         *     ISA I/O mem (if needed)
         */
        map_end = new_pgd + ((new_count + PTP_LEVELS - 1) * NBPG);
        if (final) {
                map_end += (UPAGES + 1) * NBPG;
                HYPERVISOR_shared_info = (shared_info_t *)map_end;
                map_end += NBPG;
        }
        /*
         * We always set atdevbase, as it's used by init386 to find the
         * first available VA.  map_end is updated only if we are dom0, so
         * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
         * this case.
         */
        if (final)
                atdevbase = map_end;
#ifdef DOM0OPS
        if (final && (xen_start_info.flags & SIF_INITDOMAIN)) {
                /* ISA I/O mem */
                map_end += IOM_SIZE;
        }
#endif /* DOM0OPS */

        __PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
            text_end, map_end));

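        /*
         * Page accounting (illustrative): with PTP_LEVELS == 4 (amd64),
         * the new hierarchy takes one page each for the L4 (PGD), L3
         * (PDTPE) and L2 (PDE), plus new_count L1 pages, which is why
         * map_end above reserves (new_count + PTP_LEVELS - 1) pages
         * starting at new_pgd.
         */
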
        /*
         * Create bootstrap page tables.  What we need:
         * - a PGD (level 4)
         * - a PDTPE (level 3)
         * - a PDE (level 2)
         * - some PTEs (level 1)
         */

        cur_pgd = (pd_entry_t *)old_pgd;
        bt_pgd = (pd_entry_t *)new_pgd;
        memset(bt_pgd, 0, PAGE_SIZE);
        avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
        /* Install level 3 */
        pdtpe = (pd_entry_t *)avail;
        memset(pdtpe, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((paddr_t)pdtpe) - KERNBASE;
        bt_pgd[pl4_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

        __PRINTK(("L3 va 0x%lx pa 0x%lx entry 0x%lx -> L4[0x%x]\n",
            pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
        pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
        /* Level 2 */
        pde = (pd_entry_t *)avail;
        memset(pde, 0, PAGE_SIZE);
        avail += PAGE_SIZE;

        addr = ((paddr_t)pde) - KERNBASE;
        pdtpe[pl3_pi(KERNTEXTOFF)] =
            xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
        __PRINTK(("L2 va 0x%lx pa 0x%lx entry 0x%lx -> L3[0x%x]\n",
            pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#else
        pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

        /* Level 1 */
        page = KERNTEXTOFF;
        for (i = 0; i < new_count; i++) {
                paddr_t cur_page = page;

                pte = (pd_entry_t *)avail;
                avail += PAGE_SIZE;

                memset(pte, 0, PAGE_SIZE);
                while (pl2_pi(page) == pl2_pi(cur_page)) {
                        if (page >= map_end) {
                                /* not mapped at all */
                                pte[pl1_pi(page)] = 0;
                                page += PAGE_SIZE;
                                continue;
                        }
                        pte[pl1_pi(page)] =
                            xpmap_ptom_masked(page - KERNBASE);
                        if (page == (vaddr_t)HYPERVISOR_shared_info) {
                                pte[pl1_pi(page)] =
                                    xen_start_info.shared_info;
                                __PRINTK(("HYPERVISOR_shared_info "
                                    "va 0x%lx pte 0x%lx\n",
                                    HYPERVISOR_shared_info,
                                    pte[pl1_pi(page)]));
                        }
#ifdef XEN3
                        if (xpmap_ptom_masked(page - KERNBASE) ==
                            (xen_start_info.console_mfn << PAGE_SHIFT)) {
                                xencons_interface = (void *)page;
                                pte[pl1_pi(page)] =
                                    (xen_start_info.console_mfn << PAGE_SHIFT);
                                __PRINTK(("xencons_interface "
                                    "va 0x%lx pte 0x%lx\n",
                                    xencons_interface, pte[pl1_pi(page)]));
                        }
                        if (xpmap_ptom_masked(page - KERNBASE) ==
                            (xen_start_info.store_mfn << PAGE_SHIFT)) {
                                xenstore_interface = (void *)page;
                                pte[pl1_pi(page)] =
                                    (xen_start_info.store_mfn << PAGE_SHIFT);
                                __PRINTK(("xenstore_interface "
                                    "va 0x%lx pte 0x%lx\n",
                                    xenstore_interface, pte[pl1_pi(page)]));
                        }
#endif /* XEN3 */
#ifdef DOM0OPS
                        if (page >= (vaddr_t)atdevbase &&
                            page < (vaddr_t)atdevbase + IOM_SIZE) {
                                pte[pl1_pi(page)] =
                                    IOM_BEGIN + (page - (vaddr_t)atdevbase);
                        }
#endif
                        pte[pl1_pi(page)] |= PG_k | PG_V;
                        if (page < text_end) {
                                /* map kernel text RO */
                                pte[pl1_pi(page)] |= 0;
                        } else if (page >= old_pgd &&
                            page < old_pgd + (old_count * PAGE_SIZE)) {
                                /* map old page tables RO */
                                pte[pl1_pi(page)] |= 0;
                        } else if (page >= new_pgd &&
                            page < new_pgd +
                            ((new_count + PTP_LEVELS - 1) * PAGE_SIZE)) {
                                /* map new page tables RO */
                                pte[pl1_pi(page)] |= 0;
                        } else {
                                /* map page RW */
                                pte[pl1_pi(page)] |= PG_RW;
                        }
                        if (page == old_pgd || page >= new_pgd)
                                __PRINTK(("va 0x%lx pa 0x%lx "
                                    "entry 0x%lx -> L1[0x%x]\n",
                                    page, page - KERNBASE,
                                    pte[pl1_pi(page)], pl1_pi(page)));
                        page += PAGE_SIZE;
                }

                addr = ((paddr_t)pte) - KERNBASE;
                pde[pl2_pi(cur_page)] =
                    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
                __PRINTK(("L1 va 0x%lx pa 0x%lx entry 0x%lx -> L2[0x%x]\n",
                    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
                /* Mark readonly */
                xen_bt_set_readonly((vaddr_t)pte);
        }

        /* Install recursive page tables mapping */
        bt_pgd[PDIR_SLOT_PTE] =
            xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
        __PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%lx entry 0x%lx\n",
            new_pgd, new_pgd - KERNBASE, bt_pgd[PDIR_SLOT_PTE]));

        /* Mark tables RO */
        xen_bt_set_readonly((vaddr_t)pde);
#if PTP_LEVELS > 2
        xen_bt_set_readonly((vaddr_t)pdtpe);
#endif
#if PTP_LEVELS > 3
        xen_bt_set_readonly(new_pgd);
#endif
        /* Pin the PGD */
        __PRINTK(("pin PGD\n"));
        xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#ifdef __i386__
        /* Save phys. addr of PDP, for libkvm. */
        PDPpaddr = new_pgd;
#endif
        /* Switch to new tables */
        __PRINTK(("switch to PGD\n"));
        xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
        __PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%lx\n",
            bt_pgd[PDIR_SLOT_PTE]));

        /* Now we can safely reclaim the space taken by the old tables */

        __PRINTK(("unpin old PGD\n"));
        /* Unpin old PGD */
        xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
        /* Mark old tables RW */
        page = old_pgd;
        addr = (paddr_t)pde[pl2_pi(page)] & PG_FRAME;
        addr = xpmap_mtop(addr);
        pte = (pd_entry_t *)(addr + KERNBASE);
        pte += pl1_pi(page);
        __PRINTK(("*pde 0x%lx addr 0x%lx pte 0x%lx\n",
            pde[pl2_pi(page)], addr, pte));
        while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
                addr = xpmap_ptom(((paddr_t)pte) - KERNBASE);
                XENPRINTK(("addr 0x%lx pte 0x%lx *pte 0x%lx\n",
                    addr, pte, *pte));
                xpq_queue_pte_update((pt_entry_t *)addr, *pte | PG_RW);
                page += PAGE_SIZE;
                /*
                 * Our PTEs are contiguous, so it's safe to just
                 * "++" here.
                 */
                pte++;
        }
        xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assumes vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
        pt_entry_t entry;

        entry = xpmap_ptom_masked(page - KERNBASE);
        entry |= PG_k | PG_V;

        HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
        struct mmuext_op op;
        int s = splvm();

        xpq_flush_queue();
        op.cmd = MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
                panic("xen_set_user_pgd: failed to install new user page"
                    " directory %lx", page);
        splx(s);
}
#endif /* __x86_64__ */