x86_mem.c revision 313898
/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/x86_mem.c 313898 2017-02-17 21:08:32Z kib $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * Pentium Pro+ memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the x86-64 programmer's manual,
 * vol. 2.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)						\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)					\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len)					\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
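
/*
 * For illustration: mrvalid(0x100000, 0x40000) holds, since the base is
 * 4K-aligned, the length is a power of two >= 4K, and the base is aligned
 * to the length.  mrvalid(0x101000, 0x40000) fails the last test
 * (0x101000 & 0x3ffff != 0): an MTRR can only describe a naturally
 * aligned, power-of-two sized region.
 */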

#define	mrcopyflags(curr, new)					\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0,
    "Disable MTRRs.");

static void	x86_mrinit(struct mem_range_softc *sc);
static int	x86_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	x86_mrAPinit(struct mem_range_softc *sc);
static void	x86_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops x86_mrops = {
	x86_mrinit,
	x86_mrset,
	x86_mrAPinit,
	x86_mrreinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/*
 * The bitmask for the PhysBase and PhysMask fields of the variable MTRRs.
 */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	x86_mrfetch(struct mem_range_softc *sc);
static int	x86_mtrrtype(int flags);
static int	x86_mrt2mtrr(int flags, int oldval);
static int	x86_mtrrconflict(int flag1, int flag2);
static void	x86_mrstore(struct mem_range_softc *sc);
static void	x86_mrstoreone(void *arg);
static struct mem_range_desc *x86_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	x86_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	x86_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* ia32 MTRR type to memory range type conversion. */
static int x86_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define	MTRRTOMRTLEN	nitems(x86_mtrrtomrt)

static int
x86_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (x86_mtrrtomrt[val]);
}

/*
 * x86 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
x86_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}
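
/*
 * The WB/UC exception above matches the MTRR precedence rules in the
 * Intel SDM: where a write-back and an uncacheable range overlap, the
 * effective type is simply uncacheable, so such an overlap is harmless.
 * Any overlap involving an unknown type is treated as a conflict.
 */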

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Ensure that the direct map region does not contain any mappings
 * that span MTRRs of different types.  However, the fixed MTRRs can
 * be ignored, because a large page mapping the first 1 MB of physical
 * memory is a special case that the processor handles.  Invalidate
 * any old TLB entries that might hold inconsistent memory type
 * information.
 */
static void
x86_mr_split_dmap(struct mem_range_softc *sc __unused)
{
#ifdef __amd64__
	struct mem_range_desc *mrd;
	int i;

	i = (sc->mr_cap & MR686_FIXMTRR) ?
	    MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
#endif
}

/*
 * Fetch the current MTRR settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
x86_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    x86_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask.  Ick. */
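		/*
		 * For illustration: with a 36-bit physical address
		 * space, mtrr_physmask is 0xffffff000.  A PhysMask
		 * value of 0xffc000000 gives
		 * (~0xffc000000 & 0xfffffffff) + 1 = 0x4000000,
		 * i.e. a 64MB range.
		 */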
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags.
 */
static int
x86_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (x86_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == x86_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
x86_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = x86_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
x86_mrstore(struct mem_range_softc *sc)
{

#ifdef SMP
	smp_rendezvous(NULL, x86_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	x86_mrstoreone(sc);
	enable_intr();
#endif
}
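
/*
 * The sequence in x86_mrstoreone() below follows the MTRR update
 * procedure documented by Intel: disable caches via CR0.CD, flush
 * caches and TLBs, disable MTRRs, write the new values, flush again,
 * then re-enable MTRRs and restore CR0/CR4.  This is also why the
 * whole update runs in a critical section on each CPU.
 */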

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
x86_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	critical_enter();

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < MTRR_N64K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= x86_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr).
 */
static struct mem_range_desc *
x86_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < MTRR_N64K + MTRR_N16K + MTRR_N4K;
	    i++, mrd++)
		if (addr >= mrd->mr_base &&
		    addr < mrd->mr_base + mrd->mr_len)
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
x86_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if ((first_md = x86_mtrrfixsearch(sc, mrd->mr_base)) == NULL ||
	    (last_md = x86_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1))
	    == NULL)
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if ((mrd->mr_flags & MDF_FORCE) == 0) {
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}
	}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
x86_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ?
	    MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if (curr_md->mr_base == mrd->mr_base &&
			    curr_md->mr_len == mrd->mr_len) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky. */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    (curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN)
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (x86_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
x86_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    x86_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	\
    ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
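
/*
 * With the architectural counts (8 x 64K, 16 x 16K and 64 x 4K fixed
 * ranges), FIXTOP evaluates to 0x100000: the fixed MTRRs cover exactly
 * the first 1 MB of physical memory.
 */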

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) != 0 &&
		    mrd->mr_base + mrd->mr_len <= FIXTOP) {
			if ((error = x86_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = x86_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	x86_mr_split_dmap(sc);

	/* Update the hardware. */
	x86_mrstore(sc);

	/* Refetch to see where we're at. */
	x86_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
x86_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	int i, nmdesc;

	if (sc->mr_desc != NULL)
		/* Already initialized. */
		return;

	nmdesc = 0;
	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.
	 */
	mtrr_physmask = ((1UL << cpu_maxphyaddr) - 1) & ~0xfffUL;

	/* If fixed MTRRs are supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings; anything set now is considered to
	 * have been set by the firmware.  (XXX has something already
	 * played here?)
	 */
	x86_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	x86_mr_split_dmap(sc);
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
x86_mrAPinit(struct mem_range_softc *sc)
{

	x86_mrstoreone(sc);
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the
 * descriptor list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
x86_mrreinit(struct mem_range_softc *sc)
{

#ifdef SMP
	smp_rendezvous(NULL, (void *)x86_mrAPinit, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	x86_mrAPinit(sc);
	enable_intr();
#endif
}

static void
x86_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_CENTAUR:
		break;
	default:
		return;
	}
	mem_range_softc.mr_op = &x86_mrops;
	x86_mrinit(&mem_range_softc);
}
SYSINIT(x86memdev, SI_SUB_CPU, SI_ORDER_ANY, x86_mem_drvinit, NULL);
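
/*
 * For context: these operations are not normally called directly.
 * Userland reaches x86_mrset() through the MEMRANGE_SET ioctl on
 * /dev/mem (see memcontrol(8)), and kernel code through the
 * mem_range_attr_set() helper declared in <sys/memrange.h>.
 */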