/* x86_mem.c revision 177125 */
1/*- 2 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/i386/i386/i686_mem.c 177125 2008-03-12 22:09:19Z jhb $"); 29 30#include <sys/param.h> 31#include <sys/kernel.h> 32#include <sys/systm.h> 33#include <sys/malloc.h> 34#include <sys/memrange.h> 35#include <sys/smp.h> 36#include <sys/sysctl.h> 37 38#include <machine/md_var.h> 39#include <machine/specialreg.h> 40 41/* 42 * i686 memory range operations 43 * 44 * This code will probably be impenetrable without reference to the 45 * Intel Pentium Pro documentation. 
46 */ 47 48static char *mem_owner_bios = "BIOS"; 49 50#define MR686_FIXMTRR (1<<0) 51 52#define mrwithin(mr, a) \ 53 (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len))) 54#define mroverlap(mra, mrb) \ 55 (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base)) 56 57#define mrvalid(base, len) \ 58 ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \ 59 ((len) >= (1 << 12)) && /* length is >= 4k */ \ 60 powerof2((len)) && /* ... and power of two */ \ 61 !((base) & ((len) - 1))) /* range is not discontiuous */ 62 63#define mrcopyflags(curr, new) \ 64 (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK)) 65 66static int mtrrs_disabled; 67TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled); 68SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN, 69 &mtrrs_disabled, 0, "Disable i686 MTRRs."); 70 71static void i686_mrinit(struct mem_range_softc *sc); 72static int i686_mrset(struct mem_range_softc *sc, 73 struct mem_range_desc *mrd, int *arg); 74static void i686_mrAPinit(struct mem_range_softc *sc); 75 76static struct mem_range_ops i686_mrops = { 77 i686_mrinit, 78 i686_mrset, 79 i686_mrAPinit 80}; 81 82/* XXX for AP startup hook */ 83static u_int64_t mtrrcap, mtrrdef; 84 85/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. 
*/ 86static u_int64_t mtrr_physmask; 87 88static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc, 89 struct mem_range_desc *mrd); 90static void i686_mrfetch(struct mem_range_softc *sc); 91static int i686_mtrrtype(int flags); 92static int i686_mrt2mtrr(int flags, int oldval); 93static int i686_mtrrconflict(int flag1, int flag2); 94static void i686_mrstore(struct mem_range_softc *sc); 95static void i686_mrstoreone(void *arg); 96static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc, 97 u_int64_t addr); 98static int i686_mrsetlow(struct mem_range_softc *sc, 99 struct mem_range_desc *mrd, int *arg); 100static int i686_mrsetvariable(struct mem_range_softc *sc, 101 struct mem_range_desc *mrd, int *arg); 102 103/* i686 MTRR type to memory range type conversion */ 104static int i686_mtrrtomrt[] = { 105 MDF_UNCACHEABLE, 106 MDF_WRITECOMBINE, 107 MDF_UNKNOWN, 108 MDF_UNKNOWN, 109 MDF_WRITETHROUGH, 110 MDF_WRITEPROTECT, 111 MDF_WRITEBACK 112}; 113 114#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0])) 115 116static int 117i686_mtrr2mrt(int val) 118{ 119 120 if (val < 0 || val >= MTRRTOMRTLEN) 121 return (MDF_UNKNOWN); 122 return (i686_mtrrtomrt[val]); 123} 124 125/* 126 * i686 MTRR conflicts. Writeback and uncachable may overlap. 127 */ 128static int 129i686_mtrrconflict(int flag1, int flag2) 130{ 131 132 flag1 &= MDF_ATTRMASK; 133 flag2 &= MDF_ATTRMASK; 134 if (flag1 == flag2 || 135 (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) || 136 (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE)) 137 return (0); 138 return (1); 139} 140 141/* 142 * Look for an exactly-matching range. 
143 */ 144static struct mem_range_desc * 145mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd) 146{ 147 struct mem_range_desc *cand; 148 int i; 149 150 for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++) 151 if ((cand->mr_base == mrd->mr_base) && 152 (cand->mr_len == mrd->mr_len)) 153 return (cand); 154 return (NULL); 155} 156 157/* 158 * Fetch the current mtrr settings from the current CPU (assumed to 159 * all be in sync in the SMP case). Note that if we are here, we 160 * assume that MTRRs are enabled, and we may or may not have fixed 161 * MTRRs. 162 */ 163static void 164i686_mrfetch(struct mem_range_softc *sc) 165{ 166 struct mem_range_desc *mrd; 167 u_int64_t msrv; 168 int i, j, msr; 169 170 mrd = sc->mr_desc; 171 172 /* Get fixed-range MTRRs. */ 173 if (sc->mr_cap & MR686_FIXMTRR) { 174 msr = MSR_MTRR64kBase; 175 for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 176 msrv = rdmsr(msr); 177 for (j = 0; j < 8; j++, mrd++) { 178 mrd->mr_flags = 179 (mrd->mr_flags & ~MDF_ATTRMASK) | 180 i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 181 if (mrd->mr_owner[0] == 0) 182 strcpy(mrd->mr_owner, mem_owner_bios); 183 msrv = msrv >> 8; 184 } 185 } 186 msr = MSR_MTRR16kBase; 187 for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 188 msrv = rdmsr(msr); 189 for (j = 0; j < 8; j++, mrd++) { 190 mrd->mr_flags = 191 (mrd->mr_flags & ~MDF_ATTRMASK) | 192 i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 193 if (mrd->mr_owner[0] == 0) 194 strcpy(mrd->mr_owner, mem_owner_bios); 195 msrv = msrv >> 8; 196 } 197 } 198 msr = MSR_MTRR4kBase; 199 for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 200 msrv = rdmsr(msr); 201 for (j = 0; j < 8; j++, mrd++) { 202 mrd->mr_flags = 203 (mrd->mr_flags & ~MDF_ATTRMASK) | 204 i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 205 if (mrd->mr_owner[0] == 0) 206 strcpy(mrd->mr_owner, mem_owner_bios); 207 msrv = msrv >> 8; 208 } 209 } 210 } 211 212 /* Get remainder which must be variable MTRRs. 
*/ 213 msr = MSR_MTRRVarBase; 214 for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 215 msrv = rdmsr(msr); 216 mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 217 i686_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE); 218 mrd->mr_base = msrv & mtrr_physmask; 219 msrv = rdmsr(msr + 1); 220 mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ? 221 (mrd->mr_flags | MDF_ACTIVE) : 222 (mrd->mr_flags & ~MDF_ACTIVE); 223 224 /* Compute the range from the mask. Ick. */ 225 mrd->mr_len = (~(msrv & mtrr_physmask) & 226 (mtrr_physmask | 0xfffLL)) + 1; 227 if (!mrvalid(mrd->mr_base, mrd->mr_len)) 228 mrd->mr_flags |= MDF_BOGUS; 229 230 /* If unclaimed and active, must be the BIOS. */ 231 if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) 232 strcpy(mrd->mr_owner, mem_owner_bios); 233 } 234} 235 236/* 237 * Return the MTRR memory type matching a region's flags 238 */ 239static int 240i686_mtrrtype(int flags) 241{ 242 int i; 243 244 flags &= MDF_ATTRMASK; 245 246 for (i = 0; i < MTRRTOMRTLEN; i++) { 247 if (i686_mtrrtomrt[i] == MDF_UNKNOWN) 248 continue; 249 if (flags == i686_mtrrtomrt[i]) 250 return (i); 251 } 252 return (-1); 253} 254 255static int 256i686_mrt2mtrr(int flags, int oldval) 257{ 258 int val; 259 260 if ((val = i686_mtrrtype(flags)) == -1) 261 return (oldval & 0xff); 262 return (val & 0xff); 263} 264 265/* 266 * Update running CPU(s) MTRRs to match the ranges in the descriptor 267 * list. 268 * 269 * XXX Must be called with interrupts enabled. 270 */ 271static void 272i686_mrstore(struct mem_range_softc *sc) 273{ 274#ifdef SMP 275 /* 276 * We should use ipi_all_but_self() to call other CPUs into a 277 * locking gate, then call a target function to do this work. 278 * The "proper" solution involves a generalised locking gate 279 * implementation, not ready yet. 
280 */ 281 smp_rendezvous(NULL, i686_mrstoreone, NULL, sc); 282#else 283 disable_intr(); /* disable interrupts */ 284 i686_mrstoreone(sc); 285 enable_intr(); 286#endif 287} 288 289/* 290 * Update the current CPU's MTRRs with those represented in the 291 * descriptor list. Note that we do this wholesale rather than just 292 * stuffing one entry; this is simpler (but slower, of course). 293 */ 294static void 295i686_mrstoreone(void *arg) 296{ 297 struct mem_range_softc *sc = arg; 298 struct mem_range_desc *mrd; 299 u_int64_t omsrv, msrv; 300 int i, j, msr; 301 u_int cr4save; 302 303 mrd = sc->mr_desc; 304 305 /* Disable PGE. */ 306 cr4save = rcr4(); 307 if (cr4save & CR4_PGE) 308 load_cr4(cr4save & ~CR4_PGE); 309 310 /* Disable caches (CD = 1, NW = 0). */ 311 load_cr0((rcr0() & ~CR0_NW) | CR0_CD); 312 313 /* Flushes caches and TLBs. */ 314 wbinvd(); 315 316 /* Disable MTRRs (E = 0). */ 317 wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE); 318 319 /* Set fixed-range MTRRs. */ 320 if (sc->mr_cap & MR686_FIXMTRR) { 321 msr = MSR_MTRR64kBase; 322 for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 323 msrv = 0; 324 omsrv = rdmsr(msr); 325 for (j = 7; j >= 0; j--) { 326 msrv = msrv << 8; 327 msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 328 omsrv >> (j * 8)); 329 } 330 wrmsr(msr, msrv); 331 mrd += 8; 332 } 333 msr = MSR_MTRR16kBase; 334 for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 335 msrv = 0; 336 omsrv = rdmsr(msr); 337 for (j = 7; j >= 0; j--) { 338 msrv = msrv << 8; 339 msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 340 omsrv >> (j * 8)); 341 } 342 wrmsr(msr, msrv); 343 mrd += 8; 344 } 345 msr = MSR_MTRR4kBase; 346 for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 347 msrv = 0; 348 omsrv = rdmsr(msr); 349 for (j = 7; j >= 0; j--) { 350 msrv = msrv << 8; 351 msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 352 omsrv >> (j * 8)); 353 } 354 wrmsr(msr, msrv); 355 mrd += 8; 356 } 357 } 358 359 /* Set remainder which must be variable MTRRs. 
*/ 360 msr = MSR_MTRRVarBase; 361 for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 362 /* base/type register */ 363 omsrv = rdmsr(msr); 364 if (mrd->mr_flags & MDF_ACTIVE) { 365 msrv = mrd->mr_base & mtrr_physmask; 366 msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv); 367 } else { 368 msrv = 0; 369 } 370 wrmsr(msr, msrv); 371 372 /* mask/active register */ 373 if (mrd->mr_flags & MDF_ACTIVE) { 374 msrv = MTRR_PHYSMASK_VALID | 375 (~(mrd->mr_len - 1) & mtrr_physmask); 376 } else { 377 msrv = 0; 378 } 379 wrmsr(msr + 1, msrv); 380 } 381 382 /* Flush caches, TLBs. */ 383 wbinvd(); 384 385 /* Enable MTRRs. */ 386 wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE); 387 388 /* Enable caches (CD = 0, NW = 0). */ 389 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); 390 391 /* Restore PGE. */ 392 load_cr4(cr4save); 393} 394 395/* 396 * Hunt for the fixed MTRR referencing (addr) 397 */ 398static struct mem_range_desc * 399i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr) 400{ 401 struct mem_range_desc *mrd; 402 int i; 403 404 for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); 405 i++, mrd++) 406 if ((addr >= mrd->mr_base) && 407 (addr < (mrd->mr_base + mrd->mr_len))) 408 return (mrd); 409 return (NULL); 410} 411 412/* 413 * Try to satisfy the given range request by manipulating the fixed 414 * MTRRs that cover low memory. 415 * 416 * Note that we try to be generous here; we'll bloat the range out to 417 * the next higher/lower boundary to avoid the consumer having to know 418 * too much about the mechanisms here. 419 * 420 * XXX note that this will have to be updated when we start supporting 421 * "busy" ranges. 422 */ 423static int 424i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 425{ 426 struct mem_range_desc *first_md, *last_md, *curr_md; 427 428 /* Range check. 
*/ 429 if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || 430 ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) 431 return (EINVAL); 432 433 /* Check that we aren't doing something risky. */ 434 if (!(mrd->mr_flags & MDF_FORCE)) 435 for (curr_md = first_md; curr_md <= last_md; curr_md++) { 436 if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN) 437 return (EACCES); 438 } 439 440 /* Set flags, clear set-by-firmware flag. */ 441 for (curr_md = first_md; curr_md <= last_md; curr_md++) { 442 curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & 443 ~MDF_FIRMWARE, mrd->mr_flags); 444 bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); 445 } 446 447 return (0); 448} 449 450/* 451 * Modify/add a variable MTRR to satisfy the request. 452 * 453 * XXX needs to be updated to properly support "busy" ranges. 454 */ 455static int 456i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, 457 int *arg) 458{ 459 struct mem_range_desc *curr_md, *free_md; 460 int i; 461 462 /* 463 * Scan the currently active variable descriptors, look for 464 * one we exactly match (straight takeover) and for possible 465 * accidental overlaps. 466 * 467 * Keep track of the first empty variable descriptor in case 468 * we can't perform a takeover. 469 */ 470 i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; 471 curr_md = sc->mr_desc + i; 472 free_md = NULL; 473 for (; i < sc->mr_ndesc; i++, curr_md++) { 474 if (curr_md->mr_flags & MDF_ACTIVE) { 475 /* Exact match? */ 476 if ((curr_md->mr_base == mrd->mr_base) && 477 (curr_md->mr_len == mrd->mr_len)) { 478 479 /* Whoops, owned by someone. */ 480 if (curr_md->mr_flags & MDF_BUSY) 481 return (EBUSY); 482 483 /* Check that we aren't doing something risky */ 484 if (!(mrd->mr_flags & MDF_FORCE) && 485 ((curr_md->mr_flags & MDF_ATTRMASK) == 486 MDF_UNKNOWN)) 487 return (EACCES); 488 489 /* Ok, just hijack this entry. 
*/ 490 free_md = curr_md; 491 break; 492 } 493 494 /* Non-exact overlap? */ 495 if (mroverlap(curr_md, mrd)) { 496 /* Between conflicting region types? */ 497 if (i686_mtrrconflict(curr_md->mr_flags, 498 mrd->mr_flags)) 499 return (EINVAL); 500 } 501 } else if (free_md == NULL) { 502 free_md = curr_md; 503 } 504 } 505 506 /* Got somewhere to put it? */ 507 if (free_md == NULL) 508 return (ENOSPC); 509 510 /* Set up new descriptor. */ 511 free_md->mr_base = mrd->mr_base; 512 free_md->mr_len = mrd->mr_len; 513 free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); 514 bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); 515 return (0); 516} 517 518/* 519 * Handle requests to set memory range attributes by manipulating MTRRs. 520 */ 521static int 522i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 523{ 524 struct mem_range_desc *targ; 525 int error = 0; 526 527 switch(*arg) { 528 case MEMRANGE_SET_UPDATE: 529 /* 530 * Make sure that what's being asked for is even 531 * possible at all. 532 */ 533 if (!mrvalid(mrd->mr_base, mrd->mr_len) || 534 i686_mtrrtype(mrd->mr_flags) == -1) 535 return (EINVAL); 536 537#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) 538 539 /* Are the "low memory" conditions applicable? */ 540 if ((sc->mr_cap & MR686_FIXMTRR) && 541 ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { 542 if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) 543 return (error); 544 } else { 545 /* It's time to play with variable MTRRs. */ 546 if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) 547 return (error); 548 } 549 break; 550 551 case MEMRANGE_SET_REMOVE: 552 if ((targ = mem_range_match(sc, mrd)) == NULL) 553 return (ENOENT); 554 if (targ->mr_flags & MDF_FIXACTIVE) 555 return (EPERM); 556 if (targ->mr_flags & MDF_BUSY) 557 return (EBUSY); 558 targ->mr_flags &= ~MDF_ACTIVE; 559 targ->mr_owner[0] = 0; 560 break; 561 562 default: 563 return (EOPNOTSUPP); 564 } 565 566 /* Update the hardware. 
*/ 567 i686_mrstore(sc); 568 569 /* Refetch to see where we're at. */ 570 i686_mrfetch(sc); 571 return (0); 572} 573 574/* 575 * Work out how many ranges we support, initialise storage for them, 576 * and fetch the initial settings. 577 */ 578static void 579i686_mrinit(struct mem_range_softc *sc) 580{ 581 struct mem_range_desc *mrd; 582 u_int regs[4]; 583 int i, nmdesc = 0, pabits; 584 585 mtrrcap = rdmsr(MSR_MTRRcap); 586 mtrrdef = rdmsr(MSR_MTRRdefType); 587 588 /* For now, bail out if MTRRs are not enabled. */ 589 if (!(mtrrdef & MTRR_DEF_ENABLE)) { 590 if (bootverbose) 591 printf("CPU supports MTRRs but not enabled\n"); 592 return; 593 } 594 nmdesc = mtrrcap & MTRR_CAP_VCNT; 595 if (bootverbose) 596 printf("Pentium Pro MTRR support enabled\n"); 597 598 /* 599 * Determine the size of the PhysMask and PhysBase fields in 600 * the variable range MTRRs. If the extended CPUID 0x80000008 601 * is present, use that to figure out how many physical 602 * address bits the CPU supports. Otherwise, default to 36 603 * address bits. 604 */ 605 if (cpu_exthigh >= 0x80000008) { 606 do_cpuid(0x80000008, regs); 607 pabits = regs[0] & 0xff; 608 } else 609 pabits = 36; 610 mtrr_physmask = ((1ULL << pabits) - 1) & ~0xfffULL; 611 612 /* If fixed MTRRs supported and enabled. */ 613 if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) { 614 sc->mr_cap = MR686_FIXMTRR; 615 nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K; 616 } 617 618 sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC, 619 M_WAITOK | M_ZERO); 620 sc->mr_ndesc = nmdesc; 621 622 mrd = sc->mr_desc; 623 624 /* Populate the fixed MTRR entries' base/length. 
*/ 625 if (sc->mr_cap & MR686_FIXMTRR) { 626 for (i = 0; i < MTRR_N64K; i++, mrd++) { 627 mrd->mr_base = i * 0x10000; 628 mrd->mr_len = 0x10000; 629 mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 630 MDF_FIXACTIVE; 631 } 632 for (i = 0; i < MTRR_N16K; i++, mrd++) { 633 mrd->mr_base = i * 0x4000 + 0x80000; 634 mrd->mr_len = 0x4000; 635 mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 636 MDF_FIXACTIVE; 637 } 638 for (i = 0; i < MTRR_N4K; i++, mrd++) { 639 mrd->mr_base = i * 0x1000 + 0xc0000; 640 mrd->mr_len = 0x1000; 641 mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 642 MDF_FIXACTIVE; 643 } 644 } 645 646 /* 647 * Get current settings, anything set now is considered to 648 * have been set by the firmware. (XXX has something already 649 * played here?) 650 */ 651 i686_mrfetch(sc); 652 mrd = sc->mr_desc; 653 for (i = 0; i < sc->mr_ndesc; i++, mrd++) { 654 if (mrd->mr_flags & MDF_ACTIVE) 655 mrd->mr_flags |= MDF_FIRMWARE; 656 } 657} 658 659/* 660 * Initialise MTRRs on an AP after the BSP has run the init code. 661 */ 662static void 663i686_mrAPinit(struct mem_range_softc *sc) 664{ 665 666 i686_mrstoreone(sc); 667 wrmsr(MSR_MTRRdefType, mtrrdef); 668} 669 670static void 671i686_mem_drvinit(void *unused) 672{ 673 674 if (mtrrs_disabled) 675 return; 676 if (!(cpu_feature & CPUID_MTRR)) 677 return; 678 if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00) 679 return; 680 if ((strcmp(cpu_vendor, "GenuineIntel") != 0) && 681 (strcmp(cpu_vendor, "AuthenticAMD") != 0)) 682 return; 683 mem_range_softc.mr_op = &i686_mrops; 684} 685SYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL); 686