/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2545405Smsmith */ 2645405Smsmith 27115683Sobrien#include <sys/cdefs.h> 28115683Sobrien__FBSDID("$FreeBSD: head/sys/i386/i386/i686_mem.c 177125 2008-03-12 22:09:19Z jhb $"); 29115683Sobrien 3045405Smsmith#include <sys/param.h> 3145405Smsmith#include <sys/kernel.h> 3245405Smsmith#include <sys/systm.h> 3345405Smsmith#include <sys/malloc.h> 3445405Smsmith#include <sys/memrange.h> 3576078Sjhb#include <sys/smp.h> 36106842Smdodd#include <sys/sysctl.h> 3745405Smsmith 3845405Smsmith#include <machine/md_var.h> 3945405Smsmith#include <machine/specialreg.h> 4045405Smsmith 4145405Smsmith/* 4245405Smsmith * i686 memory range operations 4345405Smsmith * 4445405Smsmith * This code will probably be impenetrable without reference to the 4545405Smsmith * Intel Pentium Pro documentation. 4645405Smsmith */ 4745405Smsmith 4845405Smsmithstatic char *mem_owner_bios = "BIOS"; 4945405Smsmith 50177070Sjhb#define MR686_FIXMTRR (1<<0) 5145405Smsmith 52177070Sjhb#define mrwithin(mr, a) \ 53177070Sjhb (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len))) 54177070Sjhb#define mroverlap(mra, mrb) \ 55177070Sjhb (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base)) 5645405Smsmith 57177070Sjhb#define mrvalid(base, len) \ 58177070Sjhb ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \ 59177070Sjhb ((len) >= (1 << 12)) && /* length is >= 4k */ \ 60177070Sjhb powerof2((len)) && /* ... 
and power of two */ \ 61177070Sjhb !((base) & ((len) - 1))) /* range is not discontiuous */ 6245405Smsmith 63177070Sjhb#define mrcopyflags(curr, new) \ 64177070Sjhb (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK)) 6545405Smsmith 66177070Sjhbstatic int mtrrs_disabled; 67106842SmdoddTUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled); 68121307SsilbySYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN, 69177070Sjhb &mtrrs_disabled, 0, "Disable i686 MTRRs."); 70106842Smdodd 71177070Sjhbstatic void i686_mrinit(struct mem_range_softc *sc); 72177070Sjhbstatic int i686_mrset(struct mem_range_softc *sc, 73177070Sjhb struct mem_range_desc *mrd, int *arg); 74177070Sjhbstatic void i686_mrAPinit(struct mem_range_softc *sc); 7545405Smsmith 7645405Smsmithstatic struct mem_range_ops i686_mrops = { 77177070Sjhb i686_mrinit, 78177070Sjhb i686_mrset, 79177070Sjhb i686_mrAPinit 8045405Smsmith}; 8145405Smsmith 8246215Smsmith/* XXX for AP startup hook */ 83177070Sjhbstatic u_int64_t mtrrcap, mtrrdef; 8446215Smsmith 85177125Sjhb/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. 
*/ 86177125Sjhbstatic u_int64_t mtrr_physmask; 87177125Sjhb 88177070Sjhbstatic struct mem_range_desc *mem_range_match(struct mem_range_softc *sc, 89177070Sjhb struct mem_range_desc *mrd); 90177070Sjhbstatic void i686_mrfetch(struct mem_range_softc *sc); 91177070Sjhbstatic int i686_mtrrtype(int flags); 92177070Sjhbstatic int i686_mrt2mtrr(int flags, int oldval); 93177070Sjhbstatic int i686_mtrrconflict(int flag1, int flag2); 94177070Sjhbstatic void i686_mrstore(struct mem_range_softc *sc); 95177070Sjhbstatic void i686_mrstoreone(void *arg); 96177070Sjhbstatic struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc, 97177070Sjhb u_int64_t addr); 98177070Sjhbstatic int i686_mrsetlow(struct mem_range_softc *sc, 99177070Sjhb struct mem_range_desc *mrd, int *arg); 100177070Sjhbstatic int i686_mrsetvariable(struct mem_range_softc *sc, 101177070Sjhb struct mem_range_desc *mrd, int *arg); 10245405Smsmith 10345405Smsmith/* i686 MTRR type to memory range type conversion */ 10445405Smsmithstatic int i686_mtrrtomrt[] = { 105177070Sjhb MDF_UNCACHEABLE, 106177070Sjhb MDF_WRITECOMBINE, 107177070Sjhb MDF_UNKNOWN, 108177070Sjhb MDF_UNKNOWN, 109177070Sjhb MDF_WRITETHROUGH, 110177070Sjhb MDF_WRITEPROTECT, 111177070Sjhb MDF_WRITEBACK 11245405Smsmith}; 11345405Smsmith 114177070Sjhb#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0])) 11594683Sdwmalone 11694683Sdwmalonestatic int 117177070Sjhbi686_mtrr2mrt(int val) 118177070Sjhb{ 119177070Sjhb 12094683Sdwmalone if (val < 0 || val >= MTRRTOMRTLEN) 121177070Sjhb return (MDF_UNKNOWN); 122177070Sjhb return (i686_mtrrtomrt[val]); 12394683Sdwmalone} 12494683Sdwmalone 125177070Sjhb/* 12694683Sdwmalone * i686 MTRR conflicts. Writeback and uncachable may overlap. 
12748925Smsmith */ 12894683Sdwmalonestatic int 129177070Sjhbi686_mtrrconflict(int flag1, int flag2) 130177070Sjhb{ 131177070Sjhb 13294683Sdwmalone flag1 &= MDF_ATTRMASK; 13394683Sdwmalone flag2 &= MDF_ATTRMASK; 13494683Sdwmalone if (flag1 == flag2 || 13594683Sdwmalone (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) || 13694683Sdwmalone (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE)) 137177070Sjhb return (0); 138177070Sjhb return (1); 13994683Sdwmalone} 14045405Smsmith 14145405Smsmith/* 14245405Smsmith * Look for an exactly-matching range. 14345405Smsmith */ 14445405Smsmithstatic struct mem_range_desc * 145177070Sjhbmem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd) 14645405Smsmith{ 147177070Sjhb struct mem_range_desc *cand; 148177070Sjhb int i; 149177070Sjhb 150177070Sjhb for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++) 151177070Sjhb if ((cand->mr_base == mrd->mr_base) && 152177070Sjhb (cand->mr_len == mrd->mr_len)) 153177070Sjhb return (cand); 154177070Sjhb return (NULL); 15545405Smsmith} 15645405Smsmith 15745405Smsmith/* 158177070Sjhb * Fetch the current mtrr settings from the current CPU (assumed to 159177070Sjhb * all be in sync in the SMP case). Note that if we are here, we 160177070Sjhb * assume that MTRRs are enabled, and we may or may not have fixed 161177070Sjhb * MTRRs. 16245405Smsmith */ 16345405Smsmithstatic void 16445405Smsmithi686_mrfetch(struct mem_range_softc *sc) 16545405Smsmith{ 166177070Sjhb struct mem_range_desc *mrd; 167177070Sjhb u_int64_t msrv; 168177070Sjhb int i, j, msr; 16945405Smsmith 170177070Sjhb mrd = sc->mr_desc; 17145405Smsmith 172177070Sjhb /* Get fixed-range MTRRs. 
*/ 173177070Sjhb if (sc->mr_cap & MR686_FIXMTRR) { 174177070Sjhb msr = MSR_MTRR64kBase; 175177070Sjhb for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 176177070Sjhb msrv = rdmsr(msr); 177177070Sjhb for (j = 0; j < 8; j++, mrd++) { 178177070Sjhb mrd->mr_flags = 179177070Sjhb (mrd->mr_flags & ~MDF_ATTRMASK) | 180177070Sjhb i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 181177070Sjhb if (mrd->mr_owner[0] == 0) 182177070Sjhb strcpy(mrd->mr_owner, mem_owner_bios); 183177070Sjhb msrv = msrv >> 8; 184177070Sjhb } 185177070Sjhb } 186177070Sjhb msr = MSR_MTRR16kBase; 187177070Sjhb for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 188177070Sjhb msrv = rdmsr(msr); 189177070Sjhb for (j = 0; j < 8; j++, mrd++) { 190177070Sjhb mrd->mr_flags = 191177070Sjhb (mrd->mr_flags & ~MDF_ATTRMASK) | 192177070Sjhb i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 193177070Sjhb if (mrd->mr_owner[0] == 0) 194177070Sjhb strcpy(mrd->mr_owner, mem_owner_bios); 195177070Sjhb msrv = msrv >> 8; 196177070Sjhb } 197177070Sjhb } 198177070Sjhb msr = MSR_MTRR4kBase; 199177070Sjhb for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 200177070Sjhb msrv = rdmsr(msr); 201177070Sjhb for (j = 0; j < 8; j++, mrd++) { 202177070Sjhb mrd->mr_flags = 203177070Sjhb (mrd->mr_flags & ~MDF_ATTRMASK) | 204177070Sjhb i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE; 205177070Sjhb if (mrd->mr_owner[0] == 0) 206177070Sjhb strcpy(mrd->mr_owner, mem_owner_bios); 207177070Sjhb msrv = msrv >> 8; 208177070Sjhb } 209177070Sjhb } 21045405Smsmith } 211177070Sjhb 212177070Sjhb /* Get remainder which must be variable MTRRs. */ 213177070Sjhb msr = MSR_MTRRVarBase; 214177070Sjhb for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 215177070Sjhb msrv = rdmsr(msr); 21645405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 217177070Sjhb i686_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE); 218177125Sjhb mrd->mr_base = msrv & mtrr_physmask; 219177070Sjhb msrv = rdmsr(msr + 1); 220177070Sjhb mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ? 
221177070Sjhb (mrd->mr_flags | MDF_ACTIVE) : 222177070Sjhb (mrd->mr_flags & ~MDF_ACTIVE); 223177070Sjhb 224177070Sjhb /* Compute the range from the mask. Ick. */ 225177125Sjhb mrd->mr_len = (~(msrv & mtrr_physmask) & 226177125Sjhb (mtrr_physmask | 0xfffLL)) + 1; 227177070Sjhb if (!mrvalid(mrd->mr_base, mrd->mr_len)) 228177070Sjhb mrd->mr_flags |= MDF_BOGUS; 229177070Sjhb 230177070Sjhb /* If unclaimed and active, must be the BIOS. */ 231177070Sjhb if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) 232177070Sjhb strcpy(mrd->mr_owner, mem_owner_bios); 23345405Smsmith } 23445405Smsmith} 23545405Smsmith 23645405Smsmith/* 23745405Smsmith * Return the MTRR memory type matching a region's flags 23845405Smsmith */ 23945405Smsmithstatic int 24045405Smsmithi686_mtrrtype(int flags) 24145405Smsmith{ 242177070Sjhb int i; 24345405Smsmith 244177070Sjhb flags &= MDF_ATTRMASK; 24545405Smsmith 246177070Sjhb for (i = 0; i < MTRRTOMRTLEN; i++) { 247177070Sjhb if (i686_mtrrtomrt[i] == MDF_UNKNOWN) 248177070Sjhb continue; 249177070Sjhb if (flags == i686_mtrrtomrt[i]) 250177070Sjhb return (i); 251177070Sjhb } 252177070Sjhb return (-1); 25345405Smsmith} 25445405Smsmith 25594683Sdwmalonestatic int 25694683Sdwmalonei686_mrt2mtrr(int flags, int oldval) 25794683Sdwmalone{ 25894683Sdwmalone int val; 25994683Sdwmalone 26094683Sdwmalone if ((val = i686_mtrrtype(flags)) == -1) 261177070Sjhb return (oldval & 0xff); 262177070Sjhb return (val & 0xff); 26394683Sdwmalone} 26494683Sdwmalone 26545405Smsmith/* 26646215Smsmith * Update running CPU(s) MTRRs to match the ranges in the descriptor 26746215Smsmith * list. 26846215Smsmith * 26946215Smsmith * XXX Must be called with interrupts enabled. 
27045405Smsmith */ 27148925Smsmithstatic void 27245405Smsmithi686_mrstore(struct mem_range_softc *sc) 27345405Smsmith{ 27445405Smsmith#ifdef SMP 275177070Sjhb /* 276177070Sjhb * We should use ipi_all_but_self() to call other CPUs into a 277177070Sjhb * locking gate, then call a target function to do this work. 278177070Sjhb * The "proper" solution involves a generalised locking gate 279177070Sjhb * implementation, not ready yet. 280177070Sjhb */ 281177070Sjhb smp_rendezvous(NULL, i686_mrstoreone, NULL, sc); 28248925Smsmith#else 283177070Sjhb disable_intr(); /* disable interrupts */ 284177070Sjhb i686_mrstoreone(sc); 285177070Sjhb enable_intr(); 28648925Smsmith#endif 28746215Smsmith} 28846215Smsmith 28946215Smsmith/* 29046215Smsmith * Update the current CPU's MTRRs with those represented in the 291177070Sjhb * descriptor list. Note that we do this wholesale rather than just 292177070Sjhb * stuffing one entry; this is simpler (but slower, of course). 29346215Smsmith */ 29448925Smsmithstatic void 29548925Smsmithi686_mrstoreone(void *arg) 29646215Smsmith{ 297177070Sjhb struct mem_range_softc *sc = arg; 298177070Sjhb struct mem_range_desc *mrd; 299177070Sjhb u_int64_t omsrv, msrv; 300177070Sjhb int i, j, msr; 301177070Sjhb u_int cr4save; 30246215Smsmith 303177070Sjhb mrd = sc->mr_desc; 30446215Smsmith 305177070Sjhb /* Disable PGE. */ 306177070Sjhb cr4save = rcr4(); 307177070Sjhb if (cr4save & CR4_PGE) 308177070Sjhb load_cr4(cr4save & ~CR4_PGE); 30945405Smsmith 310177070Sjhb /* Disable caches (CD = 1, NW = 0). */ 311177070Sjhb load_cr0((rcr0() & ~CR0_NW) | CR0_CD); 312177070Sjhb 313177070Sjhb /* Flushes caches and TLBs. */ 314177070Sjhb wbinvd(); 315177070Sjhb 316177070Sjhb /* Disable MTRRs (E = 0). */ 317177070Sjhb wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE); 318177070Sjhb 319177070Sjhb /* Set fixed-range MTRRs. 
*/ 320177070Sjhb if (sc->mr_cap & MR686_FIXMTRR) { 321177070Sjhb msr = MSR_MTRR64kBase; 322177070Sjhb for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 323177070Sjhb msrv = 0; 324177070Sjhb omsrv = rdmsr(msr); 325177070Sjhb for (j = 7; j >= 0; j--) { 326177070Sjhb msrv = msrv << 8; 327177070Sjhb msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 328177070Sjhb omsrv >> (j * 8)); 329177070Sjhb } 330177070Sjhb wrmsr(msr, msrv); 331177070Sjhb mrd += 8; 332177070Sjhb } 333177070Sjhb msr = MSR_MTRR16kBase; 334177070Sjhb for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 335177070Sjhb msrv = 0; 336177070Sjhb omsrv = rdmsr(msr); 337177070Sjhb for (j = 7; j >= 0; j--) { 338177070Sjhb msrv = msrv << 8; 339177070Sjhb msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 340177070Sjhb omsrv >> (j * 8)); 341177070Sjhb } 342177070Sjhb wrmsr(msr, msrv); 343177070Sjhb mrd += 8; 344177070Sjhb } 345177070Sjhb msr = MSR_MTRR4kBase; 346177070Sjhb for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 347177070Sjhb msrv = 0; 348177070Sjhb omsrv = rdmsr(msr); 349177070Sjhb for (j = 7; j >= 0; j--) { 350177070Sjhb msrv = msrv << 8; 351177070Sjhb msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, 352177070Sjhb omsrv >> (j * 8)); 353177070Sjhb } 354177070Sjhb wrmsr(msr, msrv); 355177070Sjhb mrd += 8; 356177070Sjhb } 35745405Smsmith } 358177070Sjhb 359177070Sjhb /* Set remainder which must be variable MTRRs. 
*/ 360177070Sjhb msr = MSR_MTRRVarBase; 361177070Sjhb for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 362177070Sjhb /* base/type register */ 363177070Sjhb omsrv = rdmsr(msr); 364177070Sjhb if (mrd->mr_flags & MDF_ACTIVE) { 365177125Sjhb msrv = mrd->mr_base & mtrr_physmask; 366177070Sjhb msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv); 367177070Sjhb } else { 368177070Sjhb msrv = 0; 369177070Sjhb } 370177070Sjhb wrmsr(msr, msrv); 371177070Sjhb 372177070Sjhb /* mask/active register */ 373177070Sjhb if (mrd->mr_flags & MDF_ACTIVE) { 374177070Sjhb msrv = MTRR_PHYSMASK_VALID | 375177125Sjhb (~(mrd->mr_len - 1) & mtrr_physmask); 376177070Sjhb } else { 377177070Sjhb msrv = 0; 378177070Sjhb } 379177070Sjhb wrmsr(msr + 1, msrv); 38045405Smsmith } 38145405Smsmith 382177070Sjhb /* Flush caches, TLBs. */ 383177070Sjhb wbinvd(); 384177070Sjhb 385177070Sjhb /* Enable MTRRs. */ 386177070Sjhb wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE); 387177070Sjhb 388177070Sjhb /* Enable caches (CD = 0, NW = 0). */ 389177070Sjhb load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); 390177070Sjhb 391177070Sjhb /* Restore PGE. */ 392177070Sjhb load_cr4(cr4save); 39345405Smsmith} 39445405Smsmith 39545405Smsmith/* 39645405Smsmith * Hunt for the fixed MTRR referencing (addr) 39745405Smsmith */ 39845405Smsmithstatic struct mem_range_desc * 39945405Smsmithi686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr) 40045405Smsmith{ 401177070Sjhb struct mem_range_desc *mrd; 402177070Sjhb int i; 403177070Sjhb 404177070Sjhb for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); 405177070Sjhb i++, mrd++) 406177070Sjhb if ((addr >= mrd->mr_base) && 407177070Sjhb (addr < (mrd->mr_base + mrd->mr_len))) 408177070Sjhb return (mrd); 409177070Sjhb return (NULL); 41045405Smsmith} 41145405Smsmith 41245405Smsmith/* 413177070Sjhb * Try to satisfy the given range request by manipulating the fixed 414177070Sjhb * MTRRs that cover low memory. 
41545405Smsmith * 416177070Sjhb * Note that we try to be generous here; we'll bloat the range out to 417177070Sjhb * the next higher/lower boundary to avoid the consumer having to know 418177070Sjhb * too much about the mechanisms here. 41945405Smsmith * 420177070Sjhb * XXX note that this will have to be updated when we start supporting 421177070Sjhb * "busy" ranges. 42245405Smsmith */ 42345405Smsmithstatic int 42445405Smsmithi686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 42545405Smsmith{ 426177070Sjhb struct mem_range_desc *first_md, *last_md, *curr_md; 42745405Smsmith 428177070Sjhb /* Range check. */ 429177070Sjhb if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || 430177070Sjhb ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) 431177070Sjhb return (EINVAL); 43245405Smsmith 433177070Sjhb /* Check that we aren't doing something risky. */ 434177070Sjhb if (!(mrd->mr_flags & MDF_FORCE)) 435177070Sjhb for (curr_md = first_md; curr_md <= last_md; curr_md++) { 436177070Sjhb if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN) 437177070Sjhb return (EACCES); 438177070Sjhb } 439177070Sjhb 440177070Sjhb /* Set flags, clear set-by-firmware flag. */ 441103346Sdwmalone for (curr_md = first_md; curr_md <= last_md; curr_md++) { 442177070Sjhb curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & 443177070Sjhb ~MDF_FIRMWARE, mrd->mr_flags); 444177070Sjhb bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); 445103346Sdwmalone } 446103346Sdwmalone 447177070Sjhb return (0); 44845405Smsmith} 44945405Smsmith 45045405Smsmith/* 45145405Smsmith * Modify/add a variable MTRR to satisfy the request. 45245405Smsmith * 45345405Smsmith * XXX needs to be updated to properly support "busy" ranges. 
45445405Smsmith */ 45545405Smsmithstatic int 456177070Sjhbi686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, 457177070Sjhb int *arg) 45845405Smsmith{ 459177070Sjhb struct mem_range_desc *curr_md, *free_md; 460177070Sjhb int i; 461177070Sjhb 462177070Sjhb /* 463177070Sjhb * Scan the currently active variable descriptors, look for 464177070Sjhb * one we exactly match (straight takeover) and for possible 465177070Sjhb * accidental overlaps. 466177070Sjhb * 467177070Sjhb * Keep track of the first empty variable descriptor in case 468177070Sjhb * we can't perform a takeover. 469177070Sjhb */ 470177070Sjhb i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; 471177070Sjhb curr_md = sc->mr_desc + i; 472177070Sjhb free_md = NULL; 473177070Sjhb for (; i < sc->mr_ndesc; i++, curr_md++) { 474177070Sjhb if (curr_md->mr_flags & MDF_ACTIVE) { 475177070Sjhb /* Exact match? */ 476177070Sjhb if ((curr_md->mr_base == mrd->mr_base) && 477177070Sjhb (curr_md->mr_len == mrd->mr_len)) { 478177070Sjhb 479177070Sjhb /* Whoops, owned by someone. */ 480177070Sjhb if (curr_md->mr_flags & MDF_BUSY) 481177070Sjhb return (EBUSY); 482177070Sjhb 483177070Sjhb /* Check that we aren't doing something risky */ 484177070Sjhb if (!(mrd->mr_flags & MDF_FORCE) && 485177070Sjhb ((curr_md->mr_flags & MDF_ATTRMASK) == 486177070Sjhb MDF_UNKNOWN)) 487177070Sjhb return (EACCES); 488177070Sjhb 489177070Sjhb /* Ok, just hijack this entry. */ 490177070Sjhb free_md = curr_md; 491177070Sjhb break; 492177070Sjhb } 493177070Sjhb 494177070Sjhb /* Non-exact overlap? */ 495177070Sjhb if (mroverlap(curr_md, mrd)) { 496177070Sjhb /* Between conflicting region types? */ 497177070Sjhb if (i686_mtrrconflict(curr_md->mr_flags, 498177070Sjhb mrd->mr_flags)) 499177070Sjhb return (EINVAL); 500177070Sjhb } 501177070Sjhb } else if (free_md == NULL) { 502177070Sjhb free_md = curr_md; 503177070Sjhb } 50445405Smsmith } 50545405Smsmith 506177070Sjhb /* Got somewhere to put it? 
*/ 507177070Sjhb if (free_md == NULL) 508177070Sjhb return (ENOSPC); 509177070Sjhb 510177070Sjhb /* Set up new descriptor. */ 511177070Sjhb free_md->mr_base = mrd->mr_base; 512177070Sjhb free_md->mr_len = mrd->mr_len; 513177070Sjhb free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); 514177070Sjhb bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); 515177070Sjhb return (0); 51645405Smsmith} 51745405Smsmith 51845405Smsmith/* 51945405Smsmith * Handle requests to set memory range attributes by manipulating MTRRs. 52045405Smsmith */ 52145405Smsmithstatic int 52245405Smsmithi686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 52345405Smsmith{ 524177070Sjhb struct mem_range_desc *targ; 525177070Sjhb int error = 0; 52645405Smsmith 527177070Sjhb switch(*arg) { 528177070Sjhb case MEMRANGE_SET_UPDATE: 529177070Sjhb /* 530177070Sjhb * Make sure that what's being asked for is even 531177070Sjhb * possible at all. 532177070Sjhb */ 533177070Sjhb if (!mrvalid(mrd->mr_base, mrd->mr_len) || 534177070Sjhb i686_mtrrtype(mrd->mr_flags) == -1) 535177070Sjhb return (EINVAL); 53645405Smsmith 537177070Sjhb#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) 53845405Smsmith 539177070Sjhb /* Are the "low memory" conditions applicable? */ 540177070Sjhb if ((sc->mr_cap & MR686_FIXMTRR) && 541177070Sjhb ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { 542177070Sjhb if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) 543177070Sjhb return (error); 544177070Sjhb } else { 545177070Sjhb /* It's time to play with variable MTRRs. 
*/ 546177070Sjhb if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) 547177070Sjhb return (error); 548177070Sjhb } 549177070Sjhb break; 550177070Sjhb 551177070Sjhb case MEMRANGE_SET_REMOVE: 552177070Sjhb if ((targ = mem_range_match(sc, mrd)) == NULL) 553177070Sjhb return (ENOENT); 554177070Sjhb if (targ->mr_flags & MDF_FIXACTIVE) 555177070Sjhb return (EPERM); 556177070Sjhb if (targ->mr_flags & MDF_BUSY) 557177070Sjhb return (EBUSY); 558177070Sjhb targ->mr_flags &= ~MDF_ACTIVE; 559177070Sjhb targ->mr_owner[0] = 0; 560177070Sjhb break; 561177070Sjhb 562177070Sjhb default: 563177070Sjhb return (EOPNOTSUPP); 56445405Smsmith } 56545405Smsmith 566177070Sjhb /* Update the hardware. */ 567177070Sjhb i686_mrstore(sc); 56845405Smsmith 569177070Sjhb /* Refetch to see where we're at. */ 570177070Sjhb i686_mrfetch(sc); 571177070Sjhb return (0); 57245405Smsmith} 57345405Smsmith 57445405Smsmith/* 575177070Sjhb * Work out how many ranges we support, initialise storage for them, 576177070Sjhb * and fetch the initial settings. 57745405Smsmith */ 57845405Smsmithstatic void 57945405Smsmithi686_mrinit(struct mem_range_softc *sc) 58045405Smsmith{ 581177070Sjhb struct mem_range_desc *mrd; 582177125Sjhb u_int regs[4]; 583177125Sjhb int i, nmdesc = 0, pabits; 58445405Smsmith 585177070Sjhb mtrrcap = rdmsr(MSR_MTRRcap); 586177070Sjhb mtrrdef = rdmsr(MSR_MTRRdefType); 58745405Smsmith 588177070Sjhb /* For now, bail out if MTRRs are not enabled. */ 589177070Sjhb if (!(mtrrdef & MTRR_DEF_ENABLE)) { 590177070Sjhb if (bootverbose) 591177070Sjhb printf("CPU supports MTRRs but not enabled\n"); 592177070Sjhb return; 593177070Sjhb } 594177070Sjhb nmdesc = mtrrcap & MTRR_CAP_VCNT; 59545405Smsmith if (bootverbose) 596177070Sjhb printf("Pentium Pro MTRR support enabled\n"); 59745405Smsmith 598177125Sjhb /* 599177125Sjhb * Determine the size of the PhysMask and PhysBase fields in 600177125Sjhb * the variable range MTRRs. 
If the extended CPUID 0x80000008 601177125Sjhb * is present, use that to figure out how many physical 602177125Sjhb * address bits the CPU supports. Otherwise, default to 36 603177125Sjhb * address bits. 604177125Sjhb */ 605177125Sjhb if (cpu_exthigh >= 0x80000008) { 606177125Sjhb do_cpuid(0x80000008, regs); 607177125Sjhb pabits = regs[0] & 0xff; 608177125Sjhb } else 609177125Sjhb pabits = 36; 610177125Sjhb mtrr_physmask = ((1ULL << pabits) - 1) & ~0xfffULL; 611177125Sjhb 612177070Sjhb /* If fixed MTRRs supported and enabled. */ 613177070Sjhb if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) { 614177070Sjhb sc->mr_cap = MR686_FIXMTRR; 615177070Sjhb nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K; 616177070Sjhb } 61745405Smsmith 618177070Sjhb sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC, 619177070Sjhb M_WAITOK | M_ZERO); 620177070Sjhb sc->mr_ndesc = nmdesc; 62145405Smsmith 622177070Sjhb mrd = sc->mr_desc; 62345405Smsmith 624177070Sjhb /* Populate the fixed MTRR entries' base/length. */ 625177070Sjhb if (sc->mr_cap & MR686_FIXMTRR) { 626177070Sjhb for (i = 0; i < MTRR_N64K; i++, mrd++) { 627177070Sjhb mrd->mr_base = i * 0x10000; 628177070Sjhb mrd->mr_len = 0x10000; 629177070Sjhb mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 630177070Sjhb MDF_FIXACTIVE; 631177070Sjhb } 632177070Sjhb for (i = 0; i < MTRR_N16K; i++, mrd++) { 633177070Sjhb mrd->mr_base = i * 0x4000 + 0x80000; 634177070Sjhb mrd->mr_len = 0x4000; 635177070Sjhb mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 636177070Sjhb MDF_FIXACTIVE; 637177070Sjhb } 638177070Sjhb for (i = 0; i < MTRR_N4K; i++, mrd++) { 639177070Sjhb mrd->mr_base = i * 0x1000 + 0xc0000; 640177070Sjhb mrd->mr_len = 0x1000; 641177070Sjhb mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | 642177070Sjhb MDF_FIXACTIVE; 643177070Sjhb } 64445405Smsmith } 645177070Sjhb 646177070Sjhb /* 647177070Sjhb * Get current settings, anything set now is considered to 648177070Sjhb * have been set by the firmware. 
(XXX has something already 649177070Sjhb * played here?) 650177070Sjhb */ 651177070Sjhb i686_mrfetch(sc); 652177070Sjhb mrd = sc->mr_desc; 653177070Sjhb for (i = 0; i < sc->mr_ndesc; i++, mrd++) { 654177070Sjhb if (mrd->mr_flags & MDF_ACTIVE) 655177070Sjhb mrd->mr_flags |= MDF_FIRMWARE; 65645405Smsmith } 65745405Smsmith} 65845405Smsmith 65946215Smsmith/* 66046215Smsmith * Initialise MTRRs on an AP after the BSP has run the init code. 66146215Smsmith */ 66245405Smsmithstatic void 66346215Smsmithi686_mrAPinit(struct mem_range_softc *sc) 66446215Smsmith{ 665177070Sjhb 666177070Sjhb i686_mrstoreone(sc); 667177070Sjhb wrmsr(MSR_MTRRdefType, mtrrdef); 66846215Smsmith} 66946215Smsmith 67046215Smsmithstatic void 67145405Smsmithi686_mem_drvinit(void *unused) 67245405Smsmith{ 673177070Sjhb 674177124Sjhb if (mtrrs_disabled) 675177124Sjhb return; 676177124Sjhb if (!(cpu_feature & CPUID_MTRR)) 677177124Sjhb return; 678177124Sjhb if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00) 679177124Sjhb return; 680177124Sjhb if ((strcmp(cpu_vendor, "GenuineIntel") != 0) && 681177124Sjhb (strcmp(cpu_vendor, "AuthenticAMD") != 0)) 682177124Sjhb return; 683177124Sjhb mem_range_softc.mr_op = &i686_mrops; 68445405Smsmith} 685177070SjhbSYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL); 686