x86_mem.c revision 69781
145405Smsmith/*- 245405Smsmith * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> 345405Smsmith * All rights reserved. 445405Smsmith * 545405Smsmith * Redistribution and use in source and binary forms, with or without 645405Smsmith * modification, are permitted provided that the following conditions 745405Smsmith * are met: 845405Smsmith * 1. Redistributions of source code must retain the above copyright 945405Smsmith * notice, this list of conditions and the following disclaimer. 1045405Smsmith * 2. Redistributions in binary form must reproduce the above copyright 1145405Smsmith * notice, this list of conditions and the following disclaimer in the 1245405Smsmith * documentation and/or other materials provided with the distribution. 1345405Smsmith * 1445405Smsmith * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1545405Smsmith * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1645405Smsmith * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1745405Smsmith * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 1845405Smsmith * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1945405Smsmith * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2045405Smsmith * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2145405Smsmith * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2245405Smsmith * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2345405Smsmith * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2445405Smsmith * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/i386/i386/i686_mem.c 69781 2000-12-08 21:51:06Z dwmalone $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef SMP
#include <machine/smp.h>
#endif

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

/* Owner tag assigned to ranges that were already active when we probed. */
static char *mem_owner_bios = "BIOS";

/* sc->mr_cap bit: fixed-range MTRRs are present and enabled. */
#define MR686_FIXMTRR	(1<<0)

/* True if address (a) falls inside range descriptor (mr). */
#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
/* True if the two range descriptors overlap at all. */
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

/* Hardware constraints on a variable-range MTRR (base/length/alignment). */
#define mrvalid(base, len) \
    ((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
     ((len) >= (1 << 12)) &&		/* length is >= 4k */		\
     powerof2((len)) &&			/* ... and power of two */	\
     !((base) & ((len) - 1)))		/* range is not discontinuous */

/* Take the attribute bits from (new), keeping (curr)'s other flags. */
#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void	i686_mrinit(struct mem_range_softc *sc);
static int	i686_mrset(struct mem_range_softc *sc,
			   struct mem_range_desc *mrd,
			   int *arg);
static void	i686_mrAPinit(struct mem_range_softc *sc);

/* Method table installed into mem_range_softc.mr_op by i686_mem_drvinit(). */
static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
					      struct mem_range_desc *mrd);
static void	i686_mrfetch(struct mem_range_softc *sc);
static int	i686_mtrrtype(int flags);
static void	i686_mrstore(struct mem_range_softc *sc);
static void	i686_mrstoreone(void *arg);
static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
						 u_int64_t addr);
static int	i686_mrsetlow(struct mem_range_softc *sc,
			      struct mem_range_desc *mrd,
			      int *arg);
static int	i686_mrsetvariable(struct mem_range_softc *sc,
				   struct mem_range_desc *mrd,
				   int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,		/* 0: UC */
    MDF_WRITECOMBINE,		/* 1: WC */
    0,				/* 2: reserved */
    0,				/* 3: reserved */
    MDF_WRITETHROUGH,		/* 4: WT */
    MDF_WRITEPROTECT,		/* 5: WP */
    MDF_WRITEBACK		/* 6: WB */
};

/*
 * i686 MTRR conflict matrix for overlapping ranges
 *
 * Specifically, this matrix allows writeback and uncached ranges
 * to overlap (the overlapped region is uncached).  The array index
 * is the translated i686 code for the flags (because they map well).
 */
static int i686_mtrrconflict[] = {
    MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT,
    MDF_ATTRMASK,
    0,
    0,
    MDF_ATTRMASK,
    MDF_ATTRMASK,
    MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT
};

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
    struct mem_range_desc	*cand;
    int				i;

    for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
	if ((cand->mr_base == mrd->mr_base) &&
	    (cand->mr_len == mrd->mr_len))
	    return(cand);
    return(NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
14445405Smsmith */ 14545405Smsmithstatic void 14645405Smsmithi686_mrfetch(struct mem_range_softc *sc) 14745405Smsmith{ 14845405Smsmith struct mem_range_desc *mrd; 14945405Smsmith u_int64_t msrv; 15045405Smsmith int i, j, msr; 15145405Smsmith 15245405Smsmith mrd = sc->mr_desc; 15345405Smsmith 15445405Smsmith /* Get fixed-range MTRRs */ 15545405Smsmith if (sc->mr_cap & MR686_FIXMTRR) { 15645405Smsmith msr = MSR_MTRR64kBase; 15745405Smsmith for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 15845405Smsmith msrv = rdmsr(msr); 15945405Smsmith for (j = 0; j < 8; j++, mrd++) { 16045405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 16145405Smsmith i686_mtrrtomrt[msrv & 0xff] | 16245405Smsmith MDF_ACTIVE; 16345405Smsmith if (mrd->mr_owner[0] == 0) 16445405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 16545405Smsmith msrv = msrv >> 8; 16645405Smsmith } 16745405Smsmith } 16845405Smsmith msr = MSR_MTRR16kBase; 16945405Smsmith for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 17045405Smsmith msrv = rdmsr(msr); 17145405Smsmith for (j = 0; j < 8; j++, mrd++) { 17245405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 17345405Smsmith i686_mtrrtomrt[msrv & 0xff] | 17445405Smsmith MDF_ACTIVE; 17545405Smsmith if (mrd->mr_owner[0] == 0) 17645405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 17745405Smsmith msrv = msrv >> 8; 17845405Smsmith } 17945405Smsmith } 18045405Smsmith msr = MSR_MTRR4kBase; 18145405Smsmith for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 18245405Smsmith msrv = rdmsr(msr); 18345405Smsmith for (j = 0; j < 8; j++, mrd++) { 18445405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 18545405Smsmith i686_mtrrtomrt[msrv & 0xff] | 18645405Smsmith MDF_ACTIVE; 18745405Smsmith if (mrd->mr_owner[0] == 0) 18845405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 18945405Smsmith msrv = msrv >> 8; 19045405Smsmith } 19145405Smsmith } 19245405Smsmith } 19345405Smsmith 19445405Smsmith /* Get remainder which must be variable MTRRs */ 19545405Smsmith msr = 
MSR_MTRRVarBase; 19645405Smsmith for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 19745405Smsmith msrv = rdmsr(msr); 19845405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 19945405Smsmith i686_mtrrtomrt[msrv & 0xff]; 20045405Smsmith mrd->mr_base = msrv & 0x0000000ffffff000LL; 20145405Smsmith msrv = rdmsr(msr + 1); 20245405Smsmith mrd->mr_flags = (msrv & 0x800) ? 20345405Smsmith (mrd->mr_flags | MDF_ACTIVE) : 20445405Smsmith (mrd->mr_flags & ~MDF_ACTIVE); 20545405Smsmith /* Compute the range from the mask. Ick. */ 20645405Smsmith mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1; 20745405Smsmith if (!mrvalid(mrd->mr_base, mrd->mr_len)) 20845405Smsmith mrd->mr_flags |= MDF_BOGUS; 20945405Smsmith /* If unclaimed and active, must be the BIOS */ 21045405Smsmith if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) 21145405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 21245405Smsmith } 21345405Smsmith} 21445405Smsmith 21545405Smsmith/* 21645405Smsmith * Return the MTRR memory type matching a region's flags 21745405Smsmith */ 21845405Smsmithstatic int 21945405Smsmithi686_mtrrtype(int flags) 22045405Smsmith{ 22145405Smsmith int i; 22245405Smsmith 22345405Smsmith flags &= MDF_ATTRMASK; 22445405Smsmith 22545405Smsmith for (i = 0; i < (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0])); i++) { 22645405Smsmith if (i686_mtrrtomrt[i] == 0) 22745405Smsmith continue; 22845405Smsmith if (flags == i686_mtrrtomrt[i]) 22945405Smsmith return(i); 23045405Smsmith } 23145405Smsmith return(-1); 23245405Smsmith} 23345405Smsmith 23445405Smsmith/* 23546215Smsmith * Update running CPU(s) MTRRs to match the ranges in the descriptor 23646215Smsmith * list. 23746215Smsmith * 23846215Smsmith * XXX Must be called with interrupts enabled. 
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use all_but_self_ipi() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
    smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
#else
    disable_intr();				/* disable interrupts */
    i686_mrstoreone((void *)sc);
    enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 *
 * The cache-disable / wbinvd / MTRR-disable sequence bracketing the
 * MSR writes follows the Intel-documented procedure for changing
 * memory types; do not reorder these steps.
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc	*sc = (struct mem_range_softc *)arg;
    struct mem_range_desc	*mrd;
    u_int64_t			msrv;
    int				i, j, msr;
    u_int			cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();				/* save cr4 */
    if (cr4save & CR4_PGE)
	load_cr4(cr4save & ~CR4_PGE);		/* flush global pages */
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */

    /* Set fixed-range MTRRs: pack eight 8-bit type codes per MSR */
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
    }

    /* Set remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	/* base/type register */
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = mrd->mr_base & 0x0000000ffffff000LL;
	    msrv |= (i686_mtrrtype(mrd->mr_flags) & 0xff);
	} else {
	    msrv = 0;
	}
	wrmsr(msr, msrv);

	/* mask/active register: bit 11 is the Valid bit */
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
	} else {
	    msrv = 0;
	}
	wrmsr(msr + 1, msrv);
    }
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* enable caches CD = 0 and NW = 0 */
    load_cr4(cr4save);				/* restore cr4 */
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
    struct mem_range_desc	*mrd;
    int				i;

    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
	if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
	    return(mrd);
    return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
35945405Smsmith * 36045405Smsmith * Note that we try to be generous here; we'll bloat the range out to the 36145405Smsmith * next higher/lower boundary to avoid the consumer having to know too much 36245405Smsmith * about the mechanisms here. 36345405Smsmith * 36445405Smsmith * XXX note that this will have to be updated when we start supporting "busy" ranges. 36545405Smsmith */ 36645405Smsmithstatic int 36745405Smsmithi686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 36845405Smsmith{ 36945405Smsmith struct mem_range_desc *first_md, *last_md, *curr_md; 37045405Smsmith 37145405Smsmith /* range check */ 37245405Smsmith if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || 37345405Smsmith ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) 37445405Smsmith return(EINVAL); 37545405Smsmith 37645405Smsmith /* set flags, clear set-by-firmware flag */ 37745405Smsmith for (curr_md = first_md; curr_md <= last_md; curr_md++) { 37845405Smsmith curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags); 37945405Smsmith bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); 38045405Smsmith } 38145405Smsmith 38245405Smsmith return(0); 38345405Smsmith} 38445405Smsmith 38545405Smsmith 38645405Smsmith/* 38745405Smsmith * Modify/add a variable MTRR to satisfy the request. 38845405Smsmith * 38945405Smsmith * XXX needs to be updated to properly support "busy" ranges. 39045405Smsmith */ 39145405Smsmithstatic int 39245405Smsmithi686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 39345405Smsmith{ 39445405Smsmith struct mem_range_desc *curr_md, *free_md; 39545405Smsmith int i; 39645405Smsmith 39745405Smsmith /* 39845405Smsmith * Scan the currently active variable descriptors, look for 39945405Smsmith * one we exactly match (straight takeover) and for possible 40045405Smsmith * accidental overlaps. 
40145405Smsmith * Keep track of the first empty variable descriptor in case we 40245405Smsmith * can't perform a takeover. 40345405Smsmith */ 40445405Smsmith i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; 40545405Smsmith curr_md = sc->mr_desc + i; 40645405Smsmith free_md = NULL; 40745405Smsmith for (; i < sc->mr_ndesc; i++, curr_md++) { 40845405Smsmith if (curr_md->mr_flags & MDF_ACTIVE) { 40945405Smsmith /* exact match? */ 41045405Smsmith if ((curr_md->mr_base == mrd->mr_base) && 41145405Smsmith (curr_md->mr_len == mrd->mr_len)) { 41245405Smsmith /* whoops, owned by someone */ 41345405Smsmith if (curr_md->mr_flags & MDF_BUSY) 41445405Smsmith return(EBUSY); 41545405Smsmith /* Ok, just hijack this entry */ 41645405Smsmith free_md = curr_md; 41745405Smsmith break; 41845405Smsmith } 41948925Smsmith /* non-exact overlap ? */ 42048925Smsmith if (mroverlap(curr_md, mrd)) { 42148925Smsmith /* between conflicting region types? */ 42248925Smsmith if ((i686_mtrrconflict[i686_mtrrtype(curr_md->mr_flags)] & mrd->mr_flags) || 42348925Smsmith (i686_mtrrconflict[i686_mtrrtype(mrd->mr_flags)] & curr_md->mr_flags)) 42448925Smsmith return(EINVAL); 42548925Smsmith } 42645405Smsmith } else if (free_md == NULL) { 42745405Smsmith free_md = curr_md; 42845405Smsmith } 42945405Smsmith } 43045405Smsmith /* got somewhere to put it? */ 43145405Smsmith if (free_md == NULL) 43245405Smsmith return(ENOSPC); 43345405Smsmith 43445405Smsmith /* Set up new descriptor */ 43545405Smsmith free_md->mr_base = mrd->mr_base; 43645405Smsmith free_md->mr_len = mrd->mr_len; 43745405Smsmith free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); 43845405Smsmith bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); 43945405Smsmith return(0); 44045405Smsmith} 44145405Smsmith 44245405Smsmith/* 44345405Smsmith * Handle requests to set memory range attributes by manipulating MTRRs. 
44445405Smsmith * 44545405Smsmith */ 44645405Smsmithstatic int 44745405Smsmithi686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 44845405Smsmith{ 44945405Smsmith struct mem_range_desc *targ; 45045405Smsmith int error = 0; 45145405Smsmith 45245405Smsmith switch(*arg) { 45345405Smsmith case MEMRANGE_SET_UPDATE: 45445405Smsmith /* make sure that what's being asked for is even possible at all */ 45545405Smsmith if (!mrvalid(mrd->mr_base, mrd->mr_len) || 45645405Smsmith (i686_mtrrtype(mrd->mr_flags & MDF_ATTRMASK) == -1)) 45745405Smsmith return(EINVAL); 45845405Smsmith 45945405Smsmith#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) 46045405Smsmith 46145405Smsmith /* are the "low memory" conditions applicable? */ 46245405Smsmith if ((sc->mr_cap & MR686_FIXMTRR) && 46345405Smsmith ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { 46445405Smsmith if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) 46545405Smsmith return(error); 46645405Smsmith } else { 46745405Smsmith /* it's time to play with variable MTRRs */ 46845405Smsmith if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) 46945405Smsmith return(error); 47045405Smsmith } 47145405Smsmith break; 47245405Smsmith 47345405Smsmith case MEMRANGE_SET_REMOVE: 47445405Smsmith if ((targ = mem_range_match(sc, mrd)) == NULL) 47545405Smsmith return(ENOENT); 47645405Smsmith if (targ->mr_flags & MDF_FIXACTIVE) 47745405Smsmith return(EPERM); 47845405Smsmith if (targ->mr_flags & MDF_BUSY) 47945405Smsmith return(EBUSY); 48045405Smsmith targ->mr_flags &= ~MDF_ACTIVE; 48145405Smsmith targ->mr_owner[0] = 0; 48245405Smsmith break; 48345405Smsmith 48445405Smsmith default: 48545405Smsmith return(EOPNOTSUPP); 48645405Smsmith } 48745405Smsmith 48845405Smsmith /* update the hardware */ 48948925Smsmith i686_mrstore(sc); 49045405Smsmith i686_mrfetch(sc); /* refetch to see where we're at */ 49148925Smsmith return(0); 49245405Smsmith} 49345405Smsmith 49445405Smsmith/* 49545405Smsmith * Work out how 
 * many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc	*mrd;
    int				nmdesc = 0;
    int				i;

    mtrrcap = rdmsr(MSR_MTRRcap);
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled (E bit clear) */
    if (!(mtrrdef & 0x800)) {
	if (bootverbose)
	    printf("CPU supports MTRRs but not enabled\n");
	return;
    }
    nmdesc = mtrrcap & 0xff;		/* number of variable MTRRs (VCNT) */
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs supported and enabled */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
	sc->mr_cap = MR686_FIXMTRR;
	nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }

    /* one descriptor per MTRR; M_ZERO leaves flags/owner clear */
    sc->mr_desc =
	(struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK | M_ZERO);
    sc->mr_ndesc = nmdesc;

    mrd = sc->mr_desc;

    /* Populate the fixed MTRR entries' base/length */
    if (sc->mr_cap & MR686_FIXMTRR) {
	for (i = 0; i < MTRR_N64K; i++, mrd++) {
	    mrd->mr_base = i * 0x10000;
	    mrd->mr_len = 0x10000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N16K; i++, mrd++) {
	    mrd->mr_base = i * 0x4000 + 0x80000;
	    mrd->mr_len = 0x4000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N4K; i++, mrd++) {
	    mrd->mr_base = i * 0x1000 + 0xc0000;
	    mrd->mr_len = 0x1000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
    }

    /*
     * Get current settings, anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
	if (mrd->mr_flags & MDF_ACTIVE)
	    mrd->mr_flags |= MDF_FIRMWARE;
    }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}

/*
 * Probe: install the i686 method table if this CPU has MTRRs and is a
 * family-6 Intel or AMD part.
 */
static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
    if ((cpu_feature & CPUID_MTRR) &&
	((cpu_id & 0xf00) == 0x600) &&
	((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	 (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
	mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,i686_mem_drvinit,NULL)