x86_mem.c revision 52177
145405Smsmith/*- 245405Smsmith * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> 345405Smsmith * All rights reserved. 445405Smsmith * 545405Smsmith * Redistribution and use in source and binary forms, with or without 645405Smsmith * modification, are permitted provided that the following conditions 745405Smsmith * are met: 845405Smsmith * 1. Redistributions of source code must retain the above copyright 945405Smsmith * notice, this list of conditions and the following disclaimer. 1045405Smsmith * 2. Redistributions in binary form must reproduce the above copyright 1145405Smsmith * notice, this list of conditions and the following disclaimer in the 1245405Smsmith * documentation and/or other materials provided with the distribution. 1345405Smsmith * 1445405Smsmith * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1545405Smsmith * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1645405Smsmith * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1745405Smsmith * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 1845405Smsmith * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1945405Smsmith * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2045405Smsmith * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2145405Smsmith * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2245405Smsmith * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2345405Smsmith * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2445405Smsmith * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/i386/i386/i686_mem.c 52177 1999-10-12 22:53:05Z green $
 */

#include "opt_smp.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

#ifdef SMP
#include <machine/smp.h>
#endif

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

/* Owner name stamped on ranges that were active before we got here */
static char *mem_owner_bios = "BIOS";

/* sc->mr_cap bit: this CPU has (and has enabled) fixed-range MTRRs */
#define MR686_FIXMTRR	(1<<0)

/* Is address (a) inside range (mr)? */
#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
/* Do ranges (mra) and (mrb) overlap at all? */
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

/*
 * Is (base,len) a range a variable MTRR can describe?  The hardware
 * requires a 4k-aligned base, a power-of-two length >= 4k, and a base
 * that is a multiple of the length (so the mask scheme works).
 */
#define mrvalid(base, len) \
    ((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
     ((len) >= (1 << 12)) &&		/* length is >= 4k */		\
     powerof2((len)) &&			/* ... and power of two */	\
     !((base) & ((len) - 1)))		/* range is not discontinuous */

/* Merge the attribute bits of (new) into (curr), keeping curr's other flags */
#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static void	i686_mrinit(struct mem_range_softc *sc);
static int	i686_mrset(struct mem_range_softc *sc,
			   struct mem_range_desc *mrd,
			   int *arg);
static void	i686_mrAPinit(struct mem_range_softc *sc);

/* Ops vector hooked into mem_range_softc by i686_mem_drvinit() */
static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t	mtrrcap, mtrrdef;

static struct mem_range_desc	*mem_range_match(struct mem_range_softc *sc,
						 struct mem_range_desc *mrd);
static void			i686_mrfetch(struct mem_range_softc *sc);
static int			i686_mtrrtype(int flags);
static void			i686_mrstore(struct mem_range_softc *sc);
static void			i686_mrstoreone(void *arg);
static struct mem_range_desc	*i686_mtrrfixsearch(struct mem_range_softc *sc,
						    u_int64_t addr);
static int			i686_mrsetlow(struct mem_range_softc *sc,
					      struct mem_range_desc *mrd,
					      int *arg);
static int			i686_mrsetvariable(struct mem_range_softc *sc,
						   struct mem_range_desc *mrd,
						   int *arg);

/*
 * i686 MTRR type to memory range type conversion.  Indexed by the raw
 * MTRR type code (0..6); codes 2 and 3 are reserved and map to 0.
 */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,
    MDF_WRITECOMBINE,
    0,
    0,
    MDF_WRITETHROUGH,
    MDF_WRITEPROTECT,
    MDF_WRITEBACK
};

/*
 * i686 MTRR conflict matrix for
overlapping ranges 11148925Smsmith * 11248925Smsmith * Specifically, this matrix allows writeback and uncached ranges 11348925Smsmith * to overlap (the overlapped region is uncached). The array index 11448925Smsmith * is the translated i686 code for the flags (because they map well). 11548925Smsmith */ 11648925Smsmithstatic int i686_mtrrconflict[] = { 11748925Smsmith MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT, 11848925Smsmith MDF_ATTRMASK, 11948925Smsmith 0, 12048925Smsmith 0, 12148925Smsmith MDF_ATTRMASK, 12248925Smsmith MDF_ATTRMASK, 12348925Smsmith MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT 12445405Smsmith}; 12545405Smsmith 12645405Smsmith/* 12745405Smsmith * Look for an exactly-matching range. 12845405Smsmith */ 12945405Smsmithstatic struct mem_range_desc * 13045405Smsmithmem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd) 13145405Smsmith{ 13245405Smsmith struct mem_range_desc *cand; 13345405Smsmith int i; 13445405Smsmith 13545405Smsmith for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++) 13645405Smsmith if ((cand->mr_base == mrd->mr_base) && 13745405Smsmith (cand->mr_len == mrd->mr_len)) 13845405Smsmith return(cand); 13945405Smsmith return(NULL); 14045405Smsmith} 14145405Smsmith 14245405Smsmith/* 14345405Smsmith * Fetch the current mtrr settings from the current CPU (assumed to all 14445405Smsmith * be in sync in the SMP case). Note that if we are here, we assume 14545405Smsmith * that MTRRs are enabled, and we may or may not have fixed MTRRs. 
14645405Smsmith */ 14745405Smsmithstatic void 14845405Smsmithi686_mrfetch(struct mem_range_softc *sc) 14945405Smsmith{ 15045405Smsmith struct mem_range_desc *mrd; 15145405Smsmith u_int64_t msrv; 15245405Smsmith int i, j, msr; 15345405Smsmith 15445405Smsmith mrd = sc->mr_desc; 15545405Smsmith 15645405Smsmith /* Get fixed-range MTRRs */ 15745405Smsmith if (sc->mr_cap & MR686_FIXMTRR) { 15845405Smsmith msr = MSR_MTRR64kBase; 15945405Smsmith for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { 16045405Smsmith msrv = rdmsr(msr); 16145405Smsmith for (j = 0; j < 8; j++, mrd++) { 16245405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 16345405Smsmith i686_mtrrtomrt[msrv & 0xff] | 16445405Smsmith MDF_ACTIVE; 16545405Smsmith if (mrd->mr_owner[0] == 0) 16645405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 16745405Smsmith msrv = msrv >> 8; 16845405Smsmith } 16945405Smsmith } 17045405Smsmith msr = MSR_MTRR16kBase; 17145405Smsmith for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { 17245405Smsmith msrv = rdmsr(msr); 17345405Smsmith for (j = 0; j < 8; j++, mrd++) { 17445405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 17545405Smsmith i686_mtrrtomrt[msrv & 0xff] | 17645405Smsmith MDF_ACTIVE; 17745405Smsmith if (mrd->mr_owner[0] == 0) 17845405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 17945405Smsmith msrv = msrv >> 8; 18045405Smsmith } 18145405Smsmith } 18245405Smsmith msr = MSR_MTRR4kBase; 18345405Smsmith for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { 18445405Smsmith msrv = rdmsr(msr); 18545405Smsmith for (j = 0; j < 8; j++, mrd++) { 18645405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 18745405Smsmith i686_mtrrtomrt[msrv & 0xff] | 18845405Smsmith MDF_ACTIVE; 18945405Smsmith if (mrd->mr_owner[0] == 0) 19045405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 19145405Smsmith msrv = msrv >> 8; 19245405Smsmith } 19345405Smsmith } 19445405Smsmith } 19545405Smsmith 19645405Smsmith /* Get remainder which must be variable MTRRs */ 19745405Smsmith msr = 
MSR_MTRRVarBase; 19845405Smsmith for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { 19945405Smsmith msrv = rdmsr(msr); 20045405Smsmith mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | 20145405Smsmith i686_mtrrtomrt[msrv & 0xff]; 20245405Smsmith mrd->mr_base = msrv & 0x0000000ffffff000LL; 20345405Smsmith msrv = rdmsr(msr + 1); 20445405Smsmith mrd->mr_flags = (msrv & 0x800) ? 20545405Smsmith (mrd->mr_flags | MDF_ACTIVE) : 20645405Smsmith (mrd->mr_flags & ~MDF_ACTIVE); 20745405Smsmith /* Compute the range from the mask. Ick. */ 20845405Smsmith mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1; 20945405Smsmith if (!mrvalid(mrd->mr_base, mrd->mr_len)) 21045405Smsmith mrd->mr_flags |= MDF_BOGUS; 21145405Smsmith /* If unclaimed and active, must be the BIOS */ 21245405Smsmith if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) 21345405Smsmith strcpy(mrd->mr_owner, mem_owner_bios); 21445405Smsmith } 21545405Smsmith} 21645405Smsmith 21745405Smsmith/* 21845405Smsmith * Return the MTRR memory type matching a region's flags 21945405Smsmith */ 22045405Smsmithstatic int 22145405Smsmithi686_mtrrtype(int flags) 22245405Smsmith{ 22345405Smsmith int i; 22445405Smsmith 22545405Smsmith flags &= MDF_ATTRMASK; 22645405Smsmith 22745405Smsmith for (i = 0; i < (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0])); i++) { 22845405Smsmith if (i686_mtrrtomrt[i] == 0) 22945405Smsmith continue; 23045405Smsmith if (flags == i686_mtrrtomrt[i]) 23145405Smsmith return(i); 23245405Smsmith } 23345405Smsmith return(-1); 23445405Smsmith} 23545405Smsmith 23645405Smsmith/* 23746215Smsmith * Update running CPU(s) MTRRs to match the ranges in the descriptor 23846215Smsmith * list. 23946215Smsmith * 24046215Smsmith * XXX Must be called with interrupts enabled. 
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use all_but_self_ipi() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
    smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
#else
    disable_intr();				/* disable interrupts */
    i686_mrstoreone((void *)sc);
    enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 *
 * Takes a void * so it can be used directly as an smp_rendezvous()
 * callback; arg is really a struct mem_range_softc *.
 *
 * The CR0/CR4/wbinvd/MSR sequence below follows Intel's documented
 * procedure for modifying MTRRs: caches and global pages must be
 * disabled and flushed around the writes, and MTRRs re-enabled only
 * after all ranges are programmed.  The statement order is critical.
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc	*sc = (struct mem_range_softc *)arg;
    struct mem_range_desc	*mrd;
    u_int64_t			msrv;
    int				i, j, msr;
    u_int			cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();				/* save cr4 */
    if (cr4save & CR4_PGE)
	load_cr4(cr4save & ~CR4_PGE);
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */

    /* Set fixed-range MTRRs: pack eight descriptor types per MSR */
    if (sc->mr_cap & MR686_FIXMTRR) {
	msr = MSR_MTRR64kBase;
	for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		/*
		 * NOTE(review): i686_mtrrtype() returns -1 for flags
		 * with no MTRR translation, which would encode 0xff
		 * here — presumably the descriptors are always
		 * well-formed at this point; verify against callers.
		 */
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR16kBase;
	for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
	msr = MSR_MTRR4kBase;
	for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
	    msrv = 0;
	    for (j = 7; j >= 0; j--) {
		msrv = msrv << 8;
		msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff);
	    }
	    wrmsr(msr, msrv);
	    mrd += 8;
	}
    }

    /* Set remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
	/* base/type register */
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = mrd->mr_base & 0x0000000ffffff000LL;
	    msrv |= (i686_mtrrtype(mrd->mr_flags) & 0xff);
	} else {
	    msrv = 0;
	}
	wrmsr(msr, msrv);

	/* mask/active register: bit 11 marks the range valid */
	if (mrd->mr_flags & MDF_ACTIVE) {
	    msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
	} else {
	    msrv = 0;
	}
	wrmsr(msr + 1, msrv);
    }
    wbinvd();					/* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* enable caches CD = 0 and NW = 0 */
    load_cr4(cr4save);				/* restore cr4 */
}

/*
 * Hunt for the fixed MTRR referencing (addr); returns its descriptor,
 * or NULL if (addr) is outside the fixed-range area.
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
    struct mem_range_desc *mrd;
    int			i;

    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
	if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
	    return(mrd);
    return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
36145405Smsmith * 36245405Smsmith * Note that we try to be generous here; we'll bloat the range out to the 36345405Smsmith * next higher/lower boundary to avoid the consumer having to know too much 36445405Smsmith * about the mechanisms here. 36545405Smsmith * 36645405Smsmith * XXX note that this will have to be updated when we start supporting "busy" ranges. 36745405Smsmith */ 36845405Smsmithstatic int 36945405Smsmithi686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 37045405Smsmith{ 37145405Smsmith struct mem_range_desc *first_md, *last_md, *curr_md; 37245405Smsmith 37345405Smsmith /* range check */ 37445405Smsmith if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || 37545405Smsmith ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) 37645405Smsmith return(EINVAL); 37745405Smsmith 37845405Smsmith /* set flags, clear set-by-firmware flag */ 37945405Smsmith for (curr_md = first_md; curr_md <= last_md; curr_md++) { 38045405Smsmith curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags); 38145405Smsmith bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); 38245405Smsmith } 38345405Smsmith 38445405Smsmith return(0); 38545405Smsmith} 38645405Smsmith 38745405Smsmith 38845405Smsmith/* 38945405Smsmith * Modify/add a variable MTRR to satisfy the request. 39045405Smsmith * 39145405Smsmith * XXX needs to be updated to properly support "busy" ranges. 39245405Smsmith */ 39345405Smsmithstatic int 39445405Smsmithi686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 39545405Smsmith{ 39645405Smsmith struct mem_range_desc *curr_md, *free_md; 39745405Smsmith int i; 39845405Smsmith 39945405Smsmith /* 40045405Smsmith * Scan the currently active variable descriptors, look for 40145405Smsmith * one we exactly match (straight takeover) and for possible 40245405Smsmith * accidental overlaps. 
40345405Smsmith * Keep track of the first empty variable descriptor in case we 40445405Smsmith * can't perform a takeover. 40545405Smsmith */ 40645405Smsmith i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; 40745405Smsmith curr_md = sc->mr_desc + i; 40845405Smsmith free_md = NULL; 40945405Smsmith for (; i < sc->mr_ndesc; i++, curr_md++) { 41045405Smsmith if (curr_md->mr_flags & MDF_ACTIVE) { 41145405Smsmith /* exact match? */ 41245405Smsmith if ((curr_md->mr_base == mrd->mr_base) && 41345405Smsmith (curr_md->mr_len == mrd->mr_len)) { 41445405Smsmith /* whoops, owned by someone */ 41545405Smsmith if (curr_md->mr_flags & MDF_BUSY) 41645405Smsmith return(EBUSY); 41745405Smsmith /* Ok, just hijack this entry */ 41845405Smsmith free_md = curr_md; 41945405Smsmith break; 42045405Smsmith } 42148925Smsmith /* non-exact overlap ? */ 42248925Smsmith if (mroverlap(curr_md, mrd)) { 42348925Smsmith /* between conflicting region types? */ 42448925Smsmith if ((i686_mtrrconflict[i686_mtrrtype(curr_md->mr_flags)] & mrd->mr_flags) || 42548925Smsmith (i686_mtrrconflict[i686_mtrrtype(mrd->mr_flags)] & curr_md->mr_flags)) 42648925Smsmith return(EINVAL); 42748925Smsmith } 42845405Smsmith } else if (free_md == NULL) { 42945405Smsmith free_md = curr_md; 43045405Smsmith } 43145405Smsmith } 43245405Smsmith /* got somewhere to put it? */ 43345405Smsmith if (free_md == NULL) 43445405Smsmith return(ENOSPC); 43545405Smsmith 43645405Smsmith /* Set up new descriptor */ 43745405Smsmith free_md->mr_base = mrd->mr_base; 43845405Smsmith free_md->mr_len = mrd->mr_len; 43945405Smsmith free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); 44045405Smsmith bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); 44145405Smsmith return(0); 44245405Smsmith} 44345405Smsmith 44445405Smsmith/* 44545405Smsmith * Handle requests to set memory range attributes by manipulating MTRRs. 
44645405Smsmith * 44745405Smsmith */ 44845405Smsmithstatic int 44945405Smsmithi686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg) 45045405Smsmith{ 45145405Smsmith struct mem_range_desc *targ; 45245405Smsmith int error = 0; 45345405Smsmith 45445405Smsmith switch(*arg) { 45545405Smsmith case MEMRANGE_SET_UPDATE: 45645405Smsmith /* make sure that what's being asked for is even possible at all */ 45745405Smsmith if (!mrvalid(mrd->mr_base, mrd->mr_len) || 45845405Smsmith (i686_mtrrtype(mrd->mr_flags & MDF_ATTRMASK) == -1)) 45945405Smsmith return(EINVAL); 46045405Smsmith 46145405Smsmith#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) 46245405Smsmith 46345405Smsmith /* are the "low memory" conditions applicable? */ 46445405Smsmith if ((sc->mr_cap & MR686_FIXMTRR) && 46545405Smsmith ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { 46645405Smsmith if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) 46745405Smsmith return(error); 46845405Smsmith } else { 46945405Smsmith /* it's time to play with variable MTRRs */ 47045405Smsmith if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) 47145405Smsmith return(error); 47245405Smsmith } 47345405Smsmith break; 47445405Smsmith 47545405Smsmith case MEMRANGE_SET_REMOVE: 47645405Smsmith if ((targ = mem_range_match(sc, mrd)) == NULL) 47745405Smsmith return(ENOENT); 47845405Smsmith if (targ->mr_flags & MDF_FIXACTIVE) 47945405Smsmith return(EPERM); 48045405Smsmith if (targ->mr_flags & MDF_BUSY) 48145405Smsmith return(EBUSY); 48245405Smsmith targ->mr_flags &= ~MDF_ACTIVE; 48345405Smsmith targ->mr_owner[0] = 0; 48445405Smsmith break; 48545405Smsmith 48645405Smsmith default: 48745405Smsmith return(EOPNOTSUPP); 48845405Smsmith } 48945405Smsmith 49045405Smsmith /* update the hardware */ 49148925Smsmith i686_mrstore(sc); 49245405Smsmith i686_mrfetch(sc); /* refetch to see where we're at */ 49348925Smsmith return(0); 49445405Smsmith} 49545405Smsmith 49645405Smsmith/* 49745405Smsmith * Work out how 
 * many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc	*mrd;
    int				nmdesc = 0;
    int				i;

    mtrrcap = rdmsr(MSR_MTRRcap);	/* saved for the AP startup hook */
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled (MTRRdefType bit 11) */
    if (!(mtrrdef & 0x800)) {
	if (bootverbose)
	    printf("CPU supports MTRRs but not enabled\n");
	return;
    }
    nmdesc = mtrrcap & 0xff;		/* VCNT: number of variable MTRRs */
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs supported (MTRRcap bit 8) and enabled (MTRRdefType bit 10) */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
	sc->mr_cap = MR686_FIXMTRR;
	nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }

    sc->mr_desc =
	(struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
    bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc));
    sc->mr_ndesc = nmdesc;

    mrd = sc->mr_desc;

    /*
     * Populate the fixed MTRR entries' base/length: 64k granularity
     * below 512k, 16k up to 768k, 4k up to 1MB.
     */
    if (sc->mr_cap & MR686_FIXMTRR) {
	for (i = 0; i < MTRR_N64K; i++, mrd++) {
	    mrd->mr_base = i * 0x10000;
	    mrd->mr_len = 0x10000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N16K; i++, mrd++) {
	    mrd->mr_base = i * 0x4000 + 0x80000;
	    mrd->mr_len = 0x4000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
	for (i = 0; i < MTRR_N4K; i++, mrd++) {
	    mrd->mr_base = i * 0x1000 + 0xc0000;
	    mrd->mr_len = 0x1000;
	    mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
	}
    }

    /*
     * Get current settings, anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
	if (mrd->mr_flags & MDF_ACTIVE)
	    mrd->mr_flags |= MDF_FIRMWARE;
    }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);	/* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
}

/*
 * Probe: install the i686 ops vector on family-6 Intel/AMD CPUs that
 * advertise MTRR support in CPUID.
 */
static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
    if ((cpu_feature & CPUID_MTRR) &&
	((cpu_id & 0xf00) == 0x600) &&
	((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	 (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
	mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev,SI_SUB_DRIVERS,SI_ORDER_FIRST,i686_mem_drvinit,NULL)