x86_mem.c revision 177070
/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/i686_mem.c 177070 2008-03-11 21:41:36Z jhb $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len) 						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
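/*
 * For example, base 0xd0000000 with length 0x8000000 (128MB) passes
 * mrvalid(): the base is 4K-aligned, the length is a power of two of
 * at least 4K, and the base is aligned to the length.  A length of
 * 0x18000000 would fail the powerof2() test.
 */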

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable i686 MTRRs.");
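/*
 * The tunable can be set from the loader (e.g. machdep.disable_mtrrs=1
 * in loader.conf) to keep i686_mem_drvinit() from installing the MTRR
 * ops; CTLFLAG_RDTUN makes the sysctl itself read-only at run time.
 */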

static void	i686_mrinit(struct mem_range_softc *sc);
static int	i686_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
	i686_mrinit,
	i686_mrset,
	i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	i686_mrfetch(struct mem_range_softc *sc);
static int	i686_mtrrtype(int flags);
static int	i686_mrt2mtrr(int flags, int oldval);
static int	i686_mtrrconflict(int flag1, int flag2);
static void	i686_mrstore(struct mem_range_softc *sc);
static void	i686_mrstoreone(void *arg);
static struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	i686_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	i686_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};
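/*
 * The table index is the hardware memory type code defined by Intel:
 * 0 = uncacheable (UC), 1 = write-combining (WC), 2 and 3 reserved,
 * 4 = write-through (WT), 5 = write-protected (WP), 6 = write-back (WB).
 */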

#define	MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))

static int
i686_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (i686_mtrrtomrt[val]);
}

/*
 * i686 MTRR conflicts. Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    i686_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & MTRR_PHYSBASE_PHYSBASE;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & MTRR_PHYSMASK_PHYSMASK) &
		    (MTRR_PHYSMASK_PHYSMASK | 0xfffLL)) + 1;
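		/*
		 * The mask has 1s in every address bit that must match, so
		 * inverting it and adding one yields the length.  For a
		 * 128MB range the hardware mask is ~0x7ffffff in the
		 * PHYSMASK bits; ~mask & (PHYSMASK | 0xfff) is 0x7ffffff,
		 * giving mr_len = 0x8000000.
		 */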
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == i686_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
i686_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = i686_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, i686_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	i686_mrstoreone(sc);
	enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_int cr4save;

	mrd = sc->mr_desc;

30229788Sphk	/* Disable PGE. */
303187658Sjhb	cr4save = rcr4();
304215304Sbrucec	if (cr4save & CR4_PGE)
305215304Sbrucec		load_cr4(cr4save & ~CR4_PGE);
30629788Sphk
30768922Srwatson	/* Disable caches (CD = 1, NW = 0). */
30868922Srwatson	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);
309140712Sjeff
310194601Skib	/* Flushes caches and TLBs. */
311194601Skib	wbinvd();
312144318Sdas
313144318Sdas	/* Disable MTRRs (E = 0). */
3146968Sphk	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);
31569774Sphk
31651906Sphk	/* Set fixed-range MTRRs. */
317189593Sjhb	if (sc->mr_cap & MR686_FIXMTRR) {
31825453Sphk		msr = MSR_MTRR64kBase;
31975402Speter		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
32075402Speter			msrv = 0;
321227309Sed			omsrv = rdmsr(msr);
322227309Sed			for (j = 7; j >= 0; j--) {
32375402Speter				msrv = msrv << 8;
32475402Speter				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
32575402Speter				    omsrv >> (j * 8));
32675402Speter			}
32775402Speter			wrmsr(msr, msrv);
32875402Speter			mrd += 8;
32975402Speter		}
33075402Speter		msr = MSR_MTRR16kBase;
33175402Speter		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
33275402Speter			msrv = 0;
33375402Speter			omsrv = rdmsr(msr);
33475402Speter			for (j = 7; j >= 0; j--) {
33575402Speter				msrv = msrv << 8;
33675402Speter				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
33775402Speter				    omsrv >> (j * 8));
33875402Speter			}
339187839Sjhb			wrmsr(msr, msrv);
34075402Speter			mrd += 8;
34175402Speter		}
34275402Speter		msr = MSR_MTRR4kBase;
34375402Speter		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
344187839Sjhb			msrv = 0;
34598994Salfred			omsrv = rdmsr(msr);
34675402Speter			for (j = 7; j >= 0; j--) {
34775402Speter				msrv = msrv << 8;
34875402Speter				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
34975402Speter				    omsrv >> (j * 8));
35075402Speter			}
351187658Sjhb			wrmsr(msr, msrv);
352215304Sbrucec			mrd += 8;
353215304Sbrucec		}
35475402Speter	}
35575402Speter
35675402Speter	/* Set remainder which must be variable MTRRs. */
35775402Speter	msr = MSR_MTRRVarBase;
35875402Speter	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
35975402Speter		/* base/type register */
36075402Speter		omsrv = rdmsr(msr);
36175402Speter		if (mrd->mr_flags & MDF_ACTIVE) {
36275402Speter			msrv = mrd->mr_base & MTRR_PHYSBASE_PHYSBASE;
36375402Speter			msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
36475402Speter		} else {
36575402Speter			msrv = 0;
36675402Speter		}
36775402Speter		wrmsr(msr, msrv);
36875402Speter
36975402Speter		/* mask/active register */
37075402Speter		if (mrd->mr_flags & MDF_ACTIVE) {
37175402Speter			msrv = MTRR_PHYSMASK_VALID |
37275402Speter			    (~(mrd->mr_len - 1) & MTRR_PHYSMASK_PHYSMASK);
37375402Speter		} else {
374187839Sjhb			msrv = 0;
37575402Speter		}
37675402Speter		wrmsr(msr + 1, msrv);
37775402Speter	}
378187839Sjhb
37975402Speter	/* Flush caches, TLBs. */
38075402Speter	wbinvd();
38175402Speter
38275402Speter	/* Enable MTRRs. */
38375402Speter	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);
38475402Speter
385232156Smaxim	/* Enable caches (CD = 0, NW = 0). */
38698994Salfred	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
38775402Speter
38875402Speter	/* Restore PGE. */
38998994Salfred	load_cr4(cr4save);
39075402Speter}
39175402Speter
39298994Salfred/*
39375402Speter * Hunt for the fixed MTRR referencing (addr)
39475402Speter */
39598994Salfredstatic struct mem_range_desc *
39675402Speteri686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
39775402Speter{
39875402Speter	struct mem_range_desc *mrd;
39975402Speter	int i;
400187658Sjhb
401215304Sbrucec	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
402232156Smaxim	     i++, mrd++)
403189593Sjhb		if ((addr >= mrd->mr_base) &&
40475402Speter		    (addr < (mrd->mr_base + mrd->mr_len)))
40575402Speter			return (mrd);
406110952Sarr	return (NULL);
407110952Sarr}
408110952Sarr
409110952Sarr/*
41022521Sdyson * Try to satisfy the given range request by manipulating the fixed
41125453Sphk * MTRRs that cover low memory.
412140712Sjeff *
41325453Sphk * Note that we try to be generous here; we'll bloat the range out to
41425453Sphk * the next higher/lower boundary to avoid the consumer having to know
415120792Sjeff * too much about the mechanisms here.
416120792Sjeff *
417187839Sjhb * XXX note that this will have to be updated when we start supporting
418147326Sjeff * "busy" ranges.
419190829Srwatson */
420190829Srwatsonstatic int
421190829Srwatsoni686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
422230441Skib{
423190829Srwatson	struct mem_range_desc *first_md, *last_md, *curr_md;
424190829Srwatson
425230441Skib	/* Range check. */
426190829Srwatson	if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
427190829Srwatson	    ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
428120792Sjeff		return (EINVAL);
42925453Sphk
430190533Skan	/* Check that we aren't doing something risky. */
431190533Skan	if (!(mrd->mr_flags & MDF_FORCE))
432190533Skan		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
433190533Skan			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
434190533Skan				return (EACCES);
435190533Skan		}
436190533Skan
437190533Skan	/* Set flags, clear set-by-firmware flag. */
438190533Skan	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
43975654Stanimura		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
44025453Sphk		    ~MDF_FIRMWARE, mrd->mr_flags);
44125453Sphk		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
442190533Skan	}
443190533Skan
44425453Sphk	return (0);
44525453Sphk}
44625453Sphk
44725453Sphk/*
44825453Sphk * Modify/add a variable MTRR to satisfy the request.
449116289Sdes *
450120792Sjeff * XXX needs to be updated to properly support "busy" ranges.
451120792Sjeff */
45222521Sdysonstatic int
4536968Sphki686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
45422521Sdyson    int *arg)
45523521Sbde{
4566968Sphk	struct mem_range_desc *curr_md, *free_md;
4576968Sphk	int i;
45822521Sdyson
45922521Sdyson	/*
46022521Sdyson	 * Scan the currently active variable descriptors, look for
46122521Sdyson	 * one we exactly match (straight takeover) and for possible
462183330Sjhb	 * accidental overlaps.
463183330Sjhb	 *
464190387Sjhb	 * Keep track of the first empty variable descriptor in case
465144296Sjeff	 * we can't perform a takeover.
466144296Sjeff	 */
467144296Sjeff	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
468144296Sjeff	curr_md = sc->mr_desc + i;
4691541Srgrimes	free_md = NULL;
4706968Sphk	for (; i < sc->mr_ndesc; i++, curr_md++) {
4711541Srgrimes		if (curr_md->mr_flags & MDF_ACTIVE) {
472231088Sjhb			/* Exact match? */
4731541Srgrimes			if ((curr_md->mr_base == mrd->mr_base) &&
4741541Srgrimes			    (curr_md->mr_len == mrd->mr_len)) {
4751541Srgrimes
476230394Sjhb				/* Whoops, owned by someone. */
477230394Sjhb				if (curr_md->mr_flags & MDF_BUSY)
4781541Srgrimes					return (EBUSY);
47951906Sphk
480209390Sed				/* Check that we aren't doing something risky */
481187839Sjhb				if (!(mrd->mr_flags & MDF_FORCE) &&
4821541Srgrimes				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
4836928Sphk				    MDF_UNKNOWN))
4846928Sphk					return (EACCES);
4851541Srgrimes
4866928Sphk				/* Ok, just hijack this entry. */
487144296Sjeff				free_md = curr_md;
488187839Sjhb				break;
489187839Sjhb			}
49029788Sphk
491187839Sjhb			/* Non-exact overlap? */
49229788Sphk			if (mroverlap(curr_md, mrd)) {
493187839Sjhb				/* Between conflicting region types? */
49425453Sphk				if (i686_mtrrconflict(curr_md->mr_flags,
49525453Sphk				    mrd->mr_flags))
49625453Sphk					return (EINVAL);
497147326Sjeff			}
498147326Sjeff		} else if (free_md == NULL) {
49929788Sphk			free_md = curr_md;
500190829Srwatson		}
501190829Srwatson	}
502230394Sjhb
503230394Sjhb	/* Got somewhere to put it? */
504230394Sjhb	if (free_md == NULL)
505230394Sjhb		return (ENOSPC);
506144296Sjeff
50725453Sphk	/* Set up new descriptor. */
50825453Sphk	free_md->mr_base = mrd->mr_base;
50929788Sphk	free_md->mr_len = mrd->mr_len;
510190829Srwatson	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
511190829Srwatson	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
512190829Srwatson	return (0);
513187839Sjhb}
514190829Srwatson
515190533Skan/*
516190942Skib * Handle requests to set memory range attributes by manipulating MTRRs.
517190942Skib */
518190533Skanstatic int
519190533Skani686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
520190533Skan{
521196203Skib	struct mem_range_desc *targ;
522196203Skib	int error = 0;
52325453Sphk
524230394Sjhb	switch(*arg) {
525230394Sjhb	case MEMRANGE_SET_UPDATE:
526230394Sjhb		/*
527190533Skan		 * Make sure that what's being asked for is even
528230394Sjhb		 * possible at all.
529191081Skan		 */
530230394Sjhb		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
531191082Skan		    i686_mtrrtype(mrd->mr_flags) == -1)
532147326Sjeff			return (EINVAL);
533147326Sjeff
534190829Srwatson#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
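/* FIXTOP works out to 0x100000 (1MB): 8 * 64K + 16 * 16K + 64 * 4K. */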

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/* Update the hardware. */
	i686_mrstore(sc);

	/* Refetch to see where we're at. */
	i686_mrfetch(sc);
	return (0);
}
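
/*
 * Callers do not normally invoke i686_mrset() directly; requests arrive
 * through mem_range_attr_set() (or the MEMRANGE_SET ioctl on /dev/mem).
 * A sketch of a typical request to map a frame buffer write-combined,
 * where fb_base and fb_len are placeholders for the device's values:
 *
 *	struct mem_range_desc mrdesc;
 *	int act = MEMRANGE_SET_UPDATE, error;
 *
 *	mrdesc.mr_base = fb_base;
 *	mrdesc.mr_len = fb_len;
 *	mrdesc.mr_flags = MDF_WRITECOMBINE;
 *	strlcpy(mrdesc.mr_owner, "fb", sizeof(mrdesc.mr_owner));
 *	error = mem_range_attr_set(&mrdesc, &act);
 */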

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	int i, nmdesc = 0;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
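	/*
	 * The fixed ranges cover the low 1MB: eight 64K ranges for
	 * 0x00000-0x7ffff, sixteen 16K ranges for 0x80000-0xbffff and
	 * sixty-four 4K ranges for 0xc0000-0xfffff.
	 */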
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	i686_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{

	i686_mrstoreone(sc);
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

static void
i686_mem_drvinit(void *unused)
{

	/* Try for i686 MTRRs */
	if (!mtrrs_disabled && (cpu_feature & CPUID_MTRR) &&
	    ((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) &&
	    ((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
	    (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
		mem_range_softc.mr_op = &i686_mrops;
	}
}
SYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL);