x86_mem.c revision 298433
145405Smsmith/*-
245405Smsmith * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
345405Smsmith * All rights reserved.
445405Smsmith *
545405Smsmith * Redistribution and use in source and binary forms, with or without
645405Smsmith * modification, are permitted provided that the following conditions
745405Smsmith * are met:
845405Smsmith * 1. Redistributions of source code must retain the above copyright
945405Smsmith *    notice, this list of conditions and the following disclaimer.
1045405Smsmith * 2. Redistributions in binary form must reproduce the above copyright
1145405Smsmith *    notice, this list of conditions and the following disclaimer in the
1245405Smsmith *    documentation and/or other materials provided with the distribution.
1345405Smsmith *
1445405Smsmith * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1545405Smsmith * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1645405Smsmith * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1745405Smsmith * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1845405Smsmith * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1945405Smsmith * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2045405Smsmith * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2145405Smsmith * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2245405Smsmith * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2345405Smsmith * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2445405Smsmith * SUCH DAMAGE.
2545405Smsmith */
2645405Smsmith
27115683Sobrien#include <sys/cdefs.h>
28115683Sobrien__FBSDID("$FreeBSD: head/sys/i386/i386/i686_mem.c 298433 2016-04-21 19:57:40Z pfg $");
29115683Sobrien
3045405Smsmith#include <sys/param.h>
3145405Smsmith#include <sys/kernel.h>
3245405Smsmith#include <sys/systm.h>
3345405Smsmith#include <sys/malloc.h>
3445405Smsmith#include <sys/memrange.h>
3576078Sjhb#include <sys/smp.h>
36106842Smdodd#include <sys/sysctl.h>
3745405Smsmith
38185341Sjkim#include <machine/cputypes.h>
3945405Smsmith#include <machine/md_var.h>
4045405Smsmith#include <machine/specialreg.h>
4145405Smsmith
4245405Smsmith/*
4345405Smsmith * i686 memory range operations
4445405Smsmith *
4545405Smsmith * This code will probably be impenetrable without reference to the
4645405Smsmith * Intel Pentium Pro documentation.
4745405Smsmith */
4845405Smsmith
4945405Smsmithstatic char *mem_owner_bios = "BIOS";
5045405Smsmith
51177070Sjhb#define	MR686_FIXMTRR	(1<<0)
5245405Smsmith
53177070Sjhb#define	mrwithin(mr, a)							\
54177070Sjhb	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
55177070Sjhb#define	mroverlap(mra, mrb)						\
56177070Sjhb	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
5745405Smsmith
/*
 * A range is valid iff its base is 4k-aligned, its length is a power
 * of two of at least 4k, and the base is aligned to the length (so the
 * range is expressible by a single base/mask MTRR pair).
 */
#define	mrvalid(base, len) 						\
	((!((base) & ((1 << 12) - 1))) && /* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
6345405Smsmith
64177070Sjhb#define	mrcopyflags(curr, new)						\
65177070Sjhb	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
6645405Smsmith
67177070Sjhbstatic int mtrrs_disabled;
68121307SsilbySYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
69177070Sjhb    &mtrrs_disabled, 0, "Disable i686 MTRRs.");
70106842Smdodd
71177070Sjhbstatic void	i686_mrinit(struct mem_range_softc *sc);
72177070Sjhbstatic int	i686_mrset(struct mem_range_softc *sc,
73177070Sjhb		    struct mem_range_desc *mrd, int *arg);
74177070Sjhbstatic void	i686_mrAPinit(struct mem_range_softc *sc);
75189903Sjkimstatic void	i686_mrreinit(struct mem_range_softc *sc);
7645405Smsmith
7745405Smsmithstatic struct mem_range_ops i686_mrops = {
78177070Sjhb	i686_mrinit,
79177070Sjhb	i686_mrset,
80189903Sjkim	i686_mrAPinit,
81189903Sjkim	i686_mrreinit
8245405Smsmith};
8345405Smsmith
8446215Smsmith/* XXX for AP startup hook */
85177070Sjhbstatic u_int64_t mtrrcap, mtrrdef;
8646215Smsmith
87177125Sjhb/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
88177125Sjhbstatic u_int64_t mtrr_physmask;
89177125Sjhb
90177070Sjhbstatic struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
91177070Sjhb		    struct mem_range_desc *mrd);
92177070Sjhbstatic void	i686_mrfetch(struct mem_range_softc *sc);
93177070Sjhbstatic int	i686_mtrrtype(int flags);
94177070Sjhbstatic int	i686_mrt2mtrr(int flags, int oldval);
95177070Sjhbstatic int	i686_mtrrconflict(int flag1, int flag2);
96177070Sjhbstatic void	i686_mrstore(struct mem_range_softc *sc);
97177070Sjhbstatic void	i686_mrstoreone(void *arg);
98177070Sjhbstatic struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
99177070Sjhb		    u_int64_t addr);
100177070Sjhbstatic int	i686_mrsetlow(struct mem_range_softc *sc,
101177070Sjhb		    struct mem_range_desc *mrd, int *arg);
102177070Sjhbstatic int	i686_mrsetvariable(struct mem_range_softc *sc,
103177070Sjhb		    struct mem_range_desc *mrd, int *arg);
10445405Smsmith
/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
	MDF_UNCACHEABLE,	/* MTRR type 0: uncacheable */
	MDF_WRITECOMBINE,	/* MTRR type 1: write-combining */
	MDF_UNKNOWN,		/* MTRR type 2: no MDF equivalent */
	MDF_UNKNOWN,		/* MTRR type 3: no MDF equivalent */
	MDF_WRITETHROUGH,	/* MTRR type 4: write-through */
	MDF_WRITEPROTECT,	/* MTRR type 5: write-protect */
	MDF_WRITEBACK		/* MTRR type 6: write-back */
};
11545405Smsmith
116298308Spfg#define	MTRRTOMRTLEN (nitems(i686_mtrrtomrt))
11794683Sdwmalone
11894683Sdwmalonestatic int
119177070Sjhbi686_mtrr2mrt(int val)
120177070Sjhb{
121177070Sjhb
12294683Sdwmalone	if (val < 0 || val >= MTRRTOMRTLEN)
123177070Sjhb		return (MDF_UNKNOWN);
124177070Sjhb	return (i686_mtrrtomrt[val]);
12594683Sdwmalone}
12694683Sdwmalone
127177070Sjhb/*
12894683Sdwmalone * i686 MTRR conflicts. Writeback and uncachable may overlap.
12948925Smsmith */
13094683Sdwmalonestatic int
131177070Sjhbi686_mtrrconflict(int flag1, int flag2)
132177070Sjhb{
133177070Sjhb
13494683Sdwmalone	flag1 &= MDF_ATTRMASK;
13594683Sdwmalone	flag2 &= MDF_ATTRMASK;
13694683Sdwmalone	if (flag1 == flag2 ||
13794683Sdwmalone	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
13894683Sdwmalone	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
139177070Sjhb		return (0);
140177070Sjhb	return (1);
14194683Sdwmalone}
14245405Smsmith
14345405Smsmith/*
14445405Smsmith * Look for an exactly-matching range.
14545405Smsmith */
14645405Smsmithstatic struct mem_range_desc *
147177070Sjhbmem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
14845405Smsmith{
149177070Sjhb	struct mem_range_desc *cand;
150177070Sjhb	int i;
151177070Sjhb
152177070Sjhb	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
153177070Sjhb		if ((cand->mr_base == mrd->mr_base) &&
154177070Sjhb		    (cand->mr_len == mrd->mr_len))
155177070Sjhb			return (cand);
156177070Sjhb	return (NULL);
15745405Smsmith}
15845405Smsmith
15945405Smsmith/*
160177070Sjhb * Fetch the current mtrr settings from the current CPU (assumed to
161177070Sjhb * all be in sync in the SMP case).  Note that if we are here, we
162177070Sjhb * assume that MTRRs are enabled, and we may or may not have fixed
163177070Sjhb * MTRRs.
16445405Smsmith */
16545405Smsmithstatic void
16645405Smsmithi686_mrfetch(struct mem_range_softc *sc)
16745405Smsmith{
168177070Sjhb	struct mem_range_desc *mrd;
169177070Sjhb	u_int64_t msrv;
170177070Sjhb	int i, j, msr;
17145405Smsmith
172177070Sjhb	mrd = sc->mr_desc;
17345405Smsmith
174177070Sjhb	/* Get fixed-range MTRRs. */
175177070Sjhb	if (sc->mr_cap & MR686_FIXMTRR) {
176177070Sjhb		msr = MSR_MTRR64kBase;
177177070Sjhb		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
178177070Sjhb			msrv = rdmsr(msr);
179177070Sjhb			for (j = 0; j < 8; j++, mrd++) {
180177070Sjhb				mrd->mr_flags =
181177070Sjhb				    (mrd->mr_flags & ~MDF_ATTRMASK) |
182177070Sjhb				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
183177070Sjhb				if (mrd->mr_owner[0] == 0)
184177070Sjhb					strcpy(mrd->mr_owner, mem_owner_bios);
185177070Sjhb				msrv = msrv >> 8;
186177070Sjhb			}
187177070Sjhb		}
188177070Sjhb		msr = MSR_MTRR16kBase;
189177070Sjhb		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
190177070Sjhb			msrv = rdmsr(msr);
191177070Sjhb			for (j = 0; j < 8; j++, mrd++) {
192177070Sjhb				mrd->mr_flags =
193177070Sjhb				    (mrd->mr_flags & ~MDF_ATTRMASK) |
194177070Sjhb				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
195177070Sjhb				if (mrd->mr_owner[0] == 0)
196177070Sjhb					strcpy(mrd->mr_owner, mem_owner_bios);
197177070Sjhb				msrv = msrv >> 8;
198177070Sjhb			}
199177070Sjhb		}
200177070Sjhb		msr = MSR_MTRR4kBase;
201177070Sjhb		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
202177070Sjhb			msrv = rdmsr(msr);
203177070Sjhb			for (j = 0; j < 8; j++, mrd++) {
204177070Sjhb				mrd->mr_flags =
205177070Sjhb				    (mrd->mr_flags & ~MDF_ATTRMASK) |
206177070Sjhb				    i686_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
207177070Sjhb				if (mrd->mr_owner[0] == 0)
208177070Sjhb					strcpy(mrd->mr_owner, mem_owner_bios);
209177070Sjhb				msrv = msrv >> 8;
210177070Sjhb			}
211177070Sjhb		}
21245405Smsmith	}
213177070Sjhb
214177070Sjhb	/* Get remainder which must be variable MTRRs. */
215177070Sjhb	msr = MSR_MTRRVarBase;
216177070Sjhb	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
217177070Sjhb		msrv = rdmsr(msr);
21845405Smsmith		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
219177070Sjhb		    i686_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
220177125Sjhb		mrd->mr_base = msrv & mtrr_physmask;
221177070Sjhb		msrv = rdmsr(msr + 1);
222177070Sjhb		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
223177070Sjhb		    (mrd->mr_flags | MDF_ACTIVE) :
224177070Sjhb		    (mrd->mr_flags & ~MDF_ACTIVE);
225177070Sjhb
226177070Sjhb		/* Compute the range from the mask. Ick. */
227177125Sjhb		mrd->mr_len = (~(msrv & mtrr_physmask) &
228177125Sjhb		    (mtrr_physmask | 0xfffLL)) + 1;
229177070Sjhb		if (!mrvalid(mrd->mr_base, mrd->mr_len))
230177070Sjhb			mrd->mr_flags |= MDF_BOGUS;
231177070Sjhb
232177070Sjhb		/* If unclaimed and active, must be the BIOS. */
233177070Sjhb		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
234177070Sjhb			strcpy(mrd->mr_owner, mem_owner_bios);
23545405Smsmith	}
23645405Smsmith}
23745405Smsmith
23845405Smsmith/*
23945405Smsmith * Return the MTRR memory type matching a region's flags
24045405Smsmith */
24145405Smsmithstatic int
24245405Smsmithi686_mtrrtype(int flags)
24345405Smsmith{
244177070Sjhb	int i;
24545405Smsmith
246177070Sjhb	flags &= MDF_ATTRMASK;
24745405Smsmith
248177070Sjhb	for (i = 0; i < MTRRTOMRTLEN; i++) {
249177070Sjhb		if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
250177070Sjhb			continue;
251177070Sjhb		if (flags == i686_mtrrtomrt[i])
252177070Sjhb			return (i);
253177070Sjhb	}
254177070Sjhb	return (-1);
25545405Smsmith}
25645405Smsmith
/*
 * Convert memory range attribute flags to an MTRR type byte, falling
 * back to the previous hardware value when the flags have no MTRR
 * equivalent.
 */
static int
i686_mrt2mtrr(int flags, int oldval)
{
	int mtrrtype;

	mtrrtype = i686_mtrrtype(flags);
	if (mtrrtype == -1)
		mtrrtype = oldval;
	return (mtrrtype & 0xff);
}
26694683Sdwmalone
/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 *
	 * smp_rendezvous() runs i686_mrstoreone() on every CPU so the
	 * MTRRs stay identical system-wide.
	 */
	smp_rendezvous(NULL, i686_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	i686_mrstoreone(sc);
	enable_intr();
#endif
}
29046215Smsmith
/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 *
 * Runs inside a critical section with caches and MTRRs disabled for
 * the duration of the reprogramming, restoring CR0/CR4 afterwards.
 */
static void
i686_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	/* No preemption while the caches and MTRRs are in flux. */
	critical_enter();

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/*
	 * Set fixed-range MTRRs.  Each MSR packs eight one-byte memory
	 * types, rebuilt here from eight consecutive descriptors;
	 * attributes with no MTRR equivalent keep the previous hardware
	 * byte (taken from omsrv via i686_mrt2mtrr()).
	 */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register; inactive entries are zeroed. */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/*
		 * mask/active register: clear the address bits spanned
		 * by the (power-of-2) range length to form PhysMask.
		 */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}
40045405Smsmith
40145405Smsmith/*
40245405Smsmith * Hunt for the fixed MTRR referencing (addr)
40345405Smsmith */
40445405Smsmithstatic struct mem_range_desc *
40545405Smsmithi686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
40645405Smsmith{
407177070Sjhb	struct mem_range_desc *mrd;
408177070Sjhb	int i;
409177070Sjhb
410177070Sjhb	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
411177070Sjhb	     i++, mrd++)
412177070Sjhb		if ((addr >= mrd->mr_base) &&
413177070Sjhb		    (addr < (mrd->mr_base + mrd->mr_len)))
414177070Sjhb			return (mrd);
415177070Sjhb	return (NULL);
41645405Smsmith}
41745405Smsmith
41845405Smsmith/*
419177070Sjhb * Try to satisfy the given range request by manipulating the fixed
420177070Sjhb * MTRRs that cover low memory.
42145405Smsmith *
422177070Sjhb * Note that we try to be generous here; we'll bloat the range out to
423177070Sjhb * the next higher/lower boundary to avoid the consumer having to know
424177070Sjhb * too much about the mechanisms here.
42545405Smsmith *
426177070Sjhb * XXX note that this will have to be updated when we start supporting
427177070Sjhb * "busy" ranges.
42845405Smsmith */
42945405Smsmithstatic int
43045405Smsmithi686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
43145405Smsmith{
432177070Sjhb	struct mem_range_desc *first_md, *last_md, *curr_md;
43345405Smsmith
434177070Sjhb	/* Range check. */
435177070Sjhb	if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
436177070Sjhb	    ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
437177070Sjhb		return (EINVAL);
43845405Smsmith
439177070Sjhb	/* Check that we aren't doing something risky. */
440177070Sjhb	if (!(mrd->mr_flags & MDF_FORCE))
441177070Sjhb		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
442177070Sjhb			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
443177070Sjhb				return (EACCES);
444177070Sjhb		}
445177070Sjhb
446177070Sjhb	/* Set flags, clear set-by-firmware flag. */
447103346Sdwmalone	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
448177070Sjhb		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
449177070Sjhb		    ~MDF_FIRMWARE, mrd->mr_flags);
450177070Sjhb		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
451103346Sdwmalone	}
452103346Sdwmalone
453177070Sjhb	return (0);
45445405Smsmith}
45545405Smsmith
/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * Returns 0 on success; EBUSY when the exactly-matching range is busy;
 * EACCES when an unknown-typed range would be replaced without
 * MDF_FORCE; EINVAL when the request overlaps an existing range with a
 * conflicting type; ENOSPC when no variable descriptor is free.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	/* Variable descriptors follow any fixed descriptors in the list. */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (i686_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}
52345405Smsmith
52445405Smsmith/*
52545405Smsmith * Handle requests to set memory range attributes by manipulating MTRRs.
52645405Smsmith */
52745405Smsmithstatic int
52845405Smsmithi686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
52945405Smsmith{
530177070Sjhb	struct mem_range_desc *targ;
531177070Sjhb	int error = 0;
53245405Smsmith
533177070Sjhb	switch(*arg) {
534177070Sjhb	case MEMRANGE_SET_UPDATE:
535177070Sjhb		/*
536177070Sjhb		 * Make sure that what's being asked for is even
537177070Sjhb		 * possible at all.
538177070Sjhb		 */
539177070Sjhb		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
540177070Sjhb		    i686_mtrrtype(mrd->mr_flags) == -1)
541177070Sjhb			return (EINVAL);
54245405Smsmith
543177070Sjhb#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
54445405Smsmith
545177070Sjhb		/* Are the "low memory" conditions applicable? */
546177070Sjhb		if ((sc->mr_cap & MR686_FIXMTRR) &&
547177070Sjhb		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
548177070Sjhb			if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
549177070Sjhb				return (error);
550177070Sjhb		} else {
551177070Sjhb			/* It's time to play with variable MTRRs. */
552177070Sjhb			if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
553177070Sjhb				return (error);
554177070Sjhb		}
555177070Sjhb		break;
556177070Sjhb
557177070Sjhb	case MEMRANGE_SET_REMOVE:
558177070Sjhb		if ((targ = mem_range_match(sc, mrd)) == NULL)
559177070Sjhb			return (ENOENT);
560177070Sjhb		if (targ->mr_flags & MDF_FIXACTIVE)
561177070Sjhb			return (EPERM);
562177070Sjhb		if (targ->mr_flags & MDF_BUSY)
563177070Sjhb			return (EBUSY);
564177070Sjhb		targ->mr_flags &= ~MDF_ACTIVE;
565177070Sjhb		targ->mr_owner[0] = 0;
566177070Sjhb		break;
567177070Sjhb
568177070Sjhb	default:
569177070Sjhb		return (EOPNOTSUPP);
57045405Smsmith	}
57145405Smsmith
572177070Sjhb	/* Update the hardware. */
573177070Sjhb	i686_mrstore(sc);
57445405Smsmith
575177070Sjhb	/* Refetch to see where we're at. */
576177070Sjhb	i686_mrfetch(sc);
577177070Sjhb	return (0);
57845405Smsmith}
57945405Smsmith
/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	/* Snapshot the capability and default-type MSRs for AP init. */
	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	/* MTRRcap's VCNT field gives the variable-range MTRR count. */
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID 0x80000008
	 * is present, use that to figure out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1ULL << pabits) - 1) & ~0xfffULL;

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		/* 64K-granularity ranges starting at physical 0. */
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		/* 16K-granularity ranges starting at 0x80000 (512K). */
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		/* 4K-granularity ranges starting at 0xc0000 (768K). */
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	i686_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}
66445405Smsmith
/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{

	/* Program this CPU's MTRRs from the shared descriptor list... */
	i686_mrstoreone(sc);
	/* ...and install the default-type MSR value saved by i686_mrinit(). */
	wrmsr(MSR_MTRRdefType, mtrrdef);
}
67546215Smsmith
/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrreinit(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 *
	 * The cast adapts i686_mrAPinit()'s signature to the
	 * void (*)(void *) callback type smp_rendezvous() expects.
	 */
	smp_rendezvous(NULL, (void *)i686_mrAPinit, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	i686_mrAPinit(sc);
	enable_intr();
#endif
}
699189903Sjkim
700189903Sjkimstatic void
70145405Smsmithi686_mem_drvinit(void *unused)
70245405Smsmith{
703177070Sjhb
704177124Sjhb	if (mtrrs_disabled)
705177124Sjhb		return;
706177124Sjhb	if (!(cpu_feature & CPUID_MTRR))
707177124Sjhb		return;
708177124Sjhb	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
709177124Sjhb		return;
710187118Sjkim	switch (cpu_vendor_id) {
711187118Sjkim	case CPU_VENDOR_INTEL:
712187118Sjkim	case CPU_VENDOR_AMD:
713203289Srnoland	case CPU_VENDOR_CENTAUR:
714187118Sjkim		break;
715187118Sjkim	default:
716177124Sjhb		return;
717187118Sjkim	}
718177124Sjhb	mem_range_softc.mr_op = &i686_mrops;
71945405Smsmith}
720177070SjhbSYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL);
721