/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/x86/x86/x86_mem.c 314591 2017-03-03 10:30:30Z kib $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * Pentium Pro+ memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the x86-64 programmer's manual,
 * vol. 2.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len) 						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0,
    "Disable MTRRs.");

static void	x86_mrinit(struct mem_range_softc *sc);
static int	x86_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	x86_mrAPinit(struct mem_range_softc *sc);
static void	x86_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops x86_mrops = {
	x86_mrinit,
	x86_mrset,
	x86_mrAPinit,
	x86_mrreinit
};
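
/*
 * These operations are not called directly; they back
 * mem_range_attr_get()/mem_range_attr_set() (sys/memrange.h), which
 * in turn service the MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem
 * used by memcontrol(8), e.g. to mark a frame buffer write-combining.
 */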

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	x86_mrfetch(struct mem_range_softc *sc);
static int	x86_mtrrtype(int flags);
static int	x86_mrt2mtrr(int flags, int oldval);
static int	x86_mtrrconflict(int flag1, int flag2);
static void	x86_mrstore(struct mem_range_softc *sc);
static void	x86_mrstoreone(void *arg);
static struct mem_range_desc *x86_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	x86_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	x86_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* ia32 MTRR type to memory range type conversion */
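/*
 * The table index is the IA32 MTRR type encoding: 0 = UC, 1 = WC,
 * 2 and 3 = reserved, 4 = WT, 5 = WP, 6 = WB.
 */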
static int x86_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define	MTRRTOMRTLEN nitems(x86_mtrrtomrt)

static int
x86_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (x86_mtrrtomrt[val]);
}

/*
 * x86 MTRR conflicts. Writeback and uncacheable may overlap.
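 * (When overlapping variable ranges include UC, the processor resolves
 * the conflict in favor of UC, so that combination is well-defined.)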
 */
static int
x86_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Ensure that the direct map region does not contain any mappings
 * that span MTRRs of different types.  However, the fixed MTRRs can
 * be ignored, because a large page mapping the first 1 MB of physical
 * memory is a special case that the processor handles.  Invalidate
 * any old TLB entries that might hold inconsistent memory type
 * information.
 */
static void
x86_mr_split_dmap(struct mem_range_softc *sc __unused)
{
#ifdef __amd64__
	struct mem_range_desc *mrd;
	int i;

	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
#endif
}

/*
 * Fetch the current MTRR settings from the current CPU (all CPUs are
 * assumed to be in sync in the SMP case).  Note that if we are here,
 * we assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
x86_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
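	/*
	 * Each fixed-range MSR packs eight 8-bit type fields: the one
	 * 64K MSR covers 0-512K in 64K chunks, the two 16K MSRs cover
	 * 512K-768K, and the eight 4K MSRs cover 768K-1M.
	 */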
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    x86_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
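		/*
		 * For example, with cpu_maxphyaddr == 36 (mtrr_physmask ==
		 * 0xffffff000), a PhysMask of 0xff0000000 yields
		 * (~0xff0000000 & 0xfffffffff) + 1 == 0x10000000 (256 MB).
		 */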
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfff)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
x86_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (x86_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == x86_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
x86_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = x86_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * Must be called with interrupts enabled.
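 * (smp_rendezvous() interrupts the other CPUs with an IPI and spins
 * until they all arrive, so it must not run with interrupts disabled.)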
 */
static void
x86_mrstore(struct mem_range_softc *sc)
{

	smp_rendezvous(NULL, x86_mrstoreone, NULL, sc);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
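 *
 * The update follows the Intel-documented procedure: disable PGE and
 * the caches, flush caches and TLBs, disable MTRRs, rewrite them,
 * flush again, then re-enable MTRRs, caches and PGE.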
 */
static void
x86_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	critical_enter();

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < MTRR_N64K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= x86_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
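		/*
		 * rounddown2(mtrr_physmask, mr_len) clears the low
		 * log2(mr_len) bits, masking off the address bits that
		 * may vary within the range.
		 */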
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
x86_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < MTRR_N64K + MTRR_N16K + MTRR_N4K;
	     i++, mrd++)
		if (addr >= mrd->mr_base &&
		    addr < mrd->mr_base + mrd->mr_len)
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
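 * (The fixed MTRRs have 64 KB, 16 KB or 4 KB granularity depending on
 * the region, so the request is widened to cover whole fixed ranges.)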
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
x86_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if ((first_md = x86_mtrrfixsearch(sc, mrd->mr_base)) == NULL ||
	    (last_md = x86_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1))
	    == NULL)
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if ((mrd->mr_flags & MDF_FORCE) == 0) {
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}
	}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
x86_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if (curr_md->mr_base == mrd->mr_base &&
			    curr_md->mr_len == mrd->mr_len) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky. */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    (curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN)
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (x86_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
x86_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    x86_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	\
    ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
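/* FIXTOP == 0x100000: the fixed MTRRs cover exactly the low 1 MB. */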

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) != 0 &&
		    mrd->mr_base + mrd->mr_len <= FIXTOP) {
			if ((error = x86_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = x86_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	x86_mr_split_dmap(sc);

	/* Update the hardware. */
	x86_mrstore(sc);

	/* Refetch to see where we're at. */
	x86_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
x86_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	int i, nmdesc;

	if (sc->mr_desc != NULL)
		/* Already initialized. */
		return;

	nmdesc = 0;
	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.
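	 * For example, a CPU with cpu_maxphyaddr == 36 yields
	 * mtrr_physmask == 0xffffff000 (bits 12-35).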
	 */
	mtrr_physmask = (((uint64_t)1 << cpu_maxphyaddr) - 1) &
	    ~(uint64_t)0xfff;

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	x86_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	x86_mr_split_dmap(sc);
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
x86_mrAPinit(struct mem_range_softc *sc)
{

	x86_mrstoreone(sc);
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * Must be called with interrupts enabled.
 */
static void
x86_mrreinit(struct mem_range_softc *sc)
{

	smp_rendezvous(NULL, (void (*)(void *))x86_mrAPinit, NULL, sc);
}

static void
x86_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
	mem_range_softc.mr_op = &x86_mrops;
	x86_mrinit(&mem_range_softc);
}
SYSINIT(x86memdev, SI_SUB_CPU, SI_ORDER_ANY, x86_mem_drvinit, NULL);