/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}
};

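/*
 * smp_changes_mask records which MTRR register classes (fixed, variable,
 * default type) were found to differ between CPUs; mtrr_state holds the
 * saved MTRR configuration that the routines below work from.
 */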
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

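/*  Read the fixed-range MTRRs (one 64K, two 16K and eight 4K register
    blocks) into the array pointed to by frs  */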
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

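/*  Snapshot the fixed-range MTRRs into mtrr_state; the (ignored) info
    argument allows this to be used directly as an SMP call callback  */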
void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

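/*  Print one block of eight fixed-range entries starting at base, each
    covering step bytes  */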
static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

/*  Grab all of the MTRR state for this CPU into mtrr_state  */
void get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof(struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* We don't attempt to pass an error back to MTRR users because handling
   it would be quite complicated in some cases, and the best response is
   simply to ignore the failure. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * k8_enable_fixed_iorrs - enable the K8 extended fixed-range MTRR bits
 *
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs.
 * See AMD publication no. 24593, chapter 3.2.1 for more information.
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs
 * from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, int *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = TRUE;
	}
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <replace_reg> The index of the MTRR to replace, or a negative value if
    a fresh register should be found.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

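/*  Read variable-range MTRR <reg> and decode it into a base and size in
    pages plus the memory type; a clear valid bit in the mask MSR reports
    the register as free (base, size and type all 0).

    Worked example (assuming 36 physical address bits, i.e.
    size_or_mask == 0xff000000, and 4K pages): a 4 MiB write-combining
    range at 0xE8000000 is stored as base 0x0:0xE8000001 and mask
    0xF:0xFFC00800, and decodes here to *base == 0xE8000 pages,
    *size == 0x400 pages and *type == MTRR_TYPE_WRCOMB.  */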
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ
 * from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 *
 * Returns TRUE if any register had to be changed.
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	int changed = FALSE;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}

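/*  MTRRdefType MSR contents saved by prepare_set(), possibly updated by
    set_mtrr_state() and written back by post_set()  */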
static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU from the saved mtrr_state.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*  post_set() restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

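/*  CR4 value saved across a prepare_set()/post_set() pair, and the lock
    that serializes those pairs across CPUs  */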
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain.  The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots  */

	spin_lock(&set_atomicity_lock);

	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

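/*  Undo prepare_set(): write back the (possibly updated) MTRRdefType,
    re-enable the caches, restore CR4 and drop the lock  */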
static void post_set(void) __releases(set_atomicity_lock)
{
	/*  Flush TLBs (no need to flush caches - they are disabled)  */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Enable caches  */
	write_cr0(read_cr0() & 0xbfffffff);

	/*  Restore value of CR4  */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

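/*  Reprogram every MTRR on this CPU from the saved mtrr_state and record
    which register classes had to be changed in smp_changes_mask  */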
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/*  Use the atomic bitops to update the global mask  */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

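/*  Sanity-check a region (base and size in pages) before it is added:
    apply the PPro erratum restrictions and require base to be aligned on
    a size boundary, which also implies size is a power of two  */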
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

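/*  Generic CPUs report write-combining support via bit 10 of MTRRcap  */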
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

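/*  Always report write-combining support; used by vendor-specific
    mtrr_ops implementations that have no capability register to query  */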
int positive_have_wrcomb(void)
{
	return 1;
}

/* The generic (Intel-style) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};