#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/proc_fs.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#define MTRR_NEED_STRINGS
#include <asm/mtrr.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/msr.h>

#include <asm/hardirq.h>
#include <linux/irq.h>

#define MTRR_VERSION            "1.40 (20010327)"

#define TRUE  1
#define FALSE 0

/*
 * The code assumes all processors support the same MTRR
 * interface.  This is generally a good assumption, but could
 * potentially be a problem.
 */
enum mtrr_if_type {
    MTRR_IF_NONE,		/* No MTRRs supported */
    MTRR_IF_INTEL,		/* Intel (P6) standard MTRRs */
    MTRR_IF_AMD_K6,		/* AMD pre-Athlon MTRRs */
    MTRR_IF_CYRIX_ARR,		/* Cyrix ARRs */
    MTRR_IF_CENTAUR_MCR,	/* Centaur MCRs */
} mtrr_if = MTRR_IF_NONE;

static __initdata char *mtrr_if_name[] = {
    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
};

#define MTRRcap_MSR     0x0fe
#define MTRRdefType_MSR 0x2ff

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f

#ifdef CONFIG_SMP
#  define MTRR_CHANGE_MASK_FIXED     0x01
#  define MTRR_CHANGE_MASK_VARIABLE  0x02
#  define MTRR_CHANGE_MASK_DEFTYPE   0x04
#endif

/* In the Intel processor's MTRR interface, the MTRR type is always held in
   an 8 bit field: */
typedef u8 mtrr_type;

#define LINE_SIZE      80
#define JIFFIE_TIMEOUT 100

#ifdef CONFIG_SMP
#  define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
#  define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
						       TRUE)
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
# define USERSPACE_INTERFACE
#endif

#ifndef USERSPACE_INTERFACE
#  define compute_ascii() while (0)
#endif

#ifdef USERSPACE_INTERFACE
static char *ascii_buffer;
static unsigned int ascii_buf_bytes;
#endif
static unsigned int *usage_table;
static DECLARE_MUTEX(main_lock);

/*  Private functions  */
#ifdef USERSPACE_INTERFACE
static void compute_ascii (void);
#endif


struct set_mtrr_context
{
    unsigned long flags;
    unsigned long deftype_lo;
    unsigned long deftype_hi;
    unsigned long cr4val;
    unsigned long ccr3;
};

static int arr3_protected;

/*  Put the processor into a state where MTRRs can be safely set  */
static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
{
    /*  Disable interrupts locally  */
    __save_flags (ctxt->flags); __cli ();

    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
	return;

    /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) {
	ctxt->cr4val = read_cr4();
	write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7));
    }

    /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
	a side-effect  */
    {
	unsigned int cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0( cr0 );
	wbinvd();
    }

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/*  Save MTRR state */
	rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	ctxt->ccr3 = getCx86 (CX86_CCR3);
    }
}   /*  End Function set_mtrr_prepare_save  */

static void set_mtrr_disable (struct set_mtrr_context *ctxt)
{
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
	return;

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/*  Disable MTRRs, and set the default type to uncached  */
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86 (CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
    }
}   /*  End Function set_mtrr_disable  */

/*  Restore the processor after a set_mtrr_prepare  */
static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR ) {
	__restore_flags (ctxt->flags);
	return;
    }

    /*  Flush caches and TLBs  */
    wbinvd();

    /*  Restore MTRRdefType  */
    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Intel (P6) standard MTRRs */
	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    } else {
	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86 (CX86_CCR3, ctxt->ccr3);
    }

    /*  Enable caches  */
    write_cr0( read_cr0() & 0xbfffffff );

    /*  Restore value of CR4  */
    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
	write_cr4(ctxt->cr4val);

    /*  Re-enable interrupts locally (if enabled previously)  */
    __restore_flags (ctxt->flags);
}   /*  End Function set_mtrr_done  */

/*  This function returns the number of variable MTRRs  */
static unsigned int get_num_var_ranges (void)
{
    unsigned long config, dummy;

    switch ( mtrr_if )
    {
    case MTRR_IF_INTEL:
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & 0xff);
    case MTRR_IF_AMD_K6:
	return 2;
    case MTRR_IF_CYRIX_ARR:
	return 8;
    case MTRR_IF_CENTAUR_MCR:
	return 8;
    default:
	return 0;
    }
}   /*  End Function get_num_var_ranges  */

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb (void)
{
    unsigned long config, dummy;
    struct pci_dev *dev = NULL;

    /* ServerWorks LE chipsets have problems with write-combining.
       Don't allow it and leave room for other chipsets to be tagged */
    if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
	if ((dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_LE)) {
	    printk (KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
	    return 0;
	}
    }

    switch ( mtrr_if )
    {
    case MTRR_IF_INTEL:
	rdmsr (MTRRcap_MSR, config, dummy);
	return (config & (1<<10));
    case MTRR_IF_AMD_K6:
    case MTRR_IF_CENTAUR_MCR:
    case MTRR_IF_CYRIX_ARR:
	return 1;
    default:
	return 0;
    }
}   /*  End Function have_wrcomb  */

static u32 size_or_mask, size_and_mask;

static void intel_get_mtrr (unsigned int reg, unsigned long *base,
			    unsigned long *size, mtrr_type *type)
{
    unsigned long mask_lo, mask_hi, base_lo, base_hi;

    rdmsr (MTRRphysMask_MSR(reg), mask_lo, mask_hi);
    if ( (mask_lo & 0x800) == 0 )
    {
	/*  Invalid (i.e. free) range  */
	*base = 0;
	*size = 0;
	*type = 0;
	return;
    }

    rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

    /* Work out the shifted address mask. */
    mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		| mask_lo >> PAGE_SHIFT;

    /* This works correctly if size is a power of two, i.e. a
       contiguous range. */
    *size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *type = base_lo & 0xff;
}   /*  End Function intel_get_mtrr  */
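
/*  Worked example of the decode above (illustrative, not from the original
    source; assumes PAGE_SHIFT == 12 and 36-bit physical addressing, so
    size_or_mask == 0xff000000): a valid 64 MB range reads back as
    mask_lo == 0xfc000800 (bit 11 = valid) and mask_hi == 0xf.  Then
	mask_lo = 0xff000000 | (0xf << 20) | (0xfc000800 >> 12) = 0xffffc000
    and *size = -0xffffc000 = 0x4000 pages = 64 MB.  */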

static void cyrix_get_arr (unsigned int reg, unsigned long *base,
			   unsigned long *size, mtrr_type *type)
{
    unsigned long flags;
    unsigned char arr, ccr3, rcr, shift;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* Save flags and disable interrupts */
    __save_flags (flags); __cli ();

    ccr3 = getCx86 (CX86_CCR3);
    setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);		/* enable MAPEN */
    ((unsigned char *) base)[3]  = getCx86 (arr);
    ((unsigned char *) base)[2]  = getCx86 (arr+1);
    ((unsigned char *) base)[1]  = getCx86 (arr+2);
    rcr = getCx86(CX86_RCR_BASE + reg);
    setCx86 (CX86_CCR3, ccr3);				/* disable MAPEN */

    /* Re-enable interrupts if they were enabled previously */
    __restore_flags (flags);
    shift = ((unsigned char *) base)[1] & 0x0f;
    *base >>= PAGE_SHIFT;

    /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
     * Note: shift==0xf means 4G, this is unsupported.
     */
    if (shift)
	*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
    else
	*size = 0;

    /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
    if (reg < 7)
    {
	switch (rcr)
	{
	  case  1: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRBACK;     break;
	  case  9: *type = MTRR_TYPE_WRCOMB;     break;
	  case 24:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    } else
    {
	switch (rcr)
	{
	  case  0: *type = MTRR_TYPE_UNCACHABLE; break;
	  case  8: *type = MTRR_TYPE_WRCOMB;     break;
	  case  9: *type = MTRR_TYPE_WRBACK;     break;
	  case 25:
	  default: *type = MTRR_TYPE_WRTHROUGH;  break;
	}
    }
}   /*  End Function cyrix_get_arr  */
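
/*  Illustrative decode (values assumed, not from the original source): a
    stored shift of 9 on ARR0-ARR6 gives 0x1UL << 8 = 0x100 pages = 1 MB;
    on ARR7, whose granule is 256K, the same shift gives
    0x40UL << 8 = 0x4000 pages = 64 MB.  */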

static void amd_get_mtrr (unsigned int reg, unsigned long *base,
			  unsigned long *size, mtrr_type *type)
{
    unsigned long low, high;

    rdmsr (MSR_K6_UWCCR, low, high);
    /*  Upper dword is region 1, lower is region 0  */
    if (reg == 1) low = high;
    /*  The base is in bits 17-31; mask off the low alignment bits  */
    *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
    *type = 0;
    if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
    if (low & 2) *type = MTRR_TYPE_WRCOMB;
    if ( !(low & 3) )
    {
	*size = 0;
	return;
    }
    /*
     *	This needs a little explaining. The size is stored as an
     *	inverted mask of 128K-granularity bits, 15 bits long, offset
     *	by 2 bits.
     *
     *	So to get a size we invert the mask and add 1 to the lowest
     *	mask bit (worth 4, as it sits 2 bits in). This gives us a size
     *	we then shift to turn into 128K blocks.
     *
     *	eg		111 1111 1111 1100      is 512K
     *
     *	invert		000 0000 0000 0011
     *	+1		000 0000 0000 0100
     *	*128K		000 0000 0000 0100 * 128K = 512K
     */
    low = (~low) & 0x1FFFC;
    *size = (low + 4) << (15 - PAGE_SHIFT);
    return;
}   /*  End Function amd_get_mtrr  */

static struct
{
    unsigned long high;
    unsigned long low;
} centaur_mcr[8];

static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type;		/* 0 for winchip, 1 for winchip2 */

/*
 *	Report boot time MCR setups
 */

void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
	centaur_mcr[mcr].low = lo;
	centaur_mcr[mcr].high = hi;
}

static void centaur_get_mcr (unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
    *base = centaur_mcr[reg].high >> PAGE_SHIFT;
    *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
    *type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
    if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
	*type = MTRR_TYPE_UNCACHABLE;
    if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
	*type = MTRR_TYPE_WRBACK;
    if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
	*type = MTRR_TYPE_WRBACK;

}   /*  End Function centaur_get_mcr  */
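
/*  Illustrative decode (assumed values): an MCR covering 64 MB at
    0xd0000000 with WC enabled reads back as high == 0xd0000000 and
    low == 0xfc000009, so
	*size = -(0xfc000000) >> PAGE_SHIFT = 0x04000000 >> 12 = 0x4000 pages
    which is 64 MB, and the low five bits (9 = WWO,WC) leave the type as
    write-combining.  */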

static void (*get_mtrr) (unsigned int reg, unsigned long *base,
			 unsigned long *size, mtrr_type *type);

static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
			       unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_context ctxt;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    if (size == 0)
    {
	/* The invalid bit is kept in the mask, so we simply clear the
	   relevant mask register to disable a range. */
	wrmsr (MTRRphysMask_MSR (reg), 0, 0);
    }
    else
    {
	wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type,
		(base & size_and_mask) >> (32 - PAGE_SHIFT));
	wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800,
		(-size & size_and_mask) >> (32 - PAGE_SHIFT));
    }
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function intel_set_mtrr_up  */
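
/*  Worked example (illustrative, PAGE_SHIFT == 12): 64 MB of
    write-combining at 0xf0000000 means base == 0xf0000 and size == 0x4000
    pages, so the low dwords written are (high dwords are 0 below 4 GB):
	PhysBase = (0xf0000 << 12) | type  = 0xf0000000 | MTRR_TYPE_WRCOMB
	PhysMask = (-0x4000 << 12) | 0x800 = 0xfc000800 (bit 11 = valid)  */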

static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
			      unsigned long size, mtrr_type type, int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned char arr, arr_type, arr_size;

    arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */

    /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
    if (reg >= 7)
	size >>= 6;

    size &= 0x7fff; /* make sure arr_size <= 14 */
    for (arr_size = 0; size; arr_size++, size >>= 1);

    if (reg < 7)
    {
	switch (type) {
	  case MTRR_TYPE_UNCACHABLE:	arr_type =  1; break;
	  case MTRR_TYPE_WRCOMB:	arr_type =  9; break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 24; break;
	  default:			arr_type =  8; break;
	}
    }
    else
    {
	switch (type)
	{
	  case MTRR_TYPE_UNCACHABLE:	arr_type =  0; break;
	  case MTRR_TYPE_WRCOMB:	arr_type =  8; break;
	  case MTRR_TYPE_WRTHROUGH:	arr_type = 25; break;
	  default:			arr_type =  9; break;
	}
    }

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    base <<= PAGE_SHIFT;
    setCx86(arr,    ((unsigned char *) &base)[3]);
    setCx86(arr+1,  ((unsigned char *) &base)[2]);
    setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size);
    setCx86(CX86_RCR_BASE + reg, arr_type);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function cyrix_set_arr_up  */
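
/*  Example of the arr_size encode above (illustrative): a 1 MB region on
    ARR0-ARR6 arrives as size == 0x100 pages; the loop shifts nine times,
    leaving arr_size == 9, which cyrix_get_arr decodes back into
    0x1UL << (9 - 1) == 0x100 pages.  */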

static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type, int do_safe)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
    be done externally.
    [RETURNS] Nothing.
*/
{
    u32 regs[2];
    struct set_mtrr_context ctxt;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    /*
     *	Low is MTRR0, high is MTRR1
     */
    rdmsr (MSR_K6_UWCCR, regs[0], regs[1]);
    /*
     *	Blank to disable
     */
    if (size == 0)
	regs[reg] = 0;
    else
	/* Set the register to the base, the type (off by one) and an
	   inverted bitmask of the size. The size is the only odd bit:
	   we are fed, say, 512K; inverting that gives 111 1111 1111 1011,
	   but subtracting one first and then inverting gives the desired
	   111 1111 1111 1100 mask.

	   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
	regs[reg] = (-size>>(15-PAGE_SHIFT) & 0x0001FFFC)
				| (base<<PAGE_SHIFT) | (type+1);

    /*
     *	The writeback rule is quite specific. See the manual. The sequence
     *	is: disable local interrupts, write back the cache, set the MTRR.
     */
    wbinvd();
    wrmsr (MSR_K6_UWCCR, regs[0], regs[1]);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function amd_set_mtrr_up  */


static void centaur_set_mcr_up (unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type,
				int do_safe)
{
    struct set_mtrr_context ctxt;
    unsigned long low, high;

    if (do_safe) {
	set_mtrr_prepare_save (&ctxt);
	set_mtrr_disable (&ctxt);
    }
    if (size == 0)
    {
	/*  Disable  */
	high = low = 0;
    }
    else
    {
	high = base << PAGE_SHIFT;
	if (centaur_mcr_type == 0)
	    low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
	else
	{
	    if (type == MTRR_TYPE_UNCACHABLE)
		low = -size << PAGE_SHIFT | 0x02;	/* NC */
	    else
		low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
	}
    }
    centaur_mcr[reg].high = high;
    centaur_mcr[reg].low = low;
    wrmsr (MSR_IDT_MCR0 + reg, low, high);
    if (do_safe) set_mtrr_done (&ctxt);
}   /*  End Function centaur_set_mcr_up  */

static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
			    unsigned long size, mtrr_type type,
			    int do_safe);

#ifdef CONFIG_SMP

struct mtrr_var_range
{
    unsigned long base_lo;
    unsigned long base_hi;
    unsigned long mask_lo;
    unsigned long mask_hi;
};


/*  Get the MSR pair relating to a var range  */
static void __init get_mtrr_var_range (unsigned int index,
				       struct mtrr_var_range *vr)
{
    rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
    rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
}   /*  End Function get_mtrr_var_range  */


/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static int __init set_mtrr_var_range_testing (unsigned int index,
					      struct mtrr_var_range *vr)
{
    unsigned int lo, hi;
    int changed = FALSE;

    rdmsr(MTRRphysBase_MSR(index), lo, hi);
    if ( (vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	 || (vr->base_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr (MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	changed = TRUE;
    }

    rdmsr (MTRRphysMask_MSR(index), lo, hi);

    if ( (vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	 || (vr->mask_hi & 0xfUL) != (hi & 0xfUL) )
    {
	wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
	changed = TRUE;
    }
    return changed;
}   /*  End Function set_mtrr_var_range_testing  */

static void __init get_fixed_ranges(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int i;

    rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

    for (i = 0; i < 2; i++)
	rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
    for (i = 0; i < 8; i++)
	rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
}   /*  End Function get_fixed_ranges  */

static int __init set_fixed_ranges_testing(mtrr_type *frs)
{
    unsigned long *p = (unsigned long *)frs;
    int changed = FALSE;
    int i;
    unsigned long lo, hi;

    rdmsr(MTRRfix64K_00000_MSR, lo, hi);
    if (p[0] != lo || p[1] != hi)
    {
	wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
	changed = TRUE;
    }

    for (i = 0; i < 2; i++)
    {
	rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
	if (p[2 + i*2] != lo || p[3 + i*2] != hi)
	{
	    wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i*2], p[3 + i*2]);
	    changed = TRUE;
	}
    }

    for (i = 0; i < 8; i++)
    {
	rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
	if (p[6 + i*2] != lo || p[7 + i*2] != hi)
	{
	    wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
	    changed = TRUE;
	}
    }
    return changed;
}   /*  End Function set_fixed_ranges_testing  */

struct mtrr_state
{
    unsigned int num_var_ranges;
    struct mtrr_var_range *var_ranges;
    mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    unsigned char enabled;
    mtrr_type def_type;
};


/*  Grab all of the MTRR state for this CPU into *state  */
static void __init get_mtrr_state(struct mtrr_state *state)
{
    unsigned int nvrs, i;
    struct mtrr_var_range *vrs;
    unsigned long lo, dummy;

    nvrs = state->num_var_ranges = get_num_var_ranges();
    vrs = state->var_ranges
              = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
    if (vrs == NULL)
	nvrs = state->num_var_ranges = 0;

    for (i = 0; i < nvrs; i++)
	get_mtrr_var_range (i, &vrs[i]);
    get_fixed_ranges (state->fixed_ranges);

    rdmsr (MTRRdefType_MSR, lo, dummy);
    state->def_type = (lo & 0xff);
    state->enabled = (lo & 0xc00) >> 10;
}   /*  End Function get_mtrr_state  */


/*  Free resources associated with a struct mtrr_state  */
static void __init finalize_mtrr_state(struct mtrr_state *state)
{
    if (state->var_ranges) kfree (state->var_ranges);
}   /*  End Function finalize_mtrr_state  */


static unsigned long __init set_mtrr_state (struct mtrr_state *state,
					    struct set_mtrr_context *ctxt)
/*  [SUMMARY] Set the MTRR state for this CPU.
    <state> The MTRR state information to read.
    <ctxt> Some relevant CPU context.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
    unsigned int i;
    unsigned long change_mask = 0;

    for (i = 0; i < state->num_var_ranges; i++)
	if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
	    change_mask |= MTRR_CHANGE_MASK_VARIABLE;

    if ( set_fixed_ranges_testing(state->fixed_ranges) )
	change_mask |= MTRR_CHANGE_MASK_FIXED;
    /*  Set_mtrr_restore restores the old value of MTRRdefType,
	so to set it we fiddle with the saved value  */
    if ( (ctxt->deftype_lo & 0xff) != state->def_type
	 || ( (ctxt->deftype_lo & 0xc00) >> 10 ) != state->enabled)
    {
	ctxt->deftype_lo |= (state->def_type | state->enabled << 10);
	change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    }

    return change_mask;
}   /*  End Function set_mtrr_state  */


static atomic_t undone_count;
static volatile int wait_barrier_mtrr_disable = FALSE;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;

struct set_mtrr_data
{
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;
    mtrr_type smp_type;
};

static void ipi_handler (void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
    struct set_mtrr_data *data = info;
    struct set_mtrr_context ctxt;
    set_mtrr_prepare_save (&ctxt);
    /*  Notify master that I've saved my context and am ready  */
    atomic_dec (&undone_count);
    while (wait_barrier_mtrr_disable) { rep_nop(); barrier(); }
    set_mtrr_disable (&ctxt);
    /*  Notify master that I've flushed and disabled my cache  */
    atomic_dec (&undone_count);
    while (wait_barrier_execute) { rep_nop(); barrier(); }
    /*  The master has cleared me to execute  */
    (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
		    data->smp_type, FALSE);
    /*  Notify master CPU that I've executed the function  */
    atomic_dec (&undone_count);
    /*  Wait for master to clear me to enable cache and return  */
    while (wait_barrier_cache_enable) { rep_nop(); barrier(); }
    set_mtrr_done (&ctxt);
}   /*  End Function ipi_handler  */

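/*  Summary of the rendezvous between set_mtrr_smp() and ipi_handler()
    (added for clarity; the phases below restate the code, nothing more):
      1. each CPU saves its context and checks in via undone_count; the
	 master drops wait_barrier_mtrr_disable and everyone disables MTRRs;
      2. all check in again; the master drops wait_barrier_execute and each
	 CPU programs the new MTRR with do_safe == FALSE;
      3. all check in once more; the master drops wait_barrier_cache_enable
	 and every CPU restores caches and its interrupt state.  */
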
static void set_mtrr_smp (unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    struct set_mtrr_context ctxt;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    wait_barrier_mtrr_disable = TRUE;
    wait_barrier_execute = TRUE;
    wait_barrier_cache_enable = TRUE;
    atomic_set (&undone_count, smp_num_cpus - 1);
    /*  Start the ball rolling on other CPUs  */
    if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
	panic ("mtrr: timed out waiting for other CPUs\n");
    /*  Flush and disable the local CPU's cache  */
    set_mtrr_prepare_save (&ctxt);
    /*  Wait for all other CPUs to save their context  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Set up for completion wait and then release other CPUs to disable MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_mtrr_disable = FALSE;
    set_mtrr_disable (&ctxt);

    /*  Wait for all other CPUs to flush and disable their caches  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Set up for completion wait and then release other CPUs to change MTRRs  */
    atomic_set (&undone_count, smp_num_cpus - 1);
    wait_barrier_execute = FALSE;
    (*set_mtrr_up) (reg, base, size, type, FALSE);
    /*  Now wait for other CPUs to complete the function  */
    while (atomic_read (&undone_count) > 0) { rep_nop(); barrier(); }
    /*  Now all CPUs should have finished the function. Release the barrier to
	allow them to re-enable their caches and return from their interrupt,
	then enable the local cache and return  */
    wait_barrier_cache_enable = FALSE;
    set_mtrr_done (&ctxt);
}   /*  End Function set_mtrr_smp  */


/*  Some BIOSes are broken and don't set all MTRRs the same!  */
static void __init mtrr_state_warn(unsigned long mask)
{
    if (!mask) return;
    if (mask & MTRR_CHANGE_MASK_FIXED)
	printk ("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_VARIABLE)
	printk ("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    if (mask & MTRR_CHANGE_MASK_DEFTYPE)
	printk ("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    printk ("mtrr: probably your BIOS does not set up all CPUs\n");
}   /*  End Function mtrr_state_warn  */

#endif  /*  CONFIG_SMP  */

static char *attrib_to_str (int x)
{
    return (x <= 6) ? mtrr_strings[x] : "?";
}   /*  End Function attrib_to_str  */

static void init_table (void)
{
    int i, max;

    max = get_num_var_ranges ();
    if ( ( usage_table = kmalloc (max * sizeof *usage_table, GFP_KERNEL) )
	 == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    for (i = 0; i < max; i++) usage_table[i] = 1;
#ifdef USERSPACE_INTERFACE
    if ( ( ascii_buffer = kmalloc (max * LINE_SIZE, GFP_KERNEL) ) == NULL )
    {
	printk ("mtrr: could not allocate\n");
	return;
    }
    ascii_buf_bytes = 0;
    compute_ascii ();
#endif
}   /*  End Function init_table  */

static int generic_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (lsize == 0) return i;
    }
    return -ENOSPC;
}   /*  End Function generic_get_free_region  */

static int centaur_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	if (centaur_mcr_reserved & (1<<i))
	    continue;
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (lsize == 0) return i;
    }
    return -ENOSPC;
}   /*  End Function centaur_get_free_region  */

static int cyrix_get_free_region (unsigned long base, unsigned long size)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else a negative error code.
*/
{
    int i;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    /* If we are to set up a region >32M then look at ARR7 immediately */
    if (size > 0x2000)
    {
	cyrix_get_arr (7, &lbase, &lsize, &ltype);
	if (lsize == 0) return 7;
	/*  Else try ARR0-ARR6 first  */
    }
    else
    {
	for (i = 0; i < 7; i++)
	{
	    cyrix_get_arr (i, &lbase, &lsize, &ltype);
	    if ((i == 3) && arr3_protected) continue;
	    if (lsize == 0) return i;
	}
	/* If ARR0-ARR6 aren't free, try ARR7, but its size must be at least 256K */
	cyrix_get_arr (i, &lbase, &lsize, &ltype);
	if ((lsize == 0) && (size >= 0x40)) return i;
    }
    return -ENOSPC;
}   /*  End Function cyrix_get_free_region  */

static int (*get_free_region) (unsigned long base,
			       unsigned long size) = generic_get_free_region;

/**
 *	mtrr_add_page - Add a memory type region
 *	@base: Physical base address of region in pages (4 KB)
 *	@size: Physical size of region in pages (4 KB)
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added either because all regions are in use
 *	or the CPU cannot support it a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */

int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, char increment)
{
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base, in pages) address of the region.
    <size> The size of the region (in pages).
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
    [NOTE] This routine serialises on a semaphore.
*/
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize, last;

    switch ( mtrr_if )
    {
    case MTRR_IF_NONE:
	return -ENXIO;		/* No MTRRs whatsoever */

    case MTRR_IF_AMD_K6:
	/* Apply the K6 block alignment and size rules
	   In order
	   o Uncached or gathering only
	   o 128K or bigger block
	   o Power of 2 block
	   o base suitably aligned to the power
	*/
	if ( type > MTRR_TYPE_WRCOMB || size < (1 << (17-PAGE_SHIFT)) ||
	     (size & ~(size-1))-size || ( base & (size-1) ) )
	    return -EINVAL;
	break;

    case MTRR_IF_INTEL:
	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	     boot_cpu_data.x86 == 6 &&
	     boot_cpu_data.x86_model == 1 &&
	     boot_cpu_data.x86_mask <= 7 )
	{
	    if ( base & ((1 << (22-PAGE_SHIFT))-1) )
	    {
		printk (KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
		return -EINVAL;
	    }
	    if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
		 (type == MTRR_TYPE_WRCOMB || type == MTRR_TYPE_WRBACK))
	    {
		printk (KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
		return -EINVAL;
	    }
	}
	/* Fall through */

    case MTRR_IF_CYRIX_ARR:
    case MTRR_IF_CENTAUR_MCR:
	if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
	{
	    if (type != MTRR_TYPE_WRCOMB && (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE))
	    {
		printk (KERN_WARNING "mtrr: only write-combining%s supported\n",
			centaur_mcr_type?" and uncacheable are":" is");
		return -EINVAL;
	    }
	}
	else if (base + size < 0x100)
	{
	    printk (KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		    base, size);
	    return -EINVAL;
	}
	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
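	/*  e.g. (illustrative values) base=0x400, size=0x400 pages, i.e.
	    4 MiB at 4 MiB: last=0x7ff; ten shifts leave lbase == last == 1,
	    so the region is accepted.  For base=0x600, size=0x400 the loop
	    stops with lbase=0x3 != last=0x4 and the region is rejected as
	    misaligned.  */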
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1);
	if (lbase != last)
	{
	    printk (KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		    base, size);
	    return -EINVAL;
	}
	break;

    default:
	return -EINVAL;
    }

    if (type >= MTRR_NUM_TYPES)
    {
	printk ("mtrr: type: %u illegal\n", type);
	return -EINVAL;
    }

    /*  If the type is WC, check that this processor supports it  */
    if ( (type == MTRR_TYPE_WRCOMB) && !have_wrcomb () )
    {
	printk (KERN_WARNING "mtrr: your processor doesn't support write-combining\n");
	return -ENOSYS;
    }

    if ( base & size_or_mask || size & size_or_mask )
    {
	printk ("mtrr: base or size exceeds the MTRR width\n");
	return -EINVAL;
    }

    increment = increment ? 1 : 0;
    max = get_num_var_ranges ();
    /*  Search for existing MTRR  */
    down(&main_lock);
    for (i = 0; i < max; ++i)
    {
	(*get_mtrr) (i, &lbase, &lsize, &ltype);
	if (base >= lbase + lsize) continue;
	if ( (base < lbase) && (base + size <= lbase) ) continue;
	/*  At this point we know there is some kind of overlap/enclosure  */
	if ( (base < lbase) || (base + size > lbase + lsize) )
	{
	    up(&main_lock);
	    printk (KERN_WARNING "mtrr: 0x%lx000,0x%lx000 overlaps existing"
		    " 0x%lx000,0x%lx000\n",
		    base, size, lbase, lsize);
	    return -EINVAL;
	}
	/*  New region is enclosed by an existing region  */
	if (ltype != type)
	{
	    if (type == MTRR_TYPE_UNCACHABLE) continue;
	    up(&main_lock);
	    printk ( "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
		     base, size, attrib_to_str (ltype), attrib_to_str (type) );
	    return -EINVAL;
	}
	if (increment) ++usage_table[i];
	compute_ascii ();
	up(&main_lock);
	return i;
    }
    /*  Search for an empty MTRR  */
    i = (*get_free_region) (base, size);
    if (i < 0)
    {
	up(&main_lock);
	printk ("mtrr: no more MTRRs available\n");
	return i;
    }
    set_mtrr (i, base, size, type);
    usage_table[i] = 1;
    compute_ascii ();
    up(&main_lock);
    return i;
}   /*  End Function mtrr_add_page  */

/**
 *	mtrr_add - Add a memory type region
 *	@base: Physical base address of region
 *	@size: Physical size of region
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added either because all regions are in use
 *	or the CPU cannot support it a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as a cookie only.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE	-	No caching
 *
 *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */

int mtrr_add(unsigned long base, unsigned long size, unsigned int type, char increment)
{
/*  [SUMMARY] Add an MTRR entry.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    <type> The type of the new region.
    <increment> If true and the region already exists, the usage count will be
    incremented.
    [RETURNS] The MTRR register on success, else a negative number indicating
    the error code.
*/

    if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	return -EINVAL;
    }
    return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment);
}   /*  End Function mtrr_add  */
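
/*  Typical driver usage (an illustrative sketch, not code from this file;
    the addresses are hypothetical):

	int mtrr = mtrr_add (0xf8000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
	...
	if (mtrr >= 0) mtrr_del (mtrr, 0xf8000000, 0x400000);

    A negative return just means the aperture could not be covered; drivers
    are expected to carry on without write-combining.  */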

/**
 *	mtrr_del_page - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code.
 */

int mtrr_del_page (int reg, unsigned long base, unsigned long size)
/*  [SUMMARY] Delete MTRR/decrement usage count.
    <reg> The register. If this is less than 0 then <<base>> and <<size>> must
    be supplied.
    <base> The base address of the region. This is ignored if <<reg>> is >= 0.
    <size> The size of the region. This is ignored if <<reg>> is >= 0.
    [RETURNS] The register on success, else a negative number indicating
    the error code.
    [NOTE] This routine serialises on a semaphore.
*/
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase, lsize;

    if ( mtrr_if == MTRR_IF_NONE ) return -ENXIO;

    max = get_num_var_ranges ();
    down (&main_lock);
    if (reg < 0)
    {
	/*  Search for existing MTRR  */
	for (i = 0; i < max; ++i)
	{
	    (*get_mtrr) (i, &lbase, &lsize, &ltype);
	    if (lbase == base && lsize == size)
	    {
		reg = i;
		break;
	    }
	}
	if (reg < 0)
	{
	    up(&main_lock);
	    printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base, size);
	    return -EINVAL;
	}
    }
    if (reg >= max)
    {
	up (&main_lock);
	printk ("mtrr: register: %d too big\n", reg);
	return -EINVAL;
    }
    if ( mtrr_if == MTRR_IF_CYRIX_ARR )
    {
	if ( (reg == 3) && arr3_protected )
	{
	    up (&main_lock);
	    printk ("mtrr: ARR3 cannot be changed\n");
	    return -EINVAL;
	}
    }
    (*get_mtrr) (reg, &lbase, &lsize, &ltype);
    if (lsize < 1)
    {
	up (&main_lock);
	printk ("mtrr: MTRR %d not used\n", reg);
	return -EINVAL;
    }
    if (usage_table[reg] < 1)
    {
	up (&main_lock);
	printk ("mtrr: reg: %d has count=0\n", reg);
	return -EINVAL;
    }
    if (--usage_table[reg] < 1) set_mtrr (reg, 0, 0, 0);
    compute_ascii ();
    up (&main_lock);
    return reg;
}   /*  End Function mtrr_del_page  */

/**
 *	mtrr_del - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code.
 */

int mtrr_del (int reg, unsigned long base, unsigned long size)
/*  [SUMMARY] Delete MTRR/decrement usage count.
    <reg> The register. If this is less than 0 then <<base>> and <<size>> must
    be supplied.
    <base> The base address of the region. This is ignored if <<reg>> is >= 0.
    <size> The size of the region. This is ignored if <<reg>> is >= 0.
    [RETURNS] The register on success, else a negative number indicating
    the error code.
*/
{
    if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	return -EINVAL;
    }
    return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

#ifdef USERSPACE_INTERFACE

static int mtrr_file_add (unsigned long base, unsigned long size,
			  unsigned int type, char increment, struct file *file, int page)
{
    int reg, max;
    unsigned int *fcount = file->private_data;

    max = get_num_var_ranges ();
    if (fcount == NULL)
    {
	if ( ( fcount = kmalloc (max * sizeof *fcount, GFP_KERNEL) ) == NULL )
	{
	    printk ("mtrr: could not allocate\n");
	    return -ENOMEM;
	}
	memset (fcount, 0, max * sizeof *fcount);
	file->private_data = fcount;
    }
    if (!page) {
	if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	    return -EINVAL;
	}
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
    }
    reg = mtrr_add_page (base, size, type, 1);
    if (reg >= 0) ++fcount[reg];
    return reg;
}   /*  End Function mtrr_file_add  */

static int mtrr_file_del (unsigned long base, unsigned long size,
			  struct file *file, int page)
{
    int reg;
    unsigned int *fcount = file->private_data;

    if (!page) {
	if ( (base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)) )
	{
	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
	    printk ("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
	    return -EINVAL;
	}
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
    }
    reg = mtrr_del_page (-1, base, size);
    if (reg < 0) return reg;
    if (fcount == NULL) return reg;
    if (fcount[reg] < 1) return -EINVAL;
    --fcount[reg];
    return reg;
}   /*  End Function mtrr_file_del  */

static ssize_t mtrr_read (struct file *file, char *buf, size_t len,
			  loff_t *ppos)
{
    if (*ppos >= ascii_buf_bytes) return 0;
    if (*ppos + len > ascii_buf_bytes) len = ascii_buf_bytes - *ppos;
    if ( copy_to_user (buf, ascii_buffer + *ppos, len) ) return -EFAULT;
    *ppos += len;
    return len;
}   /*  End Function mtrr_read  */

static ssize_t mtrr_write (struct file *file, const char *buf, size_t len,
			   loff_t *ppos)
/*  Format of control line:
    "base=%Lx size=%Lx type=%s"     OR:
    "disable=%d"
*/
{
    int i, err;
    unsigned long reg;
    unsigned long long base, size;
    char *ptr;
    char line[LINE_SIZE];

    if ( !suser () ) return -EPERM;
    /*  Can't seek (pwrite) on this device  */
    if (ppos != &file->f_pos) return -ESPIPE;
    if (len < 1) return -EINVAL;	/* guard the len - 1 below */
    memset (line, 0, LINE_SIZE);
    if (len > LINE_SIZE) len = LINE_SIZE;
    if ( copy_from_user (line, buf, len - 1) ) return -EFAULT;
    if ( !strlen (line) ) return -EINVAL;	/* avoid indexing line[-1] */
    ptr = line + strlen (line) - 1;
    if (*ptr == '\n') *ptr = '\0';
    if ( !strncmp (line, "disable=", 8) )
    {
	reg = simple_strtoul (line + 8, &ptr, 0);
	err = mtrr_del_page (reg, 0, 0);
	if (err < 0) return err;
	return len;
    }
    if ( strncmp (line, "base=", 5) )
    {
	printk ("mtrr: no \"base=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    base = simple_strtoull (line + 5, &ptr, 0);
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "size=", 5) )
    {
	printk ("mtrr: no \"size=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    size = simple_strtoull (ptr + 5, &ptr, 0);
    if ( (base & 0xfff) || (size & 0xfff) )
    {
	printk ("mtrr: size and base must be multiples of 4 kiB\n");
	printk ("mtrr: size: 0x%Lx  base: 0x%Lx\n", size, base);
	return -EINVAL;
    }
    for (; isspace (*ptr); ++ptr);
    if ( strncmp (ptr, "type=", 5) )
    {
	printk ("mtrr: no \"type=\" in line: \"%s\"\n", line);
	return -EINVAL;
    }
    ptr += 5;
    for (; isspace (*ptr); ++ptr);
    for (i = 0; i < MTRR_NUM_TYPES; ++i)
    {
	if ( strcmp (ptr, mtrr_strings[i]) ) continue;
	base >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;
	err = mtrr_add_page ((unsigned long)base, (unsigned long)size, i, 1);
	if (err < 0) return err;
	return len;
    }
    printk ("mtrr: illegal type: \"%s\"\n", ptr);
    return -EINVAL;
}   /*  End Function mtrr_write  */
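
/*  Example control lines a root user might write to /proc/mtrr (values are
    illustrative):

	echo "base=0xf8000000 size=0x400000 type=write-combining" >/proc/mtrr
	echo "disable=2" >/proc/mtrr

    The first adds a 4 MB write-combining region; the second releases
    register 2.  */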

static int mtrr_ioctl (struct inode *inode, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
    int err;
    mtrr_type type;
    struct mtrr_sentry sentry;
    struct mtrr_gentry gentry;

    switch (cmd)
    {
      default:
	return -ENOIOCTLCMD;
      case MTRRIOC_ADD_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);

	/* Hide entries that go above 4GB */
	if (gentry.base + gentry.size > 0x100000 || gentry.size == 0x100000)
	    gentry.base = gentry.size = gentry.type = 0;
	else {
	    gentry.base <<= PAGE_SHIFT;
	    gentry.size <<= PAGE_SHIFT;
	    gentry.type = type;
	}

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
      case MTRRIOC_ADD_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_add (sentry.base, sentry.size, sentry.type, 1, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_SET_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_add_page (sentry.base, sentry.size, sentry.type, 0);
	if (err < 0) return err;
	break;
      case MTRRIOC_DEL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_file_del (sentry.base, sentry.size, file, 1);
	if (err < 0) return err;
	break;
      case MTRRIOC_KILL_PAGE_ENTRY:
	if ( !suser () ) return -EPERM;
	if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
	    return -EFAULT;
	err = mtrr_del_page (-1, sentry.base, sentry.size);
	if (err < 0) return err;
	break;
      case MTRRIOC_GET_PAGE_ENTRY:
	if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
	    return -EFAULT;
	if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
	(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
	gentry.type = type;

	if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
	    return -EFAULT;
	break;
    }
    return 0;
}   /*  End Function mtrr_ioctl  */

static int mtrr_close (struct inode *ino, struct file *file)
{
    int i, max;
    unsigned int *fcount = file->private_data;

    if (fcount == NULL) return 0;
    lock_kernel();
    max = get_num_var_ranges ();
    for (i = 0; i < max; ++i)
    {
	while (fcount[i] > 0)
	{
	    if (mtrr_del (i, 0, 0) < 0) printk ("mtrr: reg %d not used\n", i);
	    --fcount[i];
	}
    }
    unlock_kernel();
    kfree (fcount);
    file->private_data = NULL;
    return 0;
}   /*  End Function mtrr_close  */

static struct file_operations mtrr_fops =
{
    owner:	THIS_MODULE,
    read:	mtrr_read,
    write:	mtrr_write,
    ioctl:	mtrr_ioctl,
    release:	mtrr_close,
};

#  ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_root_mtrr;

#  endif  /*  CONFIG_PROC_FS  */

static devfs_handle_t devfs_handle;

static void compute_ascii (void)
{
    char factor;
    int i, max;
    mtrr_type type;
    unsigned long base, size;

    ascii_buf_bytes = 0;
    max = get_num_var_ranges ();
    for (i = 0; i < max; i++)
    {
	(*get_mtrr) (i, &base, &size, &type);
	if (size == 0) usage_table[i] = 0;
	else
	{
	    if (size < (0x100000 >> PAGE_SHIFT))
	    {
		/* less than 1MB */
		factor = 'K';
		size <<= PAGE_SHIFT - 10;
	    }
	    else
	    {
		factor = 'M';
		size >>= 20 - PAGE_SHIFT;
	    }
	    sprintf
		(ascii_buffer + ascii_buf_bytes,
		 "reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
		 i, base, base >> (20 - PAGE_SHIFT), size, factor,
		 attrib_to_str (type), usage_table[i]);
	    ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
	}
    }
    devfs_set_file_size (devfs_handle, ascii_buf_bytes);
#  ifdef CONFIG_PROC_FS
    if (proc_root_mtrr)
	proc_root_mtrr->size = ascii_buf_bytes;
#  endif  /*  CONFIG_PROC_FS  */
}   /*  End Function compute_ascii  */
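
/*  A line produced by the sprintf format above looks like this
    (illustrative values):

	reg01: base=0xf8000000 (3968MB), size=   4MB: write-combining, count=1
 */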

#endif  /*  USERSPACE_INTERFACE  */

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

#ifdef CONFIG_SMP

typedef struct
{
    unsigned long base;
    unsigned long size;
    mtrr_type type;
} arr_state_t;

arr_state_t arr_state[8] __initdata =
{
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
    {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
};

unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };

static void __init cyrix_arr_init_secondary(void)
{
    struct set_mtrr_context ctxt;
    int i;

    /* flush cache and enable MAPEN */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    /* the CCRs are not contiguous */
    for (i = 0; i < 4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
    for (      ; i < 7; i++) setCx86(CX86_CCR4 + i - 4, ccr_state[i]); /* ccr_state[4..6] -> CCR4..CCR6 */
    for (i = 0; i < 8; i++)
	cyrix_set_arr_up(i,
	    arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
}   /*  End Function cyrix_arr_init_secondary  */

#endif

/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init cyrix_arr_init(void)
{
    struct set_mtrr_context ctxt;
    unsigned char ccr[7];
    int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
    int i;
#endif

    /* flush cache and enable MAPEN */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    /* Save all CCRs locally */
    ccr[0] = getCx86 (CX86_CCR0);
    ccr[1] = getCx86 (CX86_CCR1);
    ccr[2] = getCx86 (CX86_CCR2);
    ccr[3] = ctxt.ccr3;
    ccr[4] = getCx86 (CX86_CCR4);
    ccr[5] = getCx86 (CX86_CCR5);
    ccr[6] = getCx86 (CX86_CCR6);

    if (ccr[3] & 1)
    {
	ccrc[3] = 1;
	arr3_protected = 1;
    }
    else
    {
	/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
	 * access to SMM memory through ARR3 (bit 7).
	 */
	if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
	if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
	if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
	arr3_protected = 0;
	if (ccr[6] & 0x02) {
	    ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3 */
	    setCx86 (CX86_CCR6, ccr[6]);
	}
	/* Disable ARR3. This is safe now that we disabled SMM. */
	/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
    }
    /* If we changed CCR1 in memory, change it in the processor, too. */
    if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);

    /* Enable ARR usage by the processor */
    if (!(ccr[5] & 0x20))
    {
	ccr[5] |= 0x20; ccrc[5] = 1;
	setCx86 (CX86_CCR5, ccr[5]);
    }

#ifdef CONFIG_SMP
    for (i = 0; i < 7; i++) ccr_state[i] = ccr[i];
    for (i = 0; i < 8; i++)
	cyrix_get_arr(i,
	    &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
#endif

    set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */

    if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
    if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
    if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
}   /*  End Function cyrix_arr_init  */

/*
 *	Initialise the later (saner) WinChip MCR variant. In this version
 *	the BIOS can pass us the registers it has used (but not their values)
 *	and the control register is read/write
 */

static void __init centaur_mcr1_init(void)
{
    unsigned i;
    u32 lo, hi;

    /* The MCR data registers are read-only even on this variant, so we
     * cannot see what the BIOS programmed into them; the control
     * register does tell us which MCRs are in use, though.
     */

    rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
    if(((lo>>17)&7)==1)		/* Type 1 Winchip2 MCR */
    {
    	lo&= ~0x1C0;		/* clear key */
    	lo|= 0x040;		/* set key to 1 */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
    }

    centaur_mcr_type = 1;

    /*
     *	Clear any unconfigured MCRs.
     */

    for (i = 0; i < 8; ++i)
    {
    	if(centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
    	{
    		if(!(lo & (1<<(9+i))))
			wrmsr (MSR_IDT_MCR0 + i, 0, 0);
		else
			/*
			 *	If the BIOS set up an MCR we cannot see it
			 *	but we don't wish to obliterate it
			 */
			centaur_mcr_reserved |= (1<<i);
	}
    }
    /*
     *	Throw the main write-combining switch...
     *	However, if OOSTORE is enabled, people have already done far
     *	cleverer things and we should behave.
     */

    lo |= 15;			/* Write combine enables */
    wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}   /*  End Function centaur_mcr1_init  */
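
/*
 * Layout of MSR_IDT_MCR_CTRL as used above (inferred from this code
 * rather than quoted from a datasheet, so treat it as a sketch):
 * bits 0-3 are the write-combine enables, bits 6-8 the key field
 * (set to 1 to unlock the MCRs), bits 9-16 flag MCRs the BIOS has
 * already configured, and bits 17-19 report the MCR type (1 means
 * the Winchip 2 style handled here).
 */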

/*
 *	Initialise the original WinChip: its MCR registers are read only,
 *	there is no used-register bitmask for the BIOS to pass on, and the
 *	control register is write only
 */

static void __init centaur_mcr0_init(void)
{
    unsigned i;

    /* Unfortunately, the MCRs are read-only, so there is no way to
     * find out what the BIOS might have done.
     */

    /* Clear any unconfigured MCRs.
     * This way we are sure that the centaur_mcr array contains the actual
     * values. The disadvantage is that any BIOS tweaks are thus undone.
     */
    for (i = 0; i < 8; ++i)
    {
    	if(centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
		wrmsr (MSR_IDT_MCR0 + i, 0, 0);
    }

    wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
}   /*  End Function centaur_mcr0_init  */

/*
 *	Initialise WinChip series MCR registers
 */

static void __init centaur_mcr_init(void)
{
    struct set_mtrr_context ctxt;

    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);

    if(boot_cpu_data.x86_model==4)
    	centaur_mcr0_init();
    else if(boot_cpu_data.x86_model==8 || boot_cpu_data.x86_model == 9)
    	centaur_mcr1_init();

    set_mtrr_done (&ctxt);
}   /*  End Function centaur_mcr_init  */

static int __init mtrr_setup(void)
{
    if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Intel (P6) standard MTRRs */
	mtrr_if = MTRR_IF_INTEL;
	get_mtrr = intel_get_mtrr;
	set_mtrr_up = intel_set_mtrr_up;
	switch (boot_cpu_data.x86_vendor) {

	case X86_VENDOR_AMD:
		/* The original Athlon docs said that
		   total addressable memory is 44 bits wide.
		   It was not really clear whether its MTRRs
		   follow this or not. (Read: 44 or 36 bits).
		   However, "x86-64_overview.pdf" explicitly
		   states that "previous implementations support
		   36 bit MTRRs" and also provides a way to
		   query the width (in bits) of the physical
		   addressable memory on the Hammer family.
		 */
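		/* Worked example: a reported width of 40 bits with
		   PAGE_SHIFT == 12 gives 1 << (40 - 12) == 0x10000000,
		   so size_or_mask becomes 0xf0000000 and size_and_mask
		   (~0xf0000000 & 0xfff00000) becomes 0x0ff00000.  The
		   36-bit defaults below follow the same rule.  */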
		if (boot_cpu_data.x86 == 15 && (cpuid_eax(0x80000000) >= 0x80000008)) {
			/* Hammer family (0xf); it reports its physical
			   address width in CPUID leaf 0x80000008 */
			u32	phys_addr;
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfff00000;
			break;
		}
		size_or_mask  = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;

	case X86_VENDOR_CENTAUR:
		/* VIA Cyrix family CPUs have Intel style MTRRs, but don't support PAE */
		if (boot_cpu_data.x86 == 6) {
			size_or_mask  = 0xfff00000; /* 32 bits */
			size_and_mask = 0;
		}
		break;

	default:
		/* Intel, etc. */
		size_or_mask  = 0xff000000; /* 36 bits */
		size_and_mask = 0x00f00000;
		break;
	}

    } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
	/* Pre-Athlon (K6) AMD CPU MTRRs */
	mtrr_if = MTRR_IF_AMD_K6;
	get_mtrr = amd_get_mtrr;
	set_mtrr_up = amd_set_mtrr_up;
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
	/* Cyrix ARRs */
	mtrr_if = MTRR_IF_CYRIX_ARR;
	get_mtrr = cyrix_get_arr;
	set_mtrr_up = cyrix_set_arr_up;
	get_free_region = cyrix_get_free_region;
	cyrix_arr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
	/* Centaur MCRs */
	mtrr_if = MTRR_IF_CENTAUR_MCR;
	get_mtrr = centaur_get_mcr;
	set_mtrr_up = centaur_set_mcr_up;
	get_free_region = centaur_get_free_region;
	centaur_mcr_init();
	size_or_mask  = 0xfff00000; /* 32 bits */
	size_and_mask = 0;
    } else {
	/* No supported MTRR interface */
	mtrr_if = MTRR_IF_NONE;
    }

    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
	    "mtrr: detected mtrr type: %s\n",
	    MTRR_VERSION, mtrr_if_name[mtrr_if]);

    return (mtrr_if != MTRR_IF_NONE);
}   /*  End Function mtrr_setup  */

#ifdef CONFIG_SMP

static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};

void __init mtrr_init_boot_cpu(void)
{
    if ( !mtrr_setup () )
	return;

    if ( mtrr_if == MTRR_IF_INTEL ) {
	/* Only for Intel MTRRs */
	get_mtrr_state (&smp_mtrr_state);
    }
}   /*  End Function mtrr_init_boot_cpu  */

static void __init intel_mtrr_init_secondary_cpu(void)
{
    unsigned long mask, count;
    struct set_mtrr_context ctxt;

    /*  Note that this is not ideal, since the cache is only flushed/disabled
	for this CPU while the MTRRs are changed, but changing this requires
	more invasive changes to the way the kernel boots  */
    set_mtrr_prepare_save (&ctxt);
    set_mtrr_disable (&ctxt);
    mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
    set_mtrr_done (&ctxt);
    /*  Use the atomic bitops to update the global mask  */
    for (count = 0; count < sizeof mask * 8; ++count)
    {
	if (mask & 0x01) set_bit (count, &smp_changes_mask);
	mask >>= 1;
    }
}   /*  End Function intel_mtrr_init_secondary_cpu  */

void __init mtrr_init_secondary_cpu(void)
{
    switch ( mtrr_if ) {
    case MTRR_IF_INTEL:
	/* Intel (P6) standard MTRRs */
	intel_mtrr_init_secondary_cpu();
	break;
    case MTRR_IF_CYRIX_ARR:
	/* This is _completely theoretical_!
	 * I assume here that one day Cyrix will support Intel APIC.
	 * In reality on non-Intel CPUs we won't even get to this routine.
	 * Hopefully no one will plug two Cyrix processors into a dual P5 board.
	 *  :-)
	 */
	cyrix_arr_init_secondary ();
	break;
    case MTRR_IF_NONE:
	break;
    default:
	/* I see no MTRRs I can support in SMP mode... */
	printk ("mtrr: SMP support incomplete for this vendor\n");
    }
}   /*  End Function mtrr_init_secondary_cpu  */
#endif  /*  CONFIG_SMP  */

int __init mtrr_init(void)
{
#ifdef CONFIG_SMP
    /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */

    if ( mtrr_if == MTRR_IF_INTEL ) {
	finalize_mtrr_state (&smp_mtrr_state);
	mtrr_state_warn (smp_changes_mask);
    }
#else
    if ( !mtrr_setup() )
	return 0;		/* MTRRs not supported? */
#endif

#ifdef CONFIG_PROC_FS
    proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
    if (proc_root_mtrr) {
	proc_root_mtrr->owner = THIS_MODULE;
	proc_root_mtrr->proc_fops = &mtrr_fops;
    }
#endif
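    /* Under devfs the control file registered below appears as
       /dev/cpu/mtrr, mirroring the /proc/mtrr entry created above. */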
#ifdef USERSPACE_INTERFACE
    devfs_handle = devfs_register (NULL, "cpu/mtrr", DEVFS_FL_DEFAULT, 0, 0,
				   S_IFREG | S_IRUGO | S_IWUSR,
				   &mtrr_fops, NULL);
#endif
    init_table ();
    return 0;
}   /*  End Function mtrr_init  */

/*
 * Local Variables:
 * mode:c
 * c-file-style:"k&r"
 * c-basic-offset:4
 * End:
 */