/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#define DEBUG

#include <linux/types.h>

#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

u32 num_var_ranges;

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

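/*
 * Descriptive note (added comment): these are page-frame-number masks
 * derived from the CPU's physical address width in mtrr_bp_init() below.
 * A base or size, expressed in 4 kB pages, with any bit of size_or_mask
 * set lies above the width the MTRRs can address and is rejected by
 * mtrr_add_page(); size_and_mask keeps the valid high PFN bits instead.
 * Worked example: with a 36-bit address bus the valid PFN bits are
 * 36 - PAGE_SHIFT = 24, so size_or_mask = ~((1ULL << 24) - 1).
 */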
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];

const struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

void set_mtrr_ops(const struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/*
		 * Intel 450NX errata #23. Non-ascending cacheline evictions
		 * to write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}

/*  Sets num_var_ranges to the number of variable MTRRs the CPU supports  */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	/* On Intel, bits 7:0 of MTRRcap (VCNT) give the variable-range count */
	if (use_intel())
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}

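/*
 * Rendezvous data shared between the CPU driving an MTRR update in
 * set_mtrr() and the other CPUs running mtrr_work_handler() (descriptive
 * comment): ->count is the number of CPUs that still have to reach the
 * current step, ->gate is flipped by the initiating CPU to release the
 * others into the next step, and the smp_* fields carry the register,
 * base, size and type being programmed.
 */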
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);

/**
 * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns zero.
 */
static int mtrr_work_handler(void *info)
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	/* Announce that we have entered the handler, then wait for the gate */
	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	local_irq_save(flags);

	/* Announce that interrupts are off, then wait for the gate to drop */
	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init) {
		/*
		 * Initialize the MTRRs in addition to the synchronisation.
		 */
		mtrr_if->set_all();
	}

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
	return 0;
}

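/*
 * Added note: two region types may overlap when either side is UNCACHABLE
 * (the effective type is then uncacheable anyway) or when one is WRTHROUGH
 * and the other WRBACK (the effective type is write-through); this follows
 * the Intel SDM rules for overlapping variable MTRRs.
 */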
static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU announces that it has started the rendezvous handler
 * by decrementing the count, we reset data.count and set the data.gate flag,
 * allowing all the CPUs to proceed with the work. As each CPU disables
 * interrupts, it'll decrement data.count once. We wait until it hits 0 and
 * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
 * are waiting for that flag to be cleared. Once it's cleared, each
 * CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently,
 * so we call the mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate
 * to be set.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;
	int cpu;

	preempt_disable();

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	for_each_online_cpu(cpu) {
		struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);

		if (cpu == smp_processor_id())
			continue;

		stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
	}

	/* Wait for everyone to enter the handler */
	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	local_irq_save(flags);

	/* Wait for everyone to disable interrupts */
	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Do our MTRR business */

	/*
	 * HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);
	else if (!mtrr_aps_delayed_init)
		mtrr_if->set_all();

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
	preempt_enable();
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_if)
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warning("mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warning("mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		pr_warning("mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		pr_warning("mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/* Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
				" 0x%lx000,0x%lx000\n", base, size, lbase,
				lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
				base, size, mtrr_attrib_to_str(ltype),
				mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("mtrr: no more MTRRs available\n");
	}
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
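/*
 * Worked example of the page units used above (values purely illustrative):
 * to cover a 64 MB aperture at physical address 0xd0000000, a caller of
 * mtrr_add_page() would pass base = 0xd0000000 >> PAGE_SHIFT = 0xd0000 and
 * size = (64 << 20) >> PAGE_SHIFT = 0x4000 pages.  Most drivers use the
 * byte-based mtrr_add() wrapper below instead.
 */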

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
		pr_debug("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
EXPORT_SYMBOL(mtrr_add);
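/*
 * Illustrative (hypothetical) use by a driver mapping a write-combining
 * framebuffer aperture; fb_base, fb_size and dev are assumed names, and
 * fb_size is assumed to be a power of two on a matching boundary:
 *
 *	int mtrr;
 *
 *	mtrr = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
 *	if (mtrr < 0)
 *		dev_warn(dev, "write-combining unavailable: %d\n", mtrr);
 *	...
 *	if (mtrr >= 0)
 *		mtrr_del(mtrr, fb_base, fb_size);
 *
 * The returned value is only a cookie to hand back to mtrr_del(); a failure
 * is non-fatal, the region simply stays uncached.
 */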

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warning("mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warning("mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
EXPORT_SYMBOL(mtrr_del);
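/*
 * Illustrative (hypothetical) cleanup for a caller that did not keep the
 * cookie returned by mtrr_add(): passing a negative @reg makes
 * mtrr_del_page() look the region up by base and size instead:
 *
 *	mtrr_del(-1, fb_base, fb_size);
 */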

/*
 * HACK ALERT!
 * These should be called implicitly, but we can't until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
				&mtrr_value[i].lsize,
				&mtrr_value[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				    mtrr_value[i].lsize,
				    mtrr_value[i].ltype);
		}
	}
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 *
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;			/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD-specific MSR, but we assume (hope?) that
		 * Intel will implement it too when they extend the address
		 * bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * VIA C* family have Intel style MTRRs,
			 * but don't support PAE
			 */
			size_or_mask = 0xfff00000;		/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}
}

void mtrr_ap_init(void)
{
	if (!use_intel() || mtrr_aps_delayed_init)
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called at CPU boot time and
	 * holding the lock would break it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early during software resume, when there absolutely are
	 *      no MTRR entry changes;
	 *
	 *   2. CPU hot-add time. We let mtrr_add/del_page hold the CPU
	 *      hotplug lock to prevent MTRR entry changes
	 */
	set_mtrr(~0U, 0, 0, 0);
}

/**
 * mtrr_save_state - Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}

void set_mtrr_aps_delayed_init(void)
{
	if (!use_intel())
		return;

	mtrr_aps_delayed_init = true;
}

/*
 * Delayed MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	/*
	 * Check if someone has requested the delay of AP MTRR initialization
	 * by doing set_mtrr_aps_delayed_init() prior to this point. If not,
	 * then we are done.
	 */
	if (!mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}

void mtrr_bp_restore(void)
{
	if (!use_intel())
		return;

	mtrr_if->set_all();
}

static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPUs that have no MTRR seem not to support SMP either.
	 * They have specific drivers; we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU that supports
	 * suspend/resume? If not, we should remove the code.
	 */
	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);

	return 0;
}
subsys_initcall(mtrr_init_finialize);