/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000 Richard Gooch
   Copyright (c) 2002 Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),
   section 11.11.7

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
   on 6-7 March 2002.
   Source: Intel Architecture Software Developer's Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <asm/mtrr.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"

u32 num_var_ranges = 0;

unsigned int *usage_table;
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops *mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type);

#ifndef CONFIG_X86_64
extern int arr3_protected;
#else
#define arr3_protected 0
#endif

void set_mtrr_ops(struct mtrr_ops *ops)
{
        if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
                mtrr_ops[ops->vendor] = ops;
}

/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
        struct pci_dev *dev;
        u8 rev;

        if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
                /* ServerWorks LE chipsets < rev 6 have problems with
                   write-combining.  Don't allow it and leave room for other
                   chipsets to be tagged. */
                if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
                    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
                        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
                        if (rev <= 5) {
                                printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
                                pci_dev_put(dev);
                                return 0;
                        }
                }
                /* Intel 450NX erratum #23: non-ascending cacheline evictions
                   to write-combining memory may result in data corruption. */
                if (dev->vendor == PCI_VENDOR_ID_INTEL &&
                    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
                        printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
                        pci_dev_put(dev);
                        return 0;
                }
                pci_dev_put(dev);
        }
        return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}

/* This function sets num_var_ranges to the number of variable MTRRs */
static void __init set_num_var_ranges(void)
{
        unsigned long config = 0, dummy;

        if (use_intel()) {
                rdmsr(MTRRcap_MSR, config, dummy);
        } else if (is_cpu(AMD))
                config = 2;
        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
                config = 8;
        num_var_ranges = config & 0xff;
}
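/*
 * For reference, a sketch (not driver code) of the MTRRcap layout that
 * set_num_var_ranges() consults on generic-MTRR CPUs; the field positions
 * are architectural:
 *
 *	vcnt = config & 0xff;		// bits 7:0 - variable-range count
 *	fix  = (config >> 8) & 1;	// bit 8    - fixed ranges supported
 *	wc   = (config >> 10) & 1;	// bit 10   - write-combining supported
 *
 * The generic mtrr_if->have_wrcomb() implementation reports that WC bit;
 * the PCI quirks in have_wrcomb() above exist because some chipsets
 * mis-handle write-combining even when the CPU advertises it.
 */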
static void __init init_table(void)
{
        int i, max;

        max = num_var_ranges;
        if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
            == NULL) {
                printk(KERN_ERR "mtrr: could not allocate\n");
                return;
        }
        for (i = 0; i < max; i++)
                usage_table[i] = 1;
}

struct set_mtrr_data {
        atomic_t count;
        atomic_t gate;
        unsigned long smp_base;
        unsigned long smp_size;
        unsigned int smp_reg;
        mtrr_type smp_type;
};

#ifdef CONFIG_SMP

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
        struct set_mtrr_data *data = info;
        unsigned long flags;

        local_irq_save(flags);

        atomic_dec(&data->count);
        while (!atomic_read(&data->gate))
                cpu_relax();

        /* The master has cleared me to execute */
        if (data->smp_reg != ~0U)
                mtrr_if->set(data->smp_reg, data->smp_base,
                             data->smp_size, data->smp_type);
        else
                mtrr_if->set_all();

        atomic_dec(&data->count);
        while (atomic_read(&data->gate))
                cpu_relax();

        atomic_dec(&data->count);
        local_irq_restore(flags);
}

#endif

static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
        return type1 == MTRR_TYPE_UNCACHABLE ||
               type2 == MTRR_TYPE_UNCACHABLE ||
               (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
               (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
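/*
 * Example of how the compatibility rule plays out (a sketch; the values are
 * the MTRR_TYPE_* constants used above):
 *
 *	types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRCOMB);  // 1: UC always wins
 *	types_compatible(MTRR_TYPE_WRTHROUGH,  MTRR_TYPE_WRBACK);  // 1: overlap acts as WT
 *	types_compatible(MTRR_TYPE_WRCOMB,     MTRR_TYPE_WRBACK);  // 0: real conflict
 *
 * This mirrors the architectural precedence rules: UC overrides any other
 * type, and a WT/WB overlap behaves as WT, so neither case needs to be
 * reported as an error by mtrr_add_page() below.
 */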
/**
 * set_mtrr - update mtrrs on all processors
 * @reg: mtrr in question
 * @base: mtrr base
 * @size: mtrr size
 * @type: mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be set. Once it is,
 * each CPU goes through the transition of updating MTRRs. The CPU vendors
 * may each do it differently, so we call the mtrr_if->set() callback and let
 * them take care of it. When they're done, they again decrement data.count
 * and wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type)
{
        struct set_mtrr_data data;
        unsigned long flags;

        data.smp_reg = reg;
        data.smp_base = base;
        data.smp_size = size;
        data.smp_type = type;
        atomic_set(&data.count, num_booting_cpus() - 1);
        /* make sure data.count is visible before unleashing other CPUs */
        smp_wmb();
        atomic_set(&data.gate, 0);

        /* Start the ball rolling on other CPUs */
        if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");

        local_irq_save(flags);

        while (atomic_read(&data.count))
                cpu_relax();

        /* ok, reset count and toggle gate */
        atomic_set(&data.count, num_booting_cpus() - 1);
        smp_wmb();
        atomic_set(&data.gate, 1);

        /* do our MTRR business */

        /* HACK!
         * We use this same function to initialize the mtrrs on boot.
         * The state of the boot cpu's mtrrs has been saved, and we want
         * to replicate it across all the APs.
         * If we're doing that, @reg is set to something special...
         */
        if (reg != ~0U)
                mtrr_if->set(reg, base, size, type);

        /* wait for the others */
        while (atomic_read(&data.count))
                cpu_relax();

        atomic_set(&data.count, num_booting_cpus() - 1);
        smp_wmb();
        atomic_set(&data.gate, 0);

        /*
         * Wait here for everyone to have seen the gate change,
         * so we're the last ones to touch 'data'.
         */
        while (atomic_read(&data.count))
                cpu_relax();

        local_irq_restore(flags);
}
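/*
 * Rendezvous timeline for the function above (a sketch derived from the
 * code; M is the CPU calling set_mtrr(), A is each CPU in ipi_handler()):
 *
 *	M: count = N-1, gate = 0	A: disable irqs, count--
 *	M: wait for count == 0		A: spin until gate == 1
 *	M: count = N-1, gate = 1	A: update MTRRs, count--
 *	M: update MTRRs			A: spin until gate == 0
 *	M: wait for count == 0
 *	M: count = N-1, gate = 0	A: count--, enable irqs
 *	M: wait for count == 0, enable irqs
 *
 * The final wait guarantees that no AP can still be reading 'data', which
 * lives on the master's stack.
 */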
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */

int mtrr_add_page(unsigned long base, unsigned long size,
                  unsigned int type, char increment)
{
        int i, replace, error;
        mtrr_type ltype;
        unsigned long lbase, lsize;

        if (!mtrr_if)
                return -ENXIO;

        if ((error = mtrr_if->validate_add_page(base, size, type)))
                return error;

        if (type >= MTRR_NUM_TYPES) {
                printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
                return -EINVAL;
        }

        /* If the type is WC, check that this processor supports it */
        if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
                printk(KERN_WARNING
                       "mtrr: your processor doesn't support write-combining\n");
                return -ENOSYS;
        }

        if (!size) {
                printk(KERN_WARNING "mtrr: zero sized request\n");
                return -EINVAL;
        }

        if (base & size_or_mask || size & size_or_mask) {
                printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
                return -EINVAL;
        }

        error = -EINVAL;
        replace = -1;

        /* No CPU hotplug when we change MTRR entries */
        lock_cpu_hotplug();
        /* Search for an existing MTRR */
        mutex_lock(&mtrr_mutex);
        for (i = 0; i < num_var_ranges; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
                        continue;
                /* At this point we know there is some kind of
                   overlap/enclosure */
                if (base < lbase || base + size - 1 > lbase + lsize - 1) {
                        if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
                                /* New region encloses an existing region */
                                if (type == ltype) {
                                        replace = replace == -1 ? i : -2;
                                        continue;
                                } else if (types_compatible(type, ltype))
                                        continue;
                        }
                        printk(KERN_WARNING
                               "mtrr: 0x%lx000,0x%lx000 overlaps existing"
                               " 0x%lx000,0x%lx000\n", base, size, lbase,
                               lsize);
                        goto out;
                }
                /* New region is enclosed by an existing region */
                if (ltype != type) {
                        if (types_compatible(type, ltype))
                                continue;
                        printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                               base, size, mtrr_attrib_to_str(ltype),
                               mtrr_attrib_to_str(type));
                        goto out;
                }
                if (increment)
                        ++usage_table[i];
                error = i;
                goto out;
        }
        /* Search for an empty MTRR */
        i = mtrr_if->get_free_region(base, size, replace);
        if (i >= 0) {
                set_mtrr(i, base, size, type);
                if (likely(replace < 0))
                        usage_table[i] = 1;
                else {
                        usage_table[i] = usage_table[replace] + !!increment;
                        if (unlikely(replace != i)) {
                                set_mtrr(replace, 0, 0, 0);
                                usage_table[replace] = 0;
                        }
                }
        } else
                printk(KERN_INFO "mtrr: no more MTRRs available\n");
        error = i;
 out:
        mutex_unlock(&mtrr_mutex);
        unlock_cpu_hotplug();
        return error;
}

static int mtrr_check(unsigned long base, unsigned long size)
{
        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
                printk(KERN_WARNING
                       "mtrr: size and base must be multiples of 4 KiB\n");
                printk(KERN_DEBUG
                       "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
                dump_stack();
                return -1;
        }
        return 0;
}
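/*
 * Worked example of the page conversion (a sketch, assuming PAGE_SHIFT is
 * 12): a driver asking for write-combining over an 8 MB aperture at
 * 0xd8000000 would call mtrr_add(), defined just below:
 *
 *	mtrr_add(0xd8000000, 0x800000, MTRR_TYPE_WRCOMB, 1);
 *
 * which passes mtrr_check() (both values are 4 KiB aligned) and becomes
 *
 *	mtrr_add_page(0xd8000, 0x800, MTRR_TYPE_WRCOMB, 1);
 *
 * A request such as mtrr_add(0xd8000100, ...) fails the alignment check
 * and returns -EINVAL before touching the hardware.
 */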
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */

int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
         char increment)
{
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
                             increment);
}

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */

int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;
        int error = -EINVAL;

        if (!mtrr_if)
                return -ENXIO;

        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
        lock_cpu_hotplug();
        mutex_lock(&mtrr_mutex);
        if (reg < 0) {
                /* Search for an existing MTRR */
                for (i = 0; i < max; ++i) {
                        mtrr_if->get(i, &lbase, &lsize, &ltype);
                        if (lbase == base && lsize == size) {
                                reg = i;
                                break;
                        }
                }
                if (reg < 0) {
                        printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n",
                               base, size);
                        goto out;
                }
        }
        if (reg >= max) {
                printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
                goto out;
        }
        if (is_cpu(CYRIX) && !use_intel()) {
                if ((reg == 3) && arr3_protected) {
                        printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
                        goto out;
                }
        }
        mtrr_if->get(reg, &lbase, &lsize, &ltype);
        if (lsize < 1) {
                printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
                goto out;
        }
        if (usage_table[reg] < 1) {
                printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
                goto out;
        }
        if (--usage_table[reg] < 1)
                set_mtrr(reg, 0, 0, 0);
        error = reg;
 out:
        mutex_unlock(&mtrr_mutex);
        unlock_cpu_hotplug();
        return error;
}
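/*
 * Typical driver usage pattern for this pair of calls (an illustrative
 * sketch; fb_base and fb_size are hypothetical driver variables, and
 * mtrr_del() is defined just below):
 *
 *	int reg;
 *
 *	reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
 *	// reg < 0: WC unavailable; the driver keeps working, just slower
 *
 *	if (reg >= 0)
 *		mtrr_del(reg, fb_base, fb_size);  // drops the usage count
 *
 * Passing the register cookie back to mtrr_del() avoids the base/size
 * search that mtrr_del_page() performs when reg < 0.
 */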
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */

int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

/* HACK ALERT!
 * These should be called implicitly, but we can't do that until all the
 * initcall stuff is done...
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
        amd_init_mtrr();
        cyrix_init_mtrr();
        centaur_init_mtrr();
#endif
}

/* The suspend/resume methods are only for CPUs without the generic MTRR
 * interface; CPUs using the generic MTRR driver don't require this.
 */
struct mtrr_value {
        mtrr_type ltype;
        unsigned long lbase;
        unsigned long lsize;
};

static struct mtrr_value *mtrr_state;

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
        int i;
        int size = num_var_ranges * sizeof(struct mtrr_value);

        mtrr_state = kzalloc(size, GFP_ATOMIC);
        if (!mtrr_state)
                return -ENOMEM;

        for (i = 0; i < num_var_ranges; i++) {
                mtrr_if->get(i,
                             &mtrr_state[i].lbase,
                             &mtrr_state[i].lsize,
                             &mtrr_state[i].ltype);
        }
        return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
        int i;

        for (i = 0; i < num_var_ranges; i++) {
                if (mtrr_state[i].lsize)
                        set_mtrr(i,
                                 mtrr_state[i].lbase,
                                 mtrr_state[i].lsize,
                                 mtrr_state[i].ltype);
        }
        kfree(mtrr_state);
        return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
        .suspend        = mtrr_save,
        .resume         = mtrr_restore,
};
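/*
 * Sketch of what the hooks above do for a machine with, say, one in-use
 * variable range out of two (the values are illustrative, in 4 kB pages):
 *
 *	mtrr_save():    mtrr_state[0] = { .ltype = MTRR_TYPE_WRCOMB,
 *	                                  .lbase = 0xd8000, .lsize = 0x800 }
 *	                mtrr_state[1] = { .lsize = 0 }   // slot unused
 *	mtrr_restore(): set_mtrr(0, 0xd8000, 0x800, MTRR_TYPE_WRCOMB);
 *	                // slot 1 skipped because lsize == 0
 *
 * Only entries with a nonzero size are replayed, so slots that were free
 * at suspend time stay disabled after resume.
 */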
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
__init void mtrr_bp_init(void)
{
        init_ifs();

        if (cpu_has_mtrr) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = 0xff000000;      /* 36 bits */
                size_and_mask = 0x00f00000;

                /* This CPUID leaf (0x80000008) is AMD-specific so far, but
                   we assume (hope?) that Intel will implement it too when
                   they extend the address bus of the Xeon. */
                if (cpuid_eax(0x80000000) >= 0x80000008) {
                        u32 phys_addr;
                        phys_addr = cpuid_eax(0x80000008) & 0xff;
                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                            boot_cpu_data.x86 == 0xF &&
                            boot_cpu_data.x86_model == 0x3 &&
                            (boot_cpu_data.x86_mask == 0x3 ||
                             boot_cpu_data.x86_mask == 0x4))
                                phys_addr = 36;

                        size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
                        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                           boot_cpu_data.x86 == 6) {
                        /* The VIA C* family has Intel-style MTRRs, but
                           doesn't support PAE */
                        size_or_mask = 0xfff00000;      /* 32 bits */
                        size_and_mask = 0;
                }
        } else {
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_AMD:
                        if (cpu_has_k6_mtrr) {
                                /* Pre-Athlon (K6) AMD CPU MTRRs */
                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CENTAUR:
                        if (cpu_has_centaur_mcr) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CYRIX:
                        if (cpu_has_cyrix_arr) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
                                size_or_mask = 0xfff00000;      /* 32 bits */
                                size_and_mask = 0;
                        }
                        break;
                default:
                        break;
                }
        }

        if (mtrr_if) {
                set_num_var_ranges();
                init_table();
                if (use_intel())
                        get_mtrr_state();
        }
}

void mtrr_ap_init(void)
{
        unsigned long flags;

        if (!mtrr_if || !use_intel())
                return;
        /*
         * Ideally we should hold mtrr_mutex here to avoid MTRR entries
         * being changed, but this routine is called at CPU boot time, and
         * holding the lock there would break things. This routine is
         * called in two cases:
         *   1. very early in software resume, when there absolutely are no
         *      MTRR entry changes;
         *   2. at CPU hot-add time. Here mtrr_add/del_page hold the
         *      CPU-hotplug lock, which prevents MTRR entry changes.
         */
        local_irq_save(flags);

        mtrr_if->set_all();

        local_irq_restore(flags);
}

/**
 * mtrr_save_state - save the current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
        int cpu = get_cpu();

        if (cpu == 0)
                mtrr_save_fixed_ranges(NULL);
        else
                smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
        put_cpu();
}

static int __init mtrr_init_finalize(void)
{
        if (!mtrr_if)
                return 0;
        if (use_intel())
                mtrr_state_warn();
        else {
                /* These CPUs lack the generic MTRR interface and seemingly
                 * don't support SMP. They have vendor-specific drivers, so
                 * we use a tricky method to support suspend/resume for them.
                 * TBD: is there any system with such a CPU that supports
                 * suspend/resume? If not, we should remove this code.
                 */
                sysdev_driver_register(&cpu_sysdev_class,
                                       &mtrr_sysdev_driver);
        }
        return 0;
}
subsys_initcall(mtrr_init_finalize);
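/*
 * Summary of the boot/resume ordering of the entry points in this file
 * (a sketch; call sites outside this file are assumptions):
 *
 *	mtrr_bp_init()       - boot CPU, before smp_init(): picks mtrr_if,
 *	                       sizes the masks, snapshots MTRR state
 *	mtrr_ap_init()       - each AP (and each hot-added CPU): replays the
 *	                       boot CPU's state via mtrr_if->set_all()
 *	mtrr_save_state()    - snapshots the BSP's fixed ranges, e.g. before
 *	                       bringing another CPU online
 *	mtrr_init_finalize() - subsys_initcall: warns about inconsistent
 *	                       state (generic driver) or registers the
 *	                       suspend/resume hooks (vendor drivers)
 */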