1/* 2 * Dynamic DMA mapping support for AMD Hammer. 3 * 4 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI. 5 * This allows to use PCI devices that only support 32bit addresses on systems 6 * with more than 4GB. 7 * 8 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification. 9 * 10 * Copyright 2002 Andi Kleen, SuSE Labs. 11 * Subject to the GNU General Public License v2 only. 12 */ 13 14#include <linux/types.h> 15#include <linux/ctype.h> 16#include <linux/agp_backend.h> 17#include <linux/init.h> 18#include <linux/mm.h> 19#include <linux/sched.h> 20#include <linux/string.h> 21#include <linux/spinlock.h> 22#include <linux/pci.h> 23#include <linux/module.h> 24#include <linux/topology.h> 25#include <linux/interrupt.h> 26#include <linux/bitmap.h> 27#include <linux/kdebug.h> 28#include <linux/scatterlist.h> 29#include <linux/iommu-helper.h> 30#include <linux/sysdev.h> 31#include <linux/io.h> 32#include <linux/gfp.h> 33#include <asm/atomic.h> 34#include <asm/mtrr.h> 35#include <asm/pgtable.h> 36#include <asm/proto.h> 37#include <asm/iommu.h> 38#include <asm/gart.h> 39#include <asm/cacheflush.h> 40#include <asm/swiotlb.h> 41#include <asm/dma.h> 42#include <asm/k8.h> 43#include <asm/x86_init.h> 44 45static unsigned long iommu_bus_base; /* GART remapping area (physical) */ 46static unsigned long iommu_size; /* size of remapping area bytes */ 47static unsigned long iommu_pages; /* .. and in pages */ 48 49static u32 *iommu_gatt_base; /* Remapping table */ 50 51static dma_addr_t bad_dma_addr; 52 53/* 54 * If this is disabled the IOMMU will use an optimized flushing strategy 55 * of only flushing when an mapping is reused. With it true the GART is 56 * flushed for every mapping. Problem is that doing the lazy flush seems 57 * to trigger bugs with some popular PCI cards, in particular 3ware (but 58 * has been also also seen with Qlogic at least). 59 */ 60static int iommu_fullflush = 1; 61 62/* Allocation bitmap for the remapping area: */ 63static DEFINE_SPINLOCK(iommu_bitmap_lock); 64/* Guarded by iommu_bitmap_lock: */ 65static unsigned long *iommu_gart_bitmap; 66 67static u32 gart_unmapped_entry; 68 69#define GPTE_VALID 1 70#define GPTE_COHERENT 2 71#define GPTE_ENCODE(x) \ 72 (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) 73#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) 74 75#define EMERGENCY_PAGES 32 /* = 128KB */ 76 77#ifdef CONFIG_AGP 78#define AGPEXTERN extern 79#else 80#define AGPEXTERN 81#endif 82 83/* backdoor interface to AGP driver */ 84AGPEXTERN int agp_memory_reserved; 85AGPEXTERN __u32 *agp_gatt_table; 86 87static unsigned long next_bit; /* protected by iommu_bitmap_lock */ 88static bool need_flush; /* global flush state. set for each gart wrap */ 89 90static unsigned long alloc_iommu(struct device *dev, int size, 91 unsigned long align_mask) 92{ 93 unsigned long offset, flags; 94 unsigned long boundary_size; 95 unsigned long base_index; 96 97 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), 98 PAGE_SIZE) >> PAGE_SHIFT; 99 boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1, 100 PAGE_SIZE) >> PAGE_SHIFT; 101 102 spin_lock_irqsave(&iommu_bitmap_lock, flags); 103 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit, 104 size, base_index, boundary_size, align_mask); 105 if (offset == -1) { 106 need_flush = true; 107 offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0, 108 size, base_index, boundary_size, 109 align_mask); 110 } 111 if (offset != -1) { 112 next_bit = offset+size; 113 if (next_bit >= iommu_pages) { 114 next_bit = 0; 115 need_flush = true; 116 } 117 } 118 if (iommu_fullflush) 119 need_flush = true; 120 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 121 122 return offset; 123} 124 125static void free_iommu(unsigned long offset, int size) 126{ 127 unsigned long flags; 128 129 spin_lock_irqsave(&iommu_bitmap_lock, flags); 130 bitmap_clear(iommu_gart_bitmap, offset, size); 131 if (offset >= next_bit) 132 next_bit = offset + size; 133 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 134} 135 136/* 137 * Use global flush state to avoid races with multiple flushers. 138 */ 139static void flush_gart(void) 140{ 141 unsigned long flags; 142 143 spin_lock_irqsave(&iommu_bitmap_lock, flags); 144 if (need_flush) { 145 k8_flush_garts(); 146 need_flush = false; 147 } 148 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 149} 150 151#ifdef CONFIG_IOMMU_LEAK 152/* Debugging aid for drivers that don't free their IOMMU tables */ 153static int leak_trace; 154static int iommu_leak_pages = 20; 155 156static void dump_leak(void) 157{ 158 static int dump; 159 160 if (dump) 161 return; 162 dump = 1; 163 164 show_stack(NULL, NULL); 165 debug_dma_dump_mappings(NULL); 166} 167#endif 168 169static void iommu_full(struct device *dev, size_t size, int dir) 170{ 171 /* 172 * Ran out of IOMMU space for this operation. This is very bad. 173 * Unfortunately the drivers cannot handle this operation properly. 174 * Return some non mapped prereserved space in the aperture and 175 * let the Northbridge deal with it. This will result in garbage 176 * in the IO operation. When the size exceeds the prereserved space 177 * memory corruption will occur or random memory will be DMAed 178 * out. Hopefully no network devices use single mappings that big. 179 */ 180 181 dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); 182 183 if (size > PAGE_SIZE*EMERGENCY_PAGES) { 184 if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) 185 panic("PCI-DMA: Memory would be corrupted\n"); 186 if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) 187 panic(KERN_ERR 188 "PCI-DMA: Random memory would be DMAed\n"); 189 } 190#ifdef CONFIG_IOMMU_LEAK 191 dump_leak(); 192#endif 193} 194 195static inline int 196need_iommu(struct device *dev, unsigned long addr, size_t size) 197{ 198 return force_iommu || !dma_capable(dev, addr, size); 199} 200 201static inline int 202nonforced_iommu(struct device *dev, unsigned long addr, size_t size) 203{ 204 return !dma_capable(dev, addr, size); 205} 206 207/* Map a single continuous physical area into the IOMMU. 208 * Caller needs to check if the iommu is needed and flush. 209 */ 210static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, 211 size_t size, int dir, unsigned long align_mask) 212{ 213 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); 214 unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); 215 int i; 216 217 if (iommu_page == -1) { 218 if (!nonforced_iommu(dev, phys_mem, size)) 219 return phys_mem; 220 if (panic_on_overflow) 221 panic("dma_map_area overflow %lu bytes\n", size); 222 iommu_full(dev, size, dir); 223 return bad_dma_addr; 224 } 225 226 for (i = 0; i < npages; i++) { 227 iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem); 228 phys_mem += PAGE_SIZE; 229 } 230 return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK); 231} 232 233/* Map a single area into the IOMMU */ 234static dma_addr_t gart_map_page(struct device *dev, struct page *page, 235 unsigned long offset, size_t size, 236 enum dma_data_direction dir, 237 struct dma_attrs *attrs) 238{ 239 unsigned long bus; 240 phys_addr_t paddr = page_to_phys(page) + offset; 241 242 if (!dev) 243 dev = &x86_dma_fallback_dev; 244 245 if (!need_iommu(dev, paddr, size)) 246 return paddr; 247 248 bus = dma_map_area(dev, paddr, size, dir, 0); 249 flush_gart(); 250 251 return bus; 252} 253 254/* 255 * Free a DMA mapping. 256 */ 257static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, 258 size_t size, enum dma_data_direction dir, 259 struct dma_attrs *attrs) 260{ 261 unsigned long iommu_page; 262 int npages; 263 int i; 264 265 if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || 266 dma_addr >= iommu_bus_base + iommu_size) 267 return; 268 269 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; 270 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 271 for (i = 0; i < npages; i++) { 272 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; 273 } 274 free_iommu(iommu_page, npages); 275} 276 277/* 278 * Wrapper for pci_unmap_single working with scatterlists. 279 */ 280static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 281 enum dma_data_direction dir, struct dma_attrs *attrs) 282{ 283 struct scatterlist *s; 284 int i; 285 286 for_each_sg(sg, s, nents, i) { 287 if (!s->dma_length || !s->length) 288 break; 289 gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); 290 } 291} 292 293/* Fallback for dma_map_sg in case of overflow */ 294static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, 295 int nents, int dir) 296{ 297 struct scatterlist *s; 298 int i; 299 300#ifdef CONFIG_IOMMU_DEBUG 301 pr_debug("dma_map_sg overflow\n"); 302#endif 303 304 for_each_sg(sg, s, nents, i) { 305 unsigned long addr = sg_phys(s); 306 307 if (nonforced_iommu(dev, addr, s->length)) { 308 addr = dma_map_area(dev, addr, s->length, dir, 0); 309 if (addr == bad_dma_addr) { 310 if (i > 0) 311 gart_unmap_sg(dev, sg, i, dir, NULL); 312 nents = 0; 313 sg[0].dma_length = 0; 314 break; 315 } 316 } 317 s->dma_address = addr; 318 s->dma_length = s->length; 319 } 320 flush_gart(); 321 322 return nents; 323} 324 325/* Map multiple scatterlist entries continuous into the first. */ 326static int __dma_map_cont(struct device *dev, struct scatterlist *start, 327 int nelems, struct scatterlist *sout, 328 unsigned long pages) 329{ 330 unsigned long iommu_start = alloc_iommu(dev, pages, 0); 331 unsigned long iommu_page = iommu_start; 332 struct scatterlist *s; 333 int i; 334 335 if (iommu_start == -1) 336 return -1; 337 338 for_each_sg(start, s, nelems, i) { 339 unsigned long pages, addr; 340 unsigned long phys_addr = s->dma_address; 341 342 BUG_ON(s != start && s->offset); 343 if (s == start) { 344 sout->dma_address = iommu_bus_base; 345 sout->dma_address += iommu_page*PAGE_SIZE + s->offset; 346 sout->dma_length = s->length; 347 } else { 348 sout->dma_length += s->length; 349 } 350 351 addr = phys_addr; 352 pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE); 353 while (pages--) { 354 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 355 addr += PAGE_SIZE; 356 iommu_page++; 357 } 358 } 359 BUG_ON(iommu_page - iommu_start != pages); 360 361 return 0; 362} 363 364static inline int 365dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, 366 struct scatterlist *sout, unsigned long pages, int need) 367{ 368 if (!need) { 369 BUG_ON(nelems != 1); 370 sout->dma_address = start->dma_address; 371 sout->dma_length = start->length; 372 return 0; 373 } 374 return __dma_map_cont(dev, start, nelems, sout, pages); 375} 376 377/* 378 * DMA map all entries in a scatterlist. 379 * Merge chunks that have page aligned sizes into a continuous mapping. 380 */ 381static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, 382 enum dma_data_direction dir, struct dma_attrs *attrs) 383{ 384 struct scatterlist *s, *ps, *start_sg, *sgmap; 385 int need = 0, nextneed, i, out, start; 386 unsigned long pages = 0; 387 unsigned int seg_size; 388 unsigned int max_seg_size; 389 390 if (nents == 0) 391 return 0; 392 393 if (!dev) 394 dev = &x86_dma_fallback_dev; 395 396 out = 0; 397 start = 0; 398 start_sg = sg; 399 sgmap = sg; 400 seg_size = 0; 401 max_seg_size = dma_get_max_seg_size(dev); 402 ps = NULL; /* shut up gcc */ 403 404 for_each_sg(sg, s, nents, i) { 405 dma_addr_t addr = sg_phys(s); 406 407 s->dma_address = addr; 408 BUG_ON(s->length == 0); 409 410 nextneed = need_iommu(dev, addr, s->length); 411 412 /* Handle the previous not yet processed entries */ 413 if (i > start) { 414 /* 415 * Can only merge when the last chunk ends on a 416 * page boundary and the new one doesn't have an 417 * offset. 418 */ 419 if (!iommu_merge || !nextneed || !need || s->offset || 420 (s->length + seg_size > max_seg_size) || 421 (ps->offset + ps->length) % PAGE_SIZE) { 422 if (dma_map_cont(dev, start_sg, i - start, 423 sgmap, pages, need) < 0) 424 goto error; 425 out++; 426 427 seg_size = 0; 428 sgmap = sg_next(sgmap); 429 pages = 0; 430 start = i; 431 start_sg = s; 432 } 433 } 434 435 seg_size += s->length; 436 need = nextneed; 437 pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE); 438 ps = s; 439 } 440 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) 441 goto error; 442 out++; 443 flush_gart(); 444 if (out < nents) { 445 sgmap = sg_next(sgmap); 446 sgmap->dma_length = 0; 447 } 448 return out; 449 450error: 451 flush_gart(); 452 gart_unmap_sg(dev, sg, out, dir, NULL); 453 454 /* When it was forced or merged try again in a dumb way */ 455 if (force_iommu || iommu_merge) { 456 out = dma_map_sg_nonforce(dev, sg, nents, dir); 457 if (out > 0) 458 return out; 459 } 460 if (panic_on_overflow) 461 panic("dma_map_sg: overflow on %lu pages\n", pages); 462 463 iommu_full(dev, pages << PAGE_SHIFT, dir); 464 for_each_sg(sg, s, nents, i) 465 s->dma_address = bad_dma_addr; 466 return 0; 467} 468 469/* allocate and map a coherent mapping */ 470static void * 471gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, 472 gfp_t flag) 473{ 474 dma_addr_t paddr; 475 unsigned long align_mask; 476 struct page *page; 477 478 if (force_iommu && !(flag & GFP_DMA)) { 479 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 480 page = alloc_pages(flag | __GFP_ZERO, get_order(size)); 481 if (!page) 482 return NULL; 483 484 align_mask = (1UL << get_order(size)) - 1; 485 paddr = dma_map_area(dev, page_to_phys(page), size, 486 DMA_BIDIRECTIONAL, align_mask); 487 488 flush_gart(); 489 if (paddr != bad_dma_addr) { 490 *dma_addr = paddr; 491 return page_address(page); 492 } 493 __free_pages(page, get_order(size)); 494 } else 495 return dma_generic_alloc_coherent(dev, size, dma_addr, flag); 496 497 return NULL; 498} 499 500/* free a coherent mapping */ 501static void 502gart_free_coherent(struct device *dev, size_t size, void *vaddr, 503 dma_addr_t dma_addr) 504{ 505 gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); 506 free_pages((unsigned long)vaddr, get_order(size)); 507} 508 509static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) 510{ 511 return (dma_addr == bad_dma_addr); 512} 513 514static int no_agp; 515 516static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 517{ 518 unsigned long a; 519 520 if (!iommu_size) { 521 iommu_size = aper_size; 522 if (!no_agp) 523 iommu_size /= 2; 524 } 525 526 a = aper + iommu_size; 527 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; 528 529 if (iommu_size < 64*1024*1024) { 530 pr_warning( 531 "PCI-DMA: Warning: Small IOMMU %luMB." 532 " Consider increasing the AGP aperture in BIOS\n", 533 iommu_size >> 20); 534 } 535 536 return iommu_size; 537} 538 539static __init unsigned read_aperture(struct pci_dev *dev, u32 *size) 540{ 541 unsigned aper_size = 0, aper_base_32, aper_order; 542 u64 aper_base; 543 544 pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32); 545 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order); 546 aper_order = (aper_order >> 1) & 7; 547 548 aper_base = aper_base_32 & 0x7fff; 549 aper_base <<= 25; 550 551 aper_size = (32 * 1024 * 1024) << aper_order; 552 if (aper_base + aper_size > 0x100000000UL || !aper_size) 553 aper_base = 0; 554 555 *size = aper_size; 556 return aper_base; 557} 558 559static void enable_gart_translations(void) 560{ 561 int i; 562 563 for (i = 0; i < num_k8_northbridges; i++) { 564 struct pci_dev *dev = k8_northbridges[i]; 565 566 enable_gart_translation(dev, __pa(agp_gatt_table)); 567 } 568 569 /* Flush the GART-TLB to remove stale entries */ 570 k8_flush_garts(); 571} 572 573/* 574 * If fix_up_north_bridges is set, the north bridges have to be fixed up on 575 * resume in the same way as they are handled in gart_iommu_hole_init(). 576 */ 577static bool fix_up_north_bridges; 578static u32 aperture_order; 579static u32 aperture_alloc; 580 581void set_up_gart_resume(u32 aper_order, u32 aper_alloc) 582{ 583 fix_up_north_bridges = true; 584 aperture_order = aper_order; 585 aperture_alloc = aper_alloc; 586} 587 588static void gart_fixup_northbridges(struct sys_device *dev) 589{ 590 int i; 591 592 if (!fix_up_north_bridges) 593 return; 594 595 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 596 597 for (i = 0; i < num_k8_northbridges; i++) { 598 struct pci_dev *dev = k8_northbridges[i]; 599 600 /* 601 * Don't enable translations just yet. That is the next 602 * step. Restore the pre-suspend aperture settings. 603 */ 604 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1); 605 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25); 606 } 607} 608 609static int gart_resume(struct sys_device *dev) 610{ 611 pr_info("PCI-DMA: Resuming GART IOMMU\n"); 612 613 gart_fixup_northbridges(dev); 614 615 enable_gart_translations(); 616 617 return 0; 618} 619 620static int gart_suspend(struct sys_device *dev, pm_message_t state) 621{ 622 return 0; 623} 624 625static struct sysdev_class gart_sysdev_class = { 626 .name = "gart", 627 .suspend = gart_suspend, 628 .resume = gart_resume, 629 630}; 631 632static struct sys_device device_gart = { 633 .cls = &gart_sysdev_class, 634}; 635 636/* 637 * Private Northbridge GATT initialization in case we cannot use the 638 * AGP driver for some reason. 639 */ 640static __init int init_k8_gatt(struct agp_kern_info *info) 641{ 642 unsigned aper_size, gatt_size, new_aper_size; 643 unsigned aper_base, new_aper_base; 644 struct pci_dev *dev; 645 void *gatt; 646 int i, error; 647 648 pr_info("PCI-DMA: Disabling AGP.\n"); 649 650 aper_size = aper_base = info->aper_size = 0; 651 dev = NULL; 652 for (i = 0; i < num_k8_northbridges; i++) { 653 dev = k8_northbridges[i]; 654 new_aper_base = read_aperture(dev, &new_aper_size); 655 if (!new_aper_base) 656 goto nommu; 657 658 if (!aper_base) { 659 aper_size = new_aper_size; 660 aper_base = new_aper_base; 661 } 662 if (aper_size != new_aper_size || aper_base != new_aper_base) 663 goto nommu; 664 } 665 if (!aper_base) 666 goto nommu; 667 668 info->aper_base = aper_base; 669 info->aper_size = aper_size >> 20; 670 671 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); 672 gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 673 get_order(gatt_size)); 674 if (!gatt) 675 panic("Cannot allocate GATT table"); 676 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) 677 panic("Could not set GART PTEs to uncacheable pages"); 678 679 agp_gatt_table = gatt; 680 681 error = sysdev_class_register(&gart_sysdev_class); 682 if (!error) 683 error = sysdev_register(&device_gart); 684 if (error) 685 panic("Could not register gart_sysdev -- " 686 "would corrupt data on next suspend"); 687 688 flush_gart(); 689 690 pr_info("PCI-DMA: aperture base @ %x size %u KB\n", 691 aper_base, aper_size>>10); 692 693 return 0; 694 695 nommu: 696 /* Should not happen anymore */ 697 pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n" 698 "falling back to iommu=soft.\n"); 699 return -1; 700} 701 702static struct dma_map_ops gart_dma_ops = { 703 .map_sg = gart_map_sg, 704 .unmap_sg = gart_unmap_sg, 705 .map_page = gart_map_page, 706 .unmap_page = gart_unmap_page, 707 .alloc_coherent = gart_alloc_coherent, 708 .free_coherent = gart_free_coherent, 709 .mapping_error = gart_mapping_error, 710}; 711 712static void gart_iommu_shutdown(void) 713{ 714 struct pci_dev *dev; 715 int i; 716 717 /* don't shutdown it if there is AGP installed */ 718 if (!no_agp) 719 return; 720 721 for (i = 0; i < num_k8_northbridges; i++) { 722 u32 ctl; 723 724 dev = k8_northbridges[i]; 725 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 726 727 ctl &= ~GARTEN; 728 729 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 730 } 731} 732 733int __init gart_iommu_init(void) 734{ 735 struct agp_kern_info info; 736 unsigned long iommu_start; 737 unsigned long aper_base, aper_size; 738 unsigned long start_pfn, end_pfn; 739 unsigned long scratch; 740 long i; 741 742 if (num_k8_northbridges == 0) 743 return 0; 744 745#ifndef CONFIG_AGP_AMD64 746 no_agp = 1; 747#else 748 /* Makefile puts PCI initialization via subsys_initcall first. */ 749 /* Add other K8 AGP bridge drivers here */ 750 no_agp = no_agp || 751 (agp_amd64_init() < 0) || 752 (agp_copy_info(agp_bridge, &info) < 0); 753#endif 754 755 if (no_iommu || 756 (!force_iommu && max_pfn <= MAX_DMA32_PFN) || 757 !gart_iommu_aperture || 758 (no_agp && init_k8_gatt(&info) < 0)) { 759 if (max_pfn > MAX_DMA32_PFN) { 760 pr_warning("More than 4GB of memory but GART IOMMU not available.\n"); 761 pr_warning("falling back to iommu=soft.\n"); 762 } 763 return 0; 764 } 765 766 /* need to map that range */ 767 aper_size = info.aper_size << 20; 768 aper_base = info.aper_base; 769 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); 770 771 if (end_pfn > max_low_pfn_mapped) { 772 start_pfn = (aper_base>>PAGE_SHIFT); 773 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); 774 } 775 776 pr_info("PCI-DMA: using GART IOMMU.\n"); 777 iommu_size = check_iommu_size(info.aper_base, aper_size); 778 iommu_pages = iommu_size >> PAGE_SHIFT; 779 780 iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, 781 get_order(iommu_pages/8)); 782 if (!iommu_gart_bitmap) 783 panic("Cannot allocate iommu bitmap\n"); 784 785#ifdef CONFIG_IOMMU_LEAK 786 if (leak_trace) { 787 int ret; 788 789 ret = dma_debug_resize_entries(iommu_pages); 790 if (ret) 791 pr_debug("PCI-DMA: Cannot trace all the entries\n"); 792 } 793#endif 794 795 /* 796 * Out of IOMMU space handling. 797 * Reserve some invalid pages at the beginning of the GART. 798 */ 799 bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 800 801 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", 802 iommu_size >> 20); 803 804 agp_memory_reserved = iommu_size; 805 iommu_start = aper_size - iommu_size; 806 iommu_bus_base = info.aper_base + iommu_start; 807 bad_dma_addr = iommu_bus_base; 808 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); 809 810 /* 811 * Unmap the IOMMU part of the GART. The alias of the page is 812 * always mapped with cache enabled and there is no full cache 813 * coherency across the GART remapping. The unmapping avoids 814 * automatic prefetches from the CPU allocating cache lines in 815 * there. All CPU accesses are done via the direct mapping to 816 * the backing memory. The GART address is only used by PCI 817 * devices. 818 */ 819 set_memory_np((unsigned long)__va(iommu_bus_base), 820 iommu_size >> PAGE_SHIFT); 821 /* 822 * Tricky. The GART table remaps the physical memory range, 823 * so the CPU wont notice potential aliases and if the memory 824 * is remapped to UC later on, we might surprise the PCI devices 825 * with a stray writeout of a cacheline. So play it sure and 826 * do an explicit, full-scale wbinvd() _after_ having marked all 827 * the pages as Not-Present: 828 */ 829 wbinvd(); 830 831 /* 832 * Now all caches are flushed and we can safely enable 833 * GART hardware. Doing it early leaves the possibility 834 * of stale cache entries that can lead to GART PTE 835 * errors. 836 */ 837 enable_gart_translations(); 838 839 scratch = get_zeroed_page(GFP_KERNEL); 840 if (!scratch) 841 panic("Cannot allocate iommu scratch page"); 842 gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); 843 for (i = EMERGENCY_PAGES; i < iommu_pages; i++) 844 iommu_gatt_base[i] = gart_unmapped_entry; 845 846 flush_gart(); 847 dma_ops = &gart_dma_ops; 848 x86_platform.iommu_shutdown = gart_iommu_shutdown; 849 swiotlb = 0; 850 851 return 0; 852} 853 854void __init gart_parse_options(char *p) 855{ 856 int arg; 857 858#ifdef CONFIG_IOMMU_LEAK 859 if (!strncmp(p, "leak", 4)) { 860 leak_trace = 1; 861 p += 4; 862 if (*p == '=') 863 ++p; 864 if (isdigit(*p) && get_option(&p, &arg)) 865 iommu_leak_pages = arg; 866 } 867#endif 868 if (isdigit(*p) && get_option(&p, &arg)) 869 iommu_size = arg; 870 if (!strncmp(p, "fullflush", 9)) 871 iommu_fullflush = 1; 872 if (!strncmp(p, "nofullflush", 11)) 873 iommu_fullflush = 0; 874 if (!strncmp(p, "noagp", 5)) 875 no_agp = 1; 876 if (!strncmp(p, "noaperture", 10)) 877 fix_aperture = 0; 878 /* duplicated from pci-dma.c */ 879 if (!strncmp(p, "force", 5)) 880 gart_iommu_aperture_allowed = 1; 881 if (!strncmp(p, "allowed", 7)) 882 gart_iommu_aperture_allowed = 1; 883 if (!strncmp(p, "memaper", 7)) { 884 fallback_aper_force = 1; 885 p += 7; 886 if (*p == '=') { 887 ++p; 888 if (get_option(&p, &arg)) 889 fallback_aper_order = arg; 890 } 891 } 892} 893