/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}
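
	/* Illustrative arithmetic (hypothetical values): the mask handed in
	 * by the callers is already in units of IOMMU pages, so with
	 * mask = 0xfffff and it_offset = 0x1000, the clamp above leaves
	 * limit = 0xfffff - 0x1000 + 1 = 0xff000 allocatable entries.
	 */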

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;		/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p "
				       "vaddr %lx npages %lx\n",
				       tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
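
		/* Illustrative arithmetic (hypothetical values): with the
		 * usual 4K IOMMU page (IOMMU_PAGE_SHIFT == 12), entry 5
		 * yields bus address 0x5000, and an intra-page segment
		 * offset of 0x80 makes the final dma_addr 0x5080.
		 */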

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction,
		    struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}
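
	/* Note: the zero dma_length that ends the loop above is the
	 * sentinel that iommu_map_sg() stores in the entry following the
	 * last segment it fills.
	 */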

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	if (!is_kdump_kernel()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure. This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
		       node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}
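
/* Hypothetical sketch of the table lifecycle as platform setup code might
 * drive it (the field values and node name below are illustrative only,
 * not taken from this file):
 *
 *	tbl->it_busno  = busno;
 *	tbl->it_size   = window_size >> IOMMU_PAGE_SHIFT;
 *	tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
 *	tbl = iommu_init_table(tbl, nid);
 *	...
 *	iommu_free_table(tbl, "example-node");
 */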

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
				       "tbl %p vaddr %p npages %d\n",
				       tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_ERR "iommu_alloc_coherent size too large: 0x%lx\n",
		       size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
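
/* Hypothetical driver-side sketch of the coherent allocation pairing
 * (the device, table, and mask below are illustrative only):
 *
 *	dma_addr_t bus;
 *	void *buf = iommu_alloc_coherent(dev, tbl, 4096, &bus,
 *					 DMA_BIT_MASK(32), GFP_KERNEL,
 *					 dev_to_node(dev));
 *	if (buf)
 *		iommu_free_coherent(tbl, 4096, buf, bus);
 */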