/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been enabled
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION


#define NR_IOMMUS			2

/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul


/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /* - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /* - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /* - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /* - 16MB */


/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};

/* Static array of iommus, one per node
 * each contains a list of windows, keyed from dma_window property
 * - on bus setup, look for a matching window, or create one
 * - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bits. "prot" is set up as 3 fields of 4 bits appended
	 * together, one for each of the 3 supported direction values. It is
	 * then shifted left so that the fields matching the desired
	 * direction land on the appropriate bits, and other bits are masked
	 * out.
	 */
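	/* Illustrative walk-through of the trick above, assuming the usual
	 * enum dma_data_direction values (DMA_BIDIRECTIONAL = 0,
	 * DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2) and the CBE_IOPTE_PP_*
	 * bits sitting in the top nibble of the IOPTE:
	 *
	 *   prot = 0xc48 = 1100 0100 1000b, one 4-bit field per direction.
	 *   DMA_BIDIRECTIONAL: prot << 52 lands 0xc on the top bits, so
	 *                      both PP_R and PP_W survive the mask.
	 *   DMA_TO_DEVICE:     prot << 56 leaves only PP_R (device reads).
	 *   DMA_FROM_DEVICE:   prot << 60 leaves only PP_W (device writes).
	 */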
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) &
		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#else
	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
	if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
		base_pte &= ~CBE_IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{

	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		__pa(window->iommu->pad_page) |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
"Read" : "Write", 257 (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); 258 printk(KERN_ERR " page=0x%016lx\n", 259 stat & IOC_IO_ExcpStat_ADDR_Mask); 260 261 /* clear interrupt */ 262 stat &= ~IOC_IO_ExcpStat_V; 263 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); 264 265 return IRQ_HANDLED; 266} 267 268static int cell_iommu_find_ioc(int nid, unsigned long *base) 269{ 270 struct device_node *np; 271 struct resource r; 272 273 *base = 0; 274 275 /* First look for new style /be nodes */ 276 for_each_node_by_name(np, "ioc") { 277 if (of_node_to_nid(np) != nid) 278 continue; 279 if (of_address_to_resource(np, 0, &r)) { 280 printk(KERN_ERR "iommu: can't get address for %s\n", 281 np->full_name); 282 continue; 283 } 284 *base = r.start; 285 of_node_put(np); 286 return 0; 287 } 288 289 /* Ok, let's try the old way */ 290 for_each_node_by_type(np, "cpu") { 291 const unsigned int *nidp; 292 const unsigned long *tmp; 293 294 nidp = of_get_property(np, "node-id", NULL); 295 if (nidp && *nidp == nid) { 296 tmp = of_get_property(np, "ioc-translation", NULL); 297 if (tmp) { 298 *base = *tmp; 299 of_node_put(np); 300 return 0; 301 } 302 } 303 } 304 305 return -ENODEV; 306} 307 308static void cell_iommu_setup_stab(struct cbe_iommu *iommu, 309 unsigned long dbase, unsigned long dsize, 310 unsigned long fbase, unsigned long fsize) 311{ 312 struct page *page; 313 unsigned long segments, stab_size; 314 315 segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; 316 317 pr_debug("%s: iommu[%d]: segments: %lu\n", 318 __func__, iommu->nid, segments); 319 320 /* set up the segment table */ 321 stab_size = segments * sizeof(unsigned long); 322 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); 323 BUG_ON(!page); 324 iommu->stab = page_address(page); 325 memset(iommu->stab, 0, stab_size); 326} 327 328static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu, 329 unsigned long base, unsigned long size, unsigned long gap_base, 330 unsigned long gap_size, unsigned long page_shift) 331{ 332 struct page *page; 333 int i; 334 unsigned long reg, segments, pages_per_segment, ptab_size, 335 n_pte_pages, start_seg, *ptab; 336 337 start_seg = base >> IO_SEGMENT_SHIFT; 338 segments = size >> IO_SEGMENT_SHIFT; 339 pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); 340 /* PTEs for each segment must start on a 4K bounday */ 341 pages_per_segment = max(pages_per_segment, 342 (1 << 12) / sizeof(unsigned long)); 343 344 ptab_size = segments * pages_per_segment * sizeof(unsigned long); 345 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, 346 iommu->nid, ptab_size, get_order(ptab_size)); 347 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); 348 BUG_ON(!page); 349 350 ptab = page_address(page); 351 memset(ptab, 0, ptab_size); 352 353 /* number of 4K pages needed for a page table */ 354 n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; 355 356 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", 357 __func__, iommu->nid, iommu->stab, ptab, 358 n_pte_pages); 359 360 /* initialise the STEs */ 361 reg = IOSTE_V | ((n_pte_pages - 1) << 5); 362 363 switch (page_shift) { 364 case 12: reg |= IOSTE_PS_4K; break; 365 case 16: reg |= IOSTE_PS_64K; break; 366 case 20: reg |= IOSTE_PS_1M; break; 367 case 24: reg |= IOSTE_PS_16M; break; 368 default: BUG(); 369 } 370 371 gap_base = gap_base >> IO_SEGMENT_SHIFT; 372 gap_size = gap_size >> IO_SEGMENT_SHIFT; 373 374 pr_debug("Setting up IOMMU stab:\n"); 375 for (i = start_seg; i < 
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}

static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
		 reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
		 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			  iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT);
	cell_iommu_enable_hardware(iommu);
}


static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase 0x%016lx\n", window->table.it_base);
	pr_debug("\toffset 0x%lx\n", window->table.it_offset);
	pr_debug("\tsize %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;
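	/* Only a window mapped at DMA offset 0 reaches this point: index 0 of
	 * the IOMMU table is reserved and pointed at a zeroed scratch page,
	 * since the spider bridge may still issue reads through freed
	 * entries (see the CELL_IOMMU_REAL_UNMAP note above); it_hint then
	 * steers later allocations past the reserved block.
	 */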

	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       dev->of_node ? dev->of_node->full_name : "?",
		       dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

/* A coherent allocation implies strong ordering */

static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	if (iommu_fixed_is_weak)
		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
					    size, dma_handle,
					    device_to_mask(dev), flag,
					    dev_to_node(dev));
	else
		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
						     flag);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	if (iommu_fixed_is_weak)
		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
				    dma_handle);
	else
		dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
}

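/* Dispatch note (a summary of the checks below): a mapping goes through the
 * 1:1 fixed window via dma_direct_ops only when its requested ordering
 * (DMA_ATTR_WEAK_ORDERING) matches the ordering the fixed window was built
 * with (iommu_fixed_is_weak); otherwise it is routed through the dynamic
 * IOMMU window, whose PTEs can encode the other ordering per mapping.
 */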
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_page(dev, page, offset, size,
					       direction, attrs);
	else
		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
				      offset, size, device_to_mask(dev),
				      direction, attrs);
}

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
					  attrs);
	else
		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
				 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
	else
		return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
				    device_to_mask(dev), direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
	else
		iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
			       attrs);
}

static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

struct dma_map_ops dma_iommu_fixed_ops = {
	.alloc_coherent = dma_fixed_alloc_coherent,
	.free_coherent = dma_fixed_free_coherent,
	.map_sg = dma_fixed_map_sg,
	.unmap_sg = dma_fixed_unmap_sg,
	.dma_supported = dma_fixed_dma_supported,
	.set_dma_mask = dma_set_mask_and_switch,
	.map_page = dma_fixed_map_page,
	.unmap_page = dma_fixed_unmap_page,
};

static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		set_iommu_table_base(dev, cell_get_iommu_table(dev));
	else if (get_pci_dma_ops() == &dma_direct_ops)
		set_dma_offset(dev, cell_dma_direct_offset);
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);


	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < memblock_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, memblock_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}

/*
 * Fixed IOMMU mapping support
 *
 * This code adds support for setting up a fixed IOMMU mapping on certain
 * cell machines. For 64-bit devices this avoids the performance overhead of
 * mapping and unmapping pages at runtime. 32-bit devices are unable to use
 * the fixed mapping.
 *
 * The fixed mapping is established at boot, and maps all of physical memory
 * 1:1 into device space at some offset. On machines with < 30 GB of memory
 * we setup the fixed mapping immediately above the normal IOMMU window.
 *
 * For example a machine with 4GB of memory would end up with the normal
 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 * 3GB, plus any offset required by firmware. The firmware offset is encoded
 * in the "dma-ranges" property.
 *
 * On machines with 30GB or more of memory, we are unable to place the fixed
 * mapping above the normal IOMMU window as we would run out of address space.
 * Instead we move the normal IOMMU window to coincide with the hash page
 * table, this region does not need to be part of the fixed mapping as no
 * device should ever be DMA'ing to it. We then setup the fixed mapping
 * from 0 to 32GB.
 */
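/* Illustrative example for cell_iommu_get_fixed_address() below (made-up
 * numbers): with naddr = 2, pna = 2 and nsize = 2, a dma-ranges entry of
 * <0x0 0x80000000  0x0 0x0  0x1 0x0> describes a 4GB range whose parent
 * address is assumed to translate to CPU address 0, so the scan picks it
 * and returns the device-side offset 0x80000000.
 */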
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	np = of_node_get(dev->of_node);
	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr	: naddr cells
	 * parent addr	: pna cells
	 * size		: nsize cells
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0) {
		dev_addr = of_read_number(ranges + best, naddr);
	} else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}

static void cell_dma_dev_setup_fixed(struct device *dev)
{
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	set_dma_offset(dev, addr);

	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}

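/* Worked example for the index arithmetic in insert_16M_pte() below
 * (illustrative numbers): with IO_SEGMENT_SHIFT = 28 a segment covers
 * 256MB, i.e. sixteen 16MB pages, but cell_iommu_alloc_ptab() pads each
 * segment's PTEs out to a full 4K page (512 entries). For an address
 * 0x31000000 into the fixed window: segment = 3, offset = 0x31 - (3 << 4)
 * = 1, and ptab is advanced by 3 * 512 entries before indexing.
 */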
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		(cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = memblock_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000ul)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = 1;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);

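/* Boot-line examples for the option handled above (illustrative):
 *   iommu_fixed=weak     use weakly ordered PTEs for the fixed window
 *   iommu_fixed=strong   keep strong ordering even when a pcie-endpoint
 *                        node is present
 *   iommu_fixed=off      disable the fixed mapping entirely
 */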
static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);