#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>	/* for proc_mckinley_root */
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /* CONFIG_AGP_PARISC */

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (i.e. follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
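/*
** Illustrative example (not from the original code): because writes are
** posted, a write that must reach the hardware before the CPU proceeds
** is followed by a read back from the same chip, e.g.
**
**	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);	// flush posted writes
**
** The unmap path below uses exactly this pattern to flush PCOM purges.
*/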

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state are consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)(((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
			nents,
			(unsigned long) sg_dma_address(startsg),
			sg_dma_len(startsg),
			sg_virt_addr(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*  I/O Pdir Resource Management
*
*  Bits set in the resource map are in use.
*  Each bit can represent a number of pages.
*  LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
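/*
** Worked example (illustrative, not in the original source): a res_ptr
** pointing 16 bytes past res_map with bitshiftcnt == 5 yields
** pide == 16*8 + 5 == 133, i.e. bit 5 of the third 64-bit word of the
** resource map, which corresponds to pdir entry 133.
*/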

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while (res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		/* first search wrapped the hint - retry from the start */
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
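/*
** Worked example (illustrative, assuming 4kB IO pages): a 3-page request
** searches with alignment o == 1 << get_order(3 << PAGE_SHIFT) == 4, so
** the allocated range starts on a 4-entry boundary. The unmap path relies
** on this: PCOM purges require a size-aligned, power-of-2 range.
*/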

/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|         U           |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, but supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|           U            |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 *  VI == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid, 1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
	 * set (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
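/*
** Worked example (illustrative, Astro/Ike layout above): for a buffer in
** physical page 0x12345000 whose coherence index byte comes back as 0xab,
** sba_io_pdir_entry() builds
**
**	pa  = 0x12345000;		// PPN from virt_to_phys(), page-aligned
**	pa |= 0xab;			// VI in the lowest byte
**	pa |= SBA_PDIR_VALID_BIT;	// MSB == valid
**
** and stores cpu_to_le64(pa), since the IOMMU reads the pdir little-endian.
*/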

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static int sba_dma_supported(struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit
	 * first, then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE))));
}
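/*
** Illustrative driver-side view (not part of this file): following the
** documented probing order, a PCI driver tries the 64-bit mask first and
** falls back to 32-bit, e.g.
**
**	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
**	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
**		return -EIO;
**
** sba_dma_supported() rejects any mask wider than 32 bits, steering
** drivers to 32-bit DMA since this IOMMU never allows bypass.
*/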

/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
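/*
** Worked example (illustrative, assuming 4kB IO pages): mapping a buffer
** whose address has page offset 0x234 with size 0x2000 gives
** offset == 0x234; the size then rounds up to 0x3000 (three pdir
** entries), and the returned IOVA is (pide << IOVP_SHIFT) | 0x234.
*/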

/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	offset = iova & ~IOVP_MASK;
	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);
}


/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}
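/*
** Illustrative usage (driver side, not part of this file): the two
** routines above back dma_alloc_coherent()/dma_free_coherent() via
** hppa_dma_ops below, e.g.
**
**	void *cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &dma, GFP_KERNEL);
**
** returns a zeroed page with its IOVA in 'dma'.
*/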

/*
** Since 0 is a valid pdir_base index value, we can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);

	/* Fast path for single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}

static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
};
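/*
** The sync hooks are left NULL: with the coherence index programmed into
** each pdir entry by sba_io_pdir_entry(), DMA through the IOMMU is
** cache-coherent (see the coalescing comment in sba_map_sg() above), so
** no per-transfer sync work is needed.
*/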


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL	/* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL	/* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base) {
		panic("%s() could not allocate I/O Page Table\n",
		      __func__);
	}

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if (((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L))
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order += 1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages(pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024, 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;

	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}
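/*
** Note on the rope arithmetic above (derived from the code, not from the
** original comments): bits [16:13] of the LBA HPA give the rope number
** 0-15. Ropes 0-7 hang off IOC 0 and ropes 8-15 off IOC 1, hence the
** "rope_num >> 3 == ioc_num" test.
*/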

/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc		= ioc,
		.ioc_num	= ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		 __func__, ioc->ioc_hpa, iova_space_size >> 20,
		 iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
		 __func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
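	/*
	** Worked example (illustrative): for a 1GB IOVA space,
	** iov_order + PAGE_SHIFT == 30, so imask == 0xffffffff << 30
	** == 0xc0000000 and the IOVA window is ibase..ibase+1GB-1.
	*/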
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ ": unsupported system page size %d",
		      1 << PAGE_SHIFT);
		break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA. Unfortunately we can't know ahead of time
	** whether GART support will actually be used, so for now we
	** just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMB of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /* SBA_AGP_SUPPORT */
}

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bits "IOVA" space, the top two bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
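	/*
	** Worked example (illustrative, 4kB pages): a 512MB machine with a
	** single IOC starts from 128Ki pages; get_order() rounds that to a
	** 512MB IOVA space, giving a 1MB pdir (131072 entries * 8 bytes).
	*/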
	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
		 __func__,
		 ioc->ioc_hpa,
		 (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
		 iova_space_size>>20,
		 iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
		 __func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		 __func__, ioc->ibase, ioc->imask);


	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to 4K */
	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}




static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}
	}


	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			 __func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j = 0; j < sizeof(u64) * ROPES_PER_IOC; j += sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}
			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
			 i,
			 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
			 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
			 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
			 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long, unsigned long,
					  void (*)(pte_t *, unsigned long),
					  unsigned long);
		void set_data_memory_break(pte_t *, unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			 __func__, res_size);
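		/*
		** Worked example (illustrative): a 1MB pdir has 131072
		** entries; one bit per entry gives a 16kB resource map
		** (131072/8 bytes).
		*/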
		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
			      set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is unusable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
			      set_data_memory_break, 0);
		iterate_pages(sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
			      set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			 __func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
	 * set (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i, len = 0;

	len += seq_printf(m, "%s rev %d.%d\n",
			  sba_dev->name,
			  (sba_dev->hw_rev & 0x7) + 1,
			  (sba_dev->hw_rev & 0x18) >> 3);
	len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
			  (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
			  total_pages);

	len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
			  ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */

	len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
			  READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
			  READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
			  READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i = 0; i < 4; i++)
		len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
				  READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
				  READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
				  READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	len += seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
			  total_pages - ioc->used_pages, ioc->used_pages,
			  (int) (ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			  min, avg, max);

	len += seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
			  ioc->msingle_calls, ioc->msingle_pages,
			  (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	len += seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
			  min, max, (int) ((max * 1000)/min));
"pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", 1711 min, max, (int) ((max * 1000)/min)); 1712 1713 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1714 ioc->msg_calls, ioc->msg_pages, 1715 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); 1716 1717 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1718 ioc->usg_calls, ioc->usg_pages, 1719 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); 1720#endif 1721 1722 return 0; 1723} 1724 1725static int 1726sba_proc_open(struct inode *i, struct file *f) 1727{ 1728 return single_open(f, &sba_proc_info, NULL); 1729} 1730 1731static const struct file_operations sba_proc_fops = { 1732 .owner = THIS_MODULE, 1733 .open = sba_proc_open, 1734 .read = seq_read, 1735 .llseek = seq_lseek, 1736 .release = single_release, 1737}; 1738 1739static int 1740sba_proc_bitmap_info(struct seq_file *m, void *p) 1741{ 1742 struct sba_device *sba_dev = sba_list; 1743 struct ioc *ioc = &sba_dev->ioc[0]; 1744 unsigned int *res_ptr = (unsigned int *)ioc->res_map; 1745 int i, len = 0; 1746 1747 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) { 1748 if ((i & 7) == 0) 1749 len += seq_printf(m, "\n "); 1750 len += seq_printf(m, " %08x", *res_ptr); 1751 } 1752 len += seq_printf(m, "\n"); 1753 1754 return 0; 1755} 1756 1757static int 1758sba_proc_bitmap_open(struct inode *i, struct file *f) 1759{ 1760 return single_open(f, &sba_proc_bitmap_info, NULL); 1761} 1762 1763static const struct file_operations sba_proc_bitmap_fops = { 1764 .owner = THIS_MODULE, 1765 .open = sba_proc_bitmap_open, 1766 .read = seq_read, 1767 .llseek = seq_lseek, 1768 .release = single_release, 1769}; 1770#endif /* CONFIG_PROC_FS */ 1771 1772static struct parisc_device_id sba_tbl[] = { 1773 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb }, 1774 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc }, 1775 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc }, 1776 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc }, 1777 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc }, 1778 { 0, } 1779}; 1780 1781static int sba_driver_callback(struct parisc_device *); 1782 1783static struct parisc_driver sba_driver = { 1784 .name = MODULE_NAME, 1785 .id_table = sba_tbl, 1786 .probe = sba_driver_callback, 1787}; 1788 1789/* 1790** Determine if sba should claim this chip (return 0) or not (return 1). 1791** If so, initialize the chip and tell other partners in crime they 1792** have work to do. 
1793*/ 1794static int sba_driver_callback(struct parisc_device *dev) 1795{ 1796 struct sba_device *sba_dev; 1797 u32 func_class; 1798 int i; 1799 char *version; 1800 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); 1801#ifdef CONFIG_PROC_FS 1802 struct proc_dir_entry *root; 1803#endif 1804 1805 sba_dump_ranges(sba_addr); 1806 1807 /* Read HW Rev First */ 1808 func_class = READ_REG(sba_addr + SBA_FCLASS); 1809 1810 if (IS_ASTRO(dev)) { 1811 unsigned long fclass; 1812 static char astro_rev[]="Astro ?.?"; 1813 1814 /* Astro is broken...Read HW Rev First */ 1815 fclass = READ_REG(sba_addr); 1816 1817 astro_rev[6] = '1' + (char) (fclass & 0x7); 1818 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); 1819 version = astro_rev; 1820 1821 } else if (IS_IKE(dev)) { 1822 static char ike_rev[] = "Ike rev ?"; 1823 ike_rev[8] = '0' + (char) (func_class & 0xff); 1824 version = ike_rev; 1825 } else if (IS_PLUTO(dev)) { 1826 static char pluto_rev[]="Pluto ?.?"; 1827 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); 1828 pluto_rev[8] = '0' + (char) (func_class & 0x0f); 1829 version = pluto_rev; 1830 } else { 1831 static char reo_rev[] = "REO rev ?"; 1832 reo_rev[8] = '0' + (char) (func_class & 0xff); 1833 version = reo_rev; 1834 } 1835 1836 if (!global_ioc_cnt) { 1837 global_ioc_cnt = count_parisc_driver(&sba_driver); 1838 1839 /* Astro and Pluto have one IOC per SBA */ 1840 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev))) 1841 global_ioc_cnt *= 2; 1842 } 1843 1844 printk(KERN_INFO "%s found %s at 0x%llx\n", 1845 MODULE_NAME, version, (unsigned long long)dev->hpa.start); 1846 1847 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL); 1848 if (!sba_dev) { 1849 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n"); 1850 return -ENOMEM; 1851 } 1852 1853 parisc_set_drvdata(dev, sba_dev); 1854 1855 for(i=0; i<MAX_IOC; i++) 1856 spin_lock_init(&(sba_dev->ioc[i].res_lock)); 1857 1858 sba_dev->dev = dev; 1859 sba_dev->hw_rev = func_class; 1860 sba_dev->name = dev->name; 1861 sba_dev->sba_hpa = sba_addr; 1862 1863 sba_get_pat_resources(sba_dev); 1864 sba_hw_init(sba_dev); 1865 sba_common_init(sba_dev); 1866 1867 hppa_dma_ops = &sba_ops; 1868 1869#ifdef CONFIG_PROC_FS 1870 switch (dev->id.hversion) { 1871 case PLUTO_MCKINLEY_PORT: 1872 root = proc_mckinley_root; 1873 break; 1874 case ASTRO_RUNWAY_PORT: 1875 case IKE_MERCED_PORT: 1876 default: 1877 root = proc_runway_root; 1878 break; 1879 } 1880 1881 proc_create("sba_iommu", 0, root, &sba_proc_fops); 1882 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); 1883#endif 1884 1885 parisc_has_iommu(); 1886 return 0; 1887} 1888 1889/* 1890** One time initialization to let the world know the SBA was found. 1891** This is the only routine which is NOT static. 1892** Must be called exactly once before pci_init(). 1893*/ 1894void __init sba_init(void) 1895{ 1896 register_parisc_driver(&sba_driver); 1897} 1898 1899 1900/** 1901 * sba_get_iommu - Assign the iommu pointer for the pci bus controller. 1902 * @dev: The parisc device. 1903 * 1904 * Returns the appropriate IOMMU data for the given parisc PCI controller. 1905 * This is cached and used later for PCI DMA Mapping. 
void *sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC # */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}


/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return portion of distributed LMMIO
 * range. The distributed LMMIO is always present and it's just a question
 * of the base address and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}