#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#define PCI_DEBUG		/* for ASSERT */
#include <linux/pci.h>
#undef PCI_DEBUG

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/gsc.h>

#include <linux/proc_fs.h>
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/pdc.h>		/* for PDC_MODEL_* */

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#define SBA_INLINE	__inline__

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to the PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and freed/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	16

#define DEFAULT_DMA_HINT_REG	0

#define ASTRO_RUNWAY_PORT	0x582
#define ASTRO_ROPES_PORT	0x780

#define IKE_MERCED_PORT		0x803
#define IKE_ROPES_PORT		0x781

#define REO_MERCED_PORT		0x804
#define REO_ROPES_PORT		0x782

#define REOG_MERCED_PORT	0x805
#define REOG_ROPES_PORT		0x783

#define SBA_FUNC_ID	0x0000	/* function id */
#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */

#define IS_ASTRO(id) \
(((id)->hversion == ASTRO_RUNWAY_PORT) || ((id)->hversion == ASTRO_ROPES_PORT))

#define IS_IKE(id) \
(((id)->hversion == IKE_MERCED_PORT) || ((id)->hversion == IKE_ROPES_PORT))

#define SBA_FUNC_SIZE	4096	/* SBA configuration function reg set */

#define ASTRO_IOC_OFFSET 0x20000
/* Ike's IOC's occupy functions 2 and 3 (not 0 and 1) */
#define IKE_IOC_OFFSET(p)	((p+2)*SBA_FUNC_SIZE)

#define IOC_CTRL	0x8	/* IOC_CTRL offset */
#define IOC_CTRL_TC	(1 << 0) /* TOC Enable */
#define IOC_CTRL_CE	(1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE	(1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM	(1 << 8) /* Real Mode */
#define IOC_CTRL_NC	(1 << 9) /* Non Coherent Mode */

#define MAX_IOC		2	/* per Ike. Astro only has 1 */
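/*
** Worked example (illustrative note, not from the original source): with
** SBA_FUNC_SIZE == 4096, IKE_IOC_OFFSET(0) == 2*4096 == 0x2000 and
** IKE_IOC_OFFSET(1) == 3*4096 == 0x3000, i.e. the two Ike IOCs land in
** functions 2 and 3 as the comment above the macro says.
*/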

/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
#define IOS_DIST_BASE	0x390
#define IOS_DIST_MASK	0x398
#define IOS_DIST_ROUTE	0x3A0

#define IOS_DIRECT_BASE	0x3C0
#define IOS_DIRECT_MASK	0x3C8
#define IOS_DIRECT_ROUTE 0x3D0

/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL	0x200	/* "regbus pci0" */
#define ROPE1_CTL	0x208
#define ROPE2_CTL	0x210
#define ROPE3_CTL	0x218
#define ROPE4_CTL	0x220
#define ROPE5_CTL	0x228
#define ROPE6_CTL	0x230
#define ROPE7_CTL	0x238

#define HF_ENABLE	0x40


#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_IOVA_SPACE_BASE	0	/* IOVA ranges start at 0 */

/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** IOVP_SIZE could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
#define IOVP_SIZE	PAGE_SIZE
#define IOVP_SHIFT	PAGE_SHIFT
#define IOVP_MASK	PAGE_MASK

#define SBA_PERF_CFG	0x708	/* Performance Counter stuff */
#define SBA_PERF_MASK1	0x718
#define SBA_PERF_MASK2	0x730


/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in function 2 & 3 respectively.
*/
#define SBA_PERF_CNT1	0x200
#define SBA_PERF_CNT2	0x208
#define SBA_PERF_CNT3	0x210


struct ioc {
	unsigned long	ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	spinlock_t	res_lock;
	unsigned long	hint_mask_pdir;	/* bits used for DMA hints */
	unsigned int	res_bitshift;	/* from the LEFT! */
	unsigned int	res_size;	/* size of resource map in bytes */
	unsigned int	hint_shift_pdir;
#if DELAYED_RESOURCE_CNT > 0
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef CONFIG_PROC_FS
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long	avg_search[SBA_SEARCH_SAMPLE];
	unsigned long	avg_idx;	/* current index into avg_search */
	unsigned long	used_pages;
	unsigned long	msingle_calls;
	unsigned long	msingle_pages;
	unsigned long	msg_calls;
	unsigned long	msg_pages;
	unsigned long	usingle_calls;
	unsigned long	usingle_pages;
	unsigned long	usg_calls;
	unsigned long	usg_pages;
#endif

	/* STUFF we don't need in the performance path */
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	unsigned long	ibase;		/* pdir IOV Space base - shared w/lba_pci */
	unsigned long	imask;		/* pdir IOV Space mask - shared w/lba_pci */
};

struct sba_device {
	struct sba_device	*next;	/* list of SBA's in system */
	struct parisc_device	*dev;	/* dev found in bus walk */
	struct parisc_device_id	*iodc;	/* data about dev from firmware */
	const char		*name;
	unsigned long		sba_hpa; /* base address */
	spinlock_t		sba_lock;
	unsigned int		flags;	/* state/functionality enabled */
	unsigned int		hw_rev;	/* HW revision of chip */

	unsigned int		num_ioc; /* number of on-board IOC's */
	struct ioc		ioc[MAX_IOC];
};


static struct sba_device *sba_list;

static unsigned long ioc_needs_fdc = 0;

/* Ratio of Host MEM to IOV Space size */
static unsigned long sba_mem_ratio = 8;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))


#define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	le32_to_cpu(__raw_readl(addr))
#define READ_REG64(addr)	le64_to_cpu(__raw_readq(addr))
#define WRITE_REG32(val, addr)	__raw_writel(cpu_to_le32(val), addr)
#define WRITE_REG64(val, addr)	__raw_writeq(cpu_to_le64(val), addr)

#ifdef __LP64__
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
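/*
** Illustrative sketch (added commentary, not original code): because the
** CSR writes above are posted, any write that must reach the chip before
** the CPU proceeds is followed by a read from the same register block:
**
**	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
**	READ_REG(ioc->ioc_hpa + IOC_IMASK);	/* forces the write to HW */
**
** The unmap path below uses exactly this pattern with IOC_PCOM to make
** sure queued TLB purges have landed.
*/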

#ifdef DEBUG_SBA_INIT

/* NOTE: When __LP64__ isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(unsigned long hpa)
{
	DBG_INIT("SBA at 0x%lx\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(unsigned long hpa)
{
	DBG_INIT("IO TLB at 0x%lx\n", hpa);
	DBG_INIT("IOC_IBASE    : %Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg,
		rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
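/*
** Note (added commentary): sba_check_pdir() works because pdir entries are
** stored little-endian, so byte 7 of each u64 holds the Valid bit (0x80).
** Shifting that byte into bits 31:24 lines it up with the MSB-first walk
** of each 32-bit resource map word - a set res_map bit must pair with a
** set Valid bit, and vice versa.
*/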

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt_addr(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
#define SBA_IOVP(ioc,iova) ((iova) & ioc->hint_mask_pdir)

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)
#define MKIOVP(dma_hint,pide)  (dma_addr_t)((long)(dma_hint) | ((long)(pide) << IOVP_SHIFT))
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
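/*
** Worked example (illustrative, assuming BITS_PER_LONG == 64 and 4KB
** pages): RESMAP_MASK(4) == 0xf000000000000000UL - four set bits starting
** at the MSB, matching the "from the LEFT" convention of res_bitshift.
** PDIR_INDEX(0x5000) == 5, i.e. IOVP 0x5000 is page 5 of IOVA space and
** is described by pdir entry 5 / resource map bit 5.
*/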

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);
	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			if (*res_ptr == 0) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			if(0 == ((*res_ptr) & mask)) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (0 == mask) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark them in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef CONFIG_PROC_FS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_needed <= BITS_PER_LONG);
	ASSERT(0 == (size & ~IOVP_MASK));

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef CONFIG_PROC_FS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
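/*
** Worked example (illustrative): for bits_wanted == 3 with 4KB pages,
** get_order(3 << 12) == 2, so o == 4 and the search only tries bit
** offsets that are multiples of 4. The resulting IOVA range is therefore
** aligned to its size rounded up to a power of two, which is exactly what
** the PCOM purge in sba_mark_invalid() requires.
*/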

/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes that were mapped.
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__FUNCTION__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef CONFIG_PROC_FS
	ioc->used_pages -= bits_not_wanted;
#endif

	ASSERT(m != 0);
	ASSERT(bits_not_wanted);
	ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(bits_not_wanted <= BITS_PER_LONG);
	ASSERT((*res_ptr & m) == m); /* verify same bits are set */
	*res_ptr &= ~m;
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)


typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
 * shown below (MSB == bit 0):
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * The physical address fields are filled with the results of the LPA
 * instruction. The virtual index field is filled with the results of
 * the LCI (Load Coherence Index) instruction. The 8 bits used for
 * the virtual index are bits 12:19 of the value returned by LCI.
 *
 * We need to pre-swap the bytes since PCX-W is Big Endian.
 */


void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
{
	u64 pa;	/* physical address */
	register unsigned ci; /* coherent index */

	/* We currently only support kernel addresses.
	 * fdc instr below will need to reload sr1 with KERNEL_SPACE
	 * once we try to support direct DMA to user space.
	 */
	ASSERT(sid == KERNEL_SPACE);

	pa = virt_to_phys(vba);
	pa &= ~4095ULL;			/* clear out offset bits */

	mtsp(sid, 1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= 0x8000000000000000ULL;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
	}
}
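/*
** Worked example (illustrative): mapping a kernel buffer whose physical
** page is 0x12345000 and whose LCI result carries 0xab in bits 12:19
** (a made-up value) yields
**
**	0x8000000000000000ULL | 0x12345000 | 0xab == 0x80000000123450ab
**
** before the cpu_to_le64() swap: Valid bit set, PPN in the middle, and
** the 8-bit Virtual Index in the low byte, as diagrammed above.
*/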

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	/* Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we clear the byte
	** at +7 instead of at +0.
	*/
	int off = PDIR_INDEX(iovp)*sizeof(u64)+7;

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~IOVP_MASK));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= IOVP_SIZE)
	{
		ASSERT( off < ioc->pdir_size);

		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		((u8 *)(ioc->pdir_base))[off] = 0;
	} else {
		u32 t = get_order(byte_cnt) + PAGE_SHIFT;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(0x80 == (((u8 *) ioc->pdir_base)[off] & 0x80));
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *)(ioc->pdir_base))[off] = 0;
			off += sizeof(u64);
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}
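/*
** Worked example (illustrative): purging a 16KB mapping (4 pages) gives
** t == get_order(16384) + PAGE_SHIFT == 2 + 12 == 14, so the PCOM write
** is iovp | 14, i.e. "purge 2^14 bytes starting at this (16KB-aligned)
** IOVP". The single-page case encodes iovp | IOVP_SHIFT (2^12 bytes) the
** same way.
*/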

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
static int
sba_dma_supported( struct pci_dev *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	dev->dma_mask = mask;	/* save it */

	/* only support 32-bit PCI devices - no DAC support (yet) */
	return((int) (mask == 0xffffffff));
}


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
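/*
** Worked example (illustrative, made-up addresses): mapping 0x2345 bytes
** at kernel address 0xc0001234 gives offset == 0x234 and rounds size up
** to (0x2345 + 0x234 + 0xfff) & ~0xfff == 0x3000, i.e. three pdir
** entries. If sba_alloc_range() returns pide 5, the caller gets back the
** IOVA (5 << 12) | 0x234 == 0x5234.
*/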

/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	offset = iova & ~IOVP_MASK;

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long) iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

#if DELAYED_RESOURCE_CNT > 0
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
	spin_unlock_irqrestore(&ioc->res_lock, flags);

}


/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void *
sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of the "consistent" buffer.
 * @dma_handle:  IO virtual address of the "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif

/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */

static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	dma_sg--;
	while (nents-- > 0) {
		int cnt = sg_dma_len(startsg);
		sg_dma_len(startsg) = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(KERN_DEBUG " %2d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg), cnt,
				sg_virt_addr(startsg), startsg->length
				);
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg), cnt,
				sg_virt_addr(startsg), startsg->length
				);
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			sg_dma_address(startsg) = 0;
			dma_sg++;
			sg_dma_address(dma_sg) = pide;
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			sg_dma_len(dma_sg) += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
			ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
			do {
				sba_io_pdir_entry(pdirp, KERNEL_SPACE, vaddr);
				vaddr += IOVP_SIZE;
				cnt -= IOVP_SIZE;
				pdirp++;
			} while (cnt > 0);
		}
		startsg++;
	}
#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on a page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
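/*
** Worked example (illustrative, 4KB pages on a 64-bit kernel): shifting
** left by BITS_PER_LONG - PAGE_SHIFT == 52 discards everything but the
** page offset bits, so DMA_CONTIG(0x3000, 0x4000) is true (both offsets
** zero) while DMA_CONTIG(0x3800, 0x4000) is false - a chunk ending at
** 0x3800 can't dovetail onto a page boundary.
*/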

/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks( struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg++;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE)
				break;

			/*
			** Then look for virtually contiguous blocks.
			** PARISC needs to associate a virtual address
			** with each IO address mapped. The CPU cache is
			** virtually tagged and the IOMMU uses part
			** of the virtual address to participate in
			** CPU cache coherency.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sg_virt_addr(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > IOVP_SIZE);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			sg_dma_len(vcontig_sg) = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(vcontig_sg) = vcontig_len;
		dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		sg_dma_address(dma_sg) =
			PIDE_FLAG
			| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
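/*
** Worked example (illustrative): three 4KB buffers where buf0 and buf1
** are virtually contiguous but buf2 starts mid-page somewhere else. Pass
** one coalesces buf0/buf1 into a single 8KB DMA stream and, since buf2
** can neither extend the VCONTIG chunk nor dovetail on a page boundary,
** opens a second stream for it: n_mappings == 2, with 8KB + 4KB of pdir
** space allocated. sba_fill_pdir() then writes the actual pdir entries
** behind both streams.
*/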

/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static int
sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, sglist, nents);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_PROC_FS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef CONFIG_PROC_FS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static struct pci_dma_ops sba_ops = {
	sba_dma_supported,
	sba_alloc_consistent,	/* allocate cacheable host mem */
	sba_free_consistent,	/* release cacheable host mem */
	sba_map_single,
	sba_unmap_single,
	sba_map_sg,
	sba_unmap_sg,
	NULL,			/* dma_sync_single */
	NULL			/* dma_sync_sg */
};


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL	/* bits 17, 18, 20 */
#define PIRANHA_ADDR_VAL	0x00060000UL	/* bits 17, 18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)
		panic("sba_ioc_init() could not allocate I/O Page Table\n");

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 *
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024, 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
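/*
** Worked example (illustrative): PIRANHA_ADDR_MASK picks out physical
** address bits 17, 18 and 20. A pdir page ending at physical 0x0006ffff
** has bits 17+18 set and bit 20 clear, so (addr & 0x00160000) ==
** 0x00060000 and the allocation above gets redone; one at 0x0016ffff
** (bit 20 also set) is safe.
*/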

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	/* lba_set_iregs() is in arch/parisc/kernel/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);

	u32 iova_space_size, iova_space_mask;
	int pdir_size, iov_order;
	unsigned long physmem;
	struct parisc_device *lba;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bits "IOVA" space, top two bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	physmem = num_physpages << PAGE_SHIFT;
	iova_space_size = (u32) (physmem/(sba_mem_ratio*global_ioc_cnt));

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < 1024*1024) {
		iova_space_size = 1024*1024;
	}
#ifdef __LP64__
	else if (iova_space_size > 512*1024*1024) {
		iova_space_size = 512*1024*1024;
	}
#endif

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));
	ASSERT(iov_order <= (30 - IOVP_SHIFT));   /* iova_space_size <= 1GB */
	ASSERT(iov_order >= (20 - IOVP_SHIFT));   /* iova_space_size >= 1MB */
	iova_space_size = 1 << (iov_order + IOVP_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	ASSERT(pdir_size < 4*1024*1024);   /* max pdir size == 2MB */

	/* Verify it's a power of two */
	ASSERT((1 << get_order(pdir_size)) == (pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%lx mem %dMB IOV %dMB (%d bits) PDIR size 0x%0x\n",
		__FUNCTION__, ioc->ioc_hpa, (int) (physmem>>20),
		iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);

	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
		__FUNCTION__, ioc->pdir_base, pdir_size,
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = IOC_IOVA_SPACE_BASE | 1;	/* bit 0 == enable bit */
	ioc->imask = iova_space_mask;	/* save it */

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__FUNCTION__, ioc->ibase, ioc->imask);


	/*
	** setup Elroy IBASE/IMASK registers as well.
	*/
	for (lba = sba->child; lba; lba = lba->sibling) {
		int rope_num = (lba->hpa >> 13) & 0xf;
		if (rope_num >> 3 == ioc_num)
			lba_set_iregs(lba, ioc->ibase, ioc->imask);
	}

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to 4K */
	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	DBG_INIT("%s() DONE\n", __FUNCTION__);
}
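/*
** Worked example (illustrative): on a machine with 512MB of RAM and one
** IOC, iova_space_size == 512MB/8 == 64MB, so iov_order ==
** get_order(64MB) == 14 with 4KB pages. That gives a 128KB pdir (16K
** entries * 8 bytes), hint_shift_pdir == 26 and imask == 0xfc000000 -
** the same shape as the HP-UX values quoted in the comment above.
*/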



static void
sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
	DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
		__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
	ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
	ioc_ctl |= IOC_CTRL_TC;	/* Astro: firmware enables this */

	WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
	ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
	DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif

	if (IS_ASTRO(sba_dev->iodc)) {
		/* PAT_PDC (L-class) also reports the same goofy base */
		sba_dev->ioc[0].ioc_hpa = ASTRO_IOC_OFFSET;
		num_ioc = 1;
	} else {
		sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
		num_ioc = 2;
	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa + IKE_IOC_OFFSET(i);

		/*
		** Make sure the box crashes if we get any errors on a rope.
		*/
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
		WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		/* flush out the writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
				void (*)(pte_t * , unsigned long),
				unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__FUNCTION__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
	}

	sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}
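/*
** Worked example (illustrative): a 128KB pdir describes 16K pages, and
** one resource-map bit per pdir entry makes res_size == 16384/8 == 2KB.
** For the 2MB piranha case, 256K entries lose (128*1024)/8 == 16K
** entries, leaving a (256K - 16K)/8 == 30KB resource map.
*/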
"%spci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1819 buf, ioc->usg_calls, ioc->usg_pages, 1820 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); 1821 1822 return strlen(buf); 1823} 1824 1825#endif /* CONFIG_PROC_FS */ 1826 1827static struct parisc_device_id sba_tbl[] = { 1828 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb }, 1829 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc }, 1830 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc }, 1831 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc }, 1832/* These two entries commented out because we don't find them in a 1833 * buswalk yet. If/when we do, they would cause us to think we had 1834 * many more SBAs then we really do. 1835 * { HPHW_BCPORT, HVERSION_REV_ANY_ID, ASTRO_ROPES_PORT, 0xc }, 1836 * { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_ROPES_PORT, 0xc }, 1837 */ 1838 { 0, } 1839}; 1840 1841int sba_driver_callback(struct parisc_device *); 1842 1843static struct parisc_driver sba_driver = { 1844 name: MODULE_NAME, 1845 id_table: sba_tbl, 1846 probe: sba_driver_callback, 1847}; 1848 1849/* 1850** Determine if lba should claim this chip (return 0) or not (return 1). 1851** If so, initialize the chip and tell other partners in crime they 1852** have work to do. 1853*/ 1854int 1855sba_driver_callback(struct parisc_device *dev) 1856{ 1857 struct sba_device *sba_dev; 1858 u32 func_class; 1859 int i; 1860 char *version; 1861 1862#ifdef DEBUG_SBA_INIT 1863 sba_dump_ranges(dev->hpa); 1864#endif 1865 1866 /* Read HW Rev First */ 1867 func_class = READ_REG(dev->hpa + SBA_FCLASS); 1868 1869 if (IS_ASTRO(&dev->id)) { 1870 unsigned long fclass; 1871 static char astro_rev[]="Astro ?.?"; 1872 1873 /* Astro is broken...Read HW Rev First */ 1874 fclass = READ_REG(dev->hpa); 1875 1876 astro_rev[6] = '1' + (char) (fclass & 0x7); 1877 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); 1878 version = astro_rev; 1879 1880 } else if (IS_IKE(&dev->id)) { 1881 static char ike_rev[]="Ike rev ?"; 1882 1883 ike_rev[8] = '0' + (char) (func_class & 0xff); 1884 version = ike_rev; 1885 } else { 1886 static char reo_rev[]="REO rev ?"; 1887 1888 reo_rev[8] = '0' + (char) (func_class & 0xff); 1889 version = reo_rev; 1890 } 1891 1892 if (!global_ioc_cnt) { 1893 global_ioc_cnt = count_parisc_driver(&sba_driver); 1894 1895 /* Only Astro has one IOC per SBA */ 1896 if (!IS_ASTRO(&dev->id)) 1897 global_ioc_cnt *= 2; 1898 } 1899 1900 printk(KERN_INFO "%s found %s at 0x%lx\n", 1901 MODULE_NAME, version, dev->hpa); 1902 1903#ifdef DEBUG_SBA_INIT 1904 sba_dump_tlb(dev->hpa); 1905#endif 1906 1907 sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL); 1908 if (NULL == sba_dev) { 1909 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n"); 1910 return(1); 1911 } 1912 1913 dev->sysdata = (void *) sba_dev; 1914 memset(sba_dev, 0, sizeof(struct sba_device)); 1915 1916 for(i=0; i<MAX_IOC; i++) 1917 spin_lock_init(&(sba_dev->ioc[i].res_lock)); 1918 1919 sba_dev->dev = dev; 1920 sba_dev->hw_rev = func_class; 1921 sba_dev->iodc = &dev->id; 1922 sba_dev->name = dev->name; 1923 sba_dev->sba_hpa = dev->hpa; /* faster access */ 1924 1925 sba_get_pat_resources(sba_dev); 1926 sba_hw_init(sba_dev); 1927 sba_common_init(sba_dev); 1928 1929 hppa_dma_ops = &sba_ops; 1930 1931#ifdef CONFIG_PROC_FS 1932 if (IS_ASTRO(&dev->id)) { 1933 create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info); 1934 } else if (IS_IKE(&dev->id)) { 1935 create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info); 1936 } else { 1937 
create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info); 1938 } 1939#endif 1940 return 0; 1941} 1942 1943/* 1944** One time initialization to let the world know the SBA was found. 1945** This is the only routine which is NOT static. 1946** Must be called exactly once before pci_init(). 1947*/ 1948void __init sba_init(void) 1949{ 1950 register_parisc_driver(&sba_driver); 1951} 1952 1953 1954/** 1955 * sba_get_iommu - Assign the iommu pointer for the pci bus controller. 1956 * @dev: The parisc device. 1957 * 1958 * This function searches through the registerd IOMMU's and returns the 1959 * appropriate IOMMU data for the given parisc PCI controller. 1960 */ 1961void * sba_get_iommu(struct parisc_device *pci_hba) 1962{ 1963 struct sba_device *sba = (struct sba_device *) pci_hba->parent->sysdata; 1964 char t = pci_hba->parent->id.hw_type; 1965 int iocnum = (pci_hba->hw_path >> 3); /* rope # */ 1966 1967 if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT)) 1968 BUG(); 1969 1970 return &(sba->ioc[iocnum]); 1971} 1972