Lines Matching defs:ioc in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/parisc/


169  * @ioc: IO MMU structure which owns the pdir we are interested in.
176 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
179 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
180 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
202 * @ioc: IO MMU structure which owns the pdir we are interested in.
208 sba_check_pdir(struct ioc *ioc, char *msg)
210 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
211 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
212 u64 *pptr = ioc->pdir_base; /* pdir ptr */
228 sba_dump_pdir_entry(ioc, msg, pide);
245 * @ioc: IO MMU structure which owns the pdir we are interested in.
252 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
284 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
285 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
288 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
289 #define SBA_IOVP(ioc,iova) (iova)
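Note: the two macro pairs above (lines 284-285 vs. 288-289) are the same idea with and without a non-zero IO base. SBA_IOVA composes an IO virtual address from the IOC base, the pdir slot, and the in-page offset; SBA_IOVP strips the base back off with iovp_mask. A stand-alone sketch of the round trip, using hypothetical ibase/iovp_mask values and assuming 4 KiB IO pages:

    #include <stdio.h>
    #include <inttypes.h>
    #include <stdint.h>

    #define IOVP_SHIFT 12                       /* assume 4 KiB IO pages */

    int main(void)
    {
        uint64_t ibase     = 0xc0000000UL;      /* hypothetical ioc->ibase */
        uint64_t iovp_mask = 0x3fffffffUL;      /* hypothetical ioc->iovp_mask */
        uint64_t pide      = 42;                /* pdir slot from the bitmap */
        uint64_t offset    = 0x123;             /* offset within the IO page */

        uint64_t iovp = pide << IOVP_SHIFT;
        uint64_t iova = ibase | iovp | offset;  /* SBA_IOVA(ioc, iovp, offset, 0) */

        uint64_t iovp_back = iova & iovp_mask;  /* SBA_IOVP(ioc, iova) */
        uint64_t pide_back = iovp_back >> IOVP_SHIFT;

        printf("iova 0x%" PRIx64 " -> pide %" PRIu64 ", offset 0x%" PRIx64 "\n",
               iova, pide_back, iovp_back & ((1UL << IOVP_SHIFT) - 1));
        return 0;
    }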
297 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
300 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
306 * @ioc: IO MMU structure which owns the pdir we are interested in.
314 sba_search_bitmap(struct ioc *ioc, struct device *dev,
317 unsigned long *res_ptr = ioc->res_hint;
318 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
328 BUG_ON(ioc->ibase & ~IOVP_MASK);
329 shift = ioc->ibase >> IOVP_SHIFT;
337 tpide = ptr_to_pide(ioc, res_ptr, 0);
349 ioc->res_bitshift = 0;
358 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
372 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
390 ioc->res_bitshift = bitshiftcnt + bits_wanted;
395 ioc->res_hint = (unsigned long *) ioc->res_map;
396 ioc->res_bitshift = 0;
398 ioc->res_hint = res_ptr;
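The fragments above show the shape of the search: start at the rolling hint ioc->res_hint, scan toward res_end, translate a word pointer plus bit offset into a pdir index via ptr_to_pide(), and on exhaustion reset the hint to the start of the map (lines 395-396) and keep going. A simplified single-bit, first-fit version of that loop; the real function additionally handles alignment and multi-bit runs:

    #include <stdio.h>
    #include <string.h>

    #define MAP_WORDS 4
    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    static unsigned long res_map[MAP_WORDS];
    static unsigned int res_hint;       /* word index; plays the role of ioc->res_hint */

    static long search_bitmap(void)
    {
        unsigned int start = res_hint, i = start;
        int wrapped = 0;

        for (;;) {
            if (~res_map[i]) {                          /* this word has a free bit */
                int bit = __builtin_ctzl(~res_map[i]);
                res_map[i] |= 1UL << bit;
                res_hint = i;                           /* leave the hint where we stopped */
                return (long)(i * BITS_PER_WORD + bit); /* ptr_to_pide() analogue */
            }
            if (++i == MAP_WORDS) {                     /* reached res_end: wrap once */
                if (wrapped)
                    return -1;
                i = 0;
                wrapped = 1;
            }
            if (wrapped && i == start)
                return -1;                              /* map is full */
        }
    }

    int main(void)
    {
        memset(res_map, 0, sizeof(res_map));
        for (int n = 0; n < 5; n++)
            printf("allocated pide %ld\n", search_bitmap());
        return 0;
    }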
406 * @ioc: IO MMU structure which owns the pdir we are interested in.
413 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
421 pide = sba_search_bitmap(ioc, dev, pages_needed);
422 if (pide >= (ioc->res_size << 3)) {
423 pide = sba_search_bitmap(ioc, dev, pages_needed);
424 if (pide >= (ioc->res_size << 3))
426 __FILE__, ioc->ioc_hpa);
431 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
432 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
438 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
439 ioc->res_bitshift );
448 ioc->avg_search[ioc->avg_idx++] = cr_start;
449 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
451 ioc->used_pages += pages_needed;
460 * @ioc: IO MMU structure which owns the pdir we are interested in.
464 * clear bits in the ioc's resource map
467 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
469 unsigned long iovp = SBA_IOVP(ioc, iova);
472 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
484 ioc->used_pages -= bits_not_wanted;
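Both sba_dump_pdir_entry() (line 180) and sba_free_range() (lines 469-472) use the same arithmetic to find a pdir index's home in the resource bitmap: pide >> 3 is the byte offset, masked down to an unsigned-long boundary to get the word holding the bit. A small sketch of that mapping with a hypothetical pide:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pide = 77;                     /* hypothetical pdir index */
        unsigned long byte = pide >> 3;              /* 8 map bits per byte */
        unsigned long word = byte & ~(sizeof(unsigned long) - 1); /* word-align */
        unsigned long bit  = pide & (8 * sizeof(unsigned long) - 1);

        printf("pide %lu -> res_map byte %lu (word at %lu), bit %lu in that word\n",
               pide, byte, word, bit);
        return 0;
    }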
498 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
573 * @ioc: IO MMU structure which owns the pdir we are interested in.
588 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
590 u32 iovp = (u32) SBA_IOVP(ioc,iova);
591 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
601 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
634 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
646 struct ioc *ioc;
662 ioc = GET_IOC(dev);
668 return((int)(mask >= (ioc->ibase - 1 +
669 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
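Lines 668-669 are the whole of the DMA-mask test: a device can use this IOMMU iff its mask covers the highest IOVA the IOC will ever hand out, i.e. ibase plus one entry's worth of IO space per pdir slot. A stand-alone restatement with hypothetical sizes:

    #include <stdio.h>
    #include <stdint.h>

    #define IOVP_SIZE 4096UL                    /* assume 4 KiB IO pages */

    static int sba_mask_ok(uint64_t mask, uint64_t ibase, uint64_t pdir_size)
    {
        uint64_t entries   = pdir_size / sizeof(uint64_t); /* one u64 per IO page */
        uint64_t last_iova = ibase - 1 + entries * IOVP_SIZE;
        return mask >= last_iova;
    }

    int main(void)
    {
        /* hypothetical 1 GiB IO space based at 0xc0000000 */
        uint64_t pdir_size = (1UL << 30) / IOVP_SIZE * sizeof(uint64_t);
        printf("32-bit mask ok? %d\n", sba_mask_ok(0xffffffffUL, 0xc0000000UL, pdir_size));
        printf("31-bit mask ok? %d\n", sba_mask_ok(0x7fffffffUL, 0xc0000000UL, pdir_size));
        return 0;
    }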
686 struct ioc *ioc;
693 ioc = GET_IOC(dev);
701 spin_lock_irqsave(&ioc->res_lock, flags);
703 sba_check_pdir(ioc,"Check before sba_map_single()");
707 ioc->msingle_calls++;
708 ioc->msingle_pages += size >> IOVP_SHIFT;
710 pide = sba_alloc_range(ioc, dev, size);
716 pdir_start = &(ioc->pdir_base[pide]);
743 sba_check_pdir(ioc,"Check after sba_map_single()");
745 spin_unlock_irqrestore(&ioc->res_lock, flags);
748 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
765 struct ioc *ioc;
774 ioc = GET_IOC(dev);
780 spin_lock_irqsave(&ioc->res_lock, flags);
783 ioc->usingle_calls++;
784 ioc->usingle_pages += size >> IOVP_SHIFT;
787 sba_mark_invalid(ioc, iova, size);
793 d = &(ioc->saved[ioc->saved_cnt]);
796 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
797 int cnt = ioc->saved_cnt;
799 sba_free_range(ioc, d->iova, d->size);
802 ioc->saved_cnt = 0;
804 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
807 sba_free_range(ioc, iova, size);
813 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
816 spin_unlock_irqrestore(&ioc->res_lock, flags);
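Lines 790-804 implement a delayed-free ring: instead of returning each unmapped range to the bitmap immediately, sba_unmap_single() queues (iova, size) pairs and drains the whole batch once DELAYED_RESOURCE_CNT entries pile up, so the costly PCOM purge flush is paid once per batch. A user-space sketch of that pattern, with a stub standing in for sba_free_range():

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DELAYED_RESOURCE_CNT 16                 /* batch size, as in the driver */

    struct sba_dma_pair { uint64_t iova; size_t size; };

    static struct sba_dma_pair saved[DELAYED_RESOURCE_CNT];
    static int saved_cnt;

    static void free_range_stub(uint64_t iova, size_t size) /* stands in for sba_free_range() */
    {
        printf("freeing iova 0x%llx (%zu bytes)\n", (unsigned long long)iova, size);
    }

    static void delayed_unmap(uint64_t iova, size_t size)
    {
        struct sba_dma_pair *d = &saved[saved_cnt];

        d->iova = iova;
        d->size = size;
        if (++saved_cnt >= DELAYED_RESOURCE_CNT) {
            int cnt = saved_cnt;
            while (cnt--)                           /* drain the whole batch at once */
                free_range_stub(saved[cnt].iova, saved[cnt].size);
            saved_cnt = 0;
            /* the driver issues one READ_REG(ioc->ioc_hpa + IOC_PCOM)
             * here to flush all queued purges in a single go */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 2 * DELAYED_RESOURCE_CNT; i++)
            delayed_unmap(0x1000UL * (unsigned)i, 4096);
        return 0;
    }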
899 struct ioc *ioc;
905 ioc = GET_IOC(dev);
916 spin_lock_irqsave(&ioc->res_lock, flags);
919 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
921 sba_dump_sg(ioc, sglist, nents);
927 ioc->msg_calls++;
938 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
948 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
955 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
957 sba_dump_sg(ioc, sglist, nents);
962 spin_unlock_irqrestore(&ioc->res_lock, flags);
983 struct ioc *ioc;
991 ioc = GET_IOC(dev);
994 ioc->usg_calls++;
998 spin_lock_irqsave(&ioc->res_lock, flags);
999 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1000 spin_unlock_irqrestore(&ioc->res_lock, flags);
1007 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1008 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1016 spin_lock_irqsave(&ioc->res_lock, flags);
1017 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1018 spin_unlock_irqrestore(&ioc->res_lock, flags);
1159 struct ioc *ioc;
1171 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1177 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1180 .ioc = ioc,
1202 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1215 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1216 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1218 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1228 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1231 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1234 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1235 get_order(ioc->pdir_size));
1236 if (!ioc->pdir_base)
1239 memset(ioc->pdir_base, 0, ioc->pdir_size);
1242 __func__, ioc->pdir_base, ioc->pdir_size);
1245 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1246 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1249 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1252 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1253 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1258 ioc->imask = iova_space_mask;
1260 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1262 sba_dump_tlb(ioc->ioc_hpa);
1264 setup_ibase_imask(sba, ioc, ioc_num);
1266 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1273 ioc->imask |= 0xFFFFFFFF00000000UL;
1287 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1293 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1299 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1316 ioc->pdir_size /= 2;
1317 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1323 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1362 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1366 ioc->ioc_hpa,
1371 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1374 __func__, ioc->pdir_base, pdir_size);
1377 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1378 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1381 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1384 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1394 ioc->ibase = 0;
1395 ioc->imask = iova_space_mask; /* save it */
1397 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1401 __func__, ioc->ibase, ioc->imask);
1404 setup_ibase_imask(sba, ioc, ioc_num);
1409 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1410 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1413 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
1419 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1421 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
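Both init paths size the pdir the same way (lines 1228 and 1362): one 64-bit pdir entry per IO page, so pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64). Back-of-envelope, assuming 4 KiB IO pages, a 1 GiB IO space needs a 2 MiB pdir:

    #include <stdio.h>
    #include <stdint.h>

    #define IOVP_SIZE 4096UL

    int main(void)
    {
        uint64_t iova_space_size = 1UL << 30;       /* 1 GiB IO virtual space */
        uint64_t pdir_size = iova_space_size / IOVP_SIZE * sizeof(uint64_t);

        printf("pdir_size = %llu bytes (%llu MiB)\n",
               (unsigned long long)pdir_size,
               (unsigned long long)(pdir_size >> 20));
        return 0;
    }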
1468 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1480 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1496 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1497 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1505 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1533 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1535 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1537 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1538 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1541 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1542 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1546 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1548 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1573 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
1584 sba_dev->ioc[i].res_size = res_size;
1585 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1588 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1592 if (NULL == sba_dev->ioc[i].res_map)
1598 memset(sba_dev->ioc[i].res_map, 0, res_size);
1600 sba_dev->ioc[i].res_hint = (unsigned long *)
1601 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1605 sba_dev->ioc[i].res_map[0] = 0x80;
1606 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1615 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1616 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1625 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1627 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1632 __func__, i, res_size, sba_dev->ioc[i].res_map);
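The res_size seen at lines 1573/1584, together with the recurring "res_size << 3" and "8 bits per byte" comments, implies one bitmap bit per pdir entry: res_size in bytes is the entry count divided by 8, and res_size << 3 recovers the number of mappable IO pages. A quick check of that arithmetic (sizes hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pdir_size = 2UL << 20;             /* 2 MiB pdir (1 GiB IO space) */
        uint64_t entries   = pdir_size / sizeof(uint64_t);
        uint64_t res_size  = entries >> 3;          /* one bit per entry, in bytes */

        printf("%llu pdir entries -> res_map of %llu bytes (%llu pages tracked)\n",
               (unsigned long long)entries,
               (unsigned long long)res_size,
               (unsigned long long)(res_size << 3));
        return 0;
    }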
1656 struct ioc *ioc = &sba_dev->ioc[0];
1657 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1669 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1673 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1690 total_pages - ioc->used_pages, ioc->used_pages,
1691 (int) (ioc->used_pages * 100 / total_pages));
1693 min = max = ioc->avg_search[0];
1695 avg += ioc->avg_search[i];
1696 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1697 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1704 ioc->msingle_calls, ioc->msingle_pages,
1705 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1708 min = ioc->usingle_calls;
1709 max = ioc->usingle_pages - ioc->usg_pages;
1714 ioc->msg_calls, ioc->msg_pages,
1715 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1718 ioc->usg_calls, ioc->usg_pages,
1719 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1743 struct ioc *ioc = &sba_dev->ioc[0];
1744 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1747 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
1856 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1916 return &(sba->ioc[iocnum]);