Lines Matching defs:ioc in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/ia64/hp/common/ (sba_iommu.c, the HP zx1/sx1000 SBA I/O MMU driver)

202 struct ioc {
235 struct ioc *next; /* list of IOC's in system */
245 static struct ioc *ioc_list;
248 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
249 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
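
For orientation: the entries above define struct ioc and the global singly linked ioc_list (line 245) that every routine below walks. A minimal userspace sketch of that registration pattern, assuming nothing beyond the two fields shown in the excerpt; every other name here is illustrative:

    #include <stdio.h>

    struct ioc {
        const char *name;
        struct ioc *next;           /* list of IOC's in system, as at line 235 */
    };

    static struct ioc *ioc_list;    /* head of the global list, as at line 245 */

    static void ioc_register(struct ioc *ioc)
    {
        ioc->next = ioc_list;       /* push onto the head, as ioc_init() does */
        ioc_list = ioc;
    }

    int main(void)
    {
        struct ioc a = { .name = "ioc0" }, b = { .name = "ioc1" };
        ioc_register(&a);
        ioc_register(&b);
        for (struct ioc *ioc = ioc_list; ioc; ioc = ioc->next)
            printf("%s\n", ioc->name);
        return 0;
    }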
310 * @ioc: IO MMU structure which owns the pdir we are interested in.
317 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
320 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
321 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
342 * @ioc: IO MMU structure which owns the pdir we are interested in.
348 sba_check_pdir(struct ioc *ioc, char *msg)
350 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
351 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
352 u64 *pptr = ioc->pdir_base; /* pdir ptr */
371 sba_dump_pdir_entry(ioc, msg, pide);
388 * @ioc: IO MMU structure which owns the pdir we are interested in.
395 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
406 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
435 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
436 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
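
The two macros at lines 435-436 compose and strip the IOMMU base address. A hedged round-trip sketch, rewritten to take ibase directly instead of an ioc pointer; the constants are invented for the demo:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* Same shape as lines 435-436, parameterized on ibase for the demo. */
    #define SBA_IOVA(ibase, iovp, offset) ((ibase) | (iovp) | (offset))
    #define SBA_IOVP(ibase, iova)         ((iova) & ~(ibase))

    int main(void)
    {
        uint64_t ibase  = 0x40000000ULL;    /* hypothetical IOV space base */
        uint64_t iovp   = 0x00012000ULL;    /* page-aligned offset in the space */
        uint64_t offset = 0x345ULL;         /* byte offset within the page */

        uint64_t iova = SBA_IOVA(ibase, iovp, offset);
        assert(SBA_IOVP(ibase, iova) == (iovp | offset));
        printf("iova = %#llx\n", (unsigned long long)iova);
        return 0;
    }

The OR-composition only works because ibase sits well above the IOV offsets so no bits overlap; the driver checks a related alignment property at line 499.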
465 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
468 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
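
ptr_to_pide() (lines 465-468) turns a word pointer into the resource map, plus a bit offset, into a pdir entry index. The same arithmetic in standalone form:

    #include <stdio.h>

    /* Byte distance from the map start, times 8 bits per byte, plus the
     * bit position inside the current word; mirrors lines 465-468. */
    static unsigned long ptr_to_pide(unsigned long *res_map,
                                     unsigned long *res_ptr,
                                     unsigned int bitshiftcnt)
    {
        return (((unsigned long)res_ptr - (unsigned long)res_map) << 3)
                + bitshiftcnt;
    }

    int main(void)
    {
        unsigned long map[4] = { 0 };
        /* bit 5 of the second word -> index 8 * sizeof(long) + 5 */
        printf("pide = %lu\n", ptr_to_pide(map, &map[1], 5));
        return 0;
    }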
474 * @ioc: IO MMU structure which owns the pdir we are interested in.
483 sba_search_bitmap(struct ioc *ioc, struct device *dev,
487 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
493 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
499 BUG_ON(ioc->ibase & ~iovp_mask);
500 shift = ioc->ibase >> iovp_shift;
502 spin_lock_irqsave(&ioc->res_lock, flags);
506 res_ptr = ioc->res_hint;
508 res_ptr = (ulong *)ioc->res_map;
509 ioc->res_bitshift = 0;
526 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
527 ioc->res_bitshift = bitshiftcnt + bits_wanted;
543 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
555 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
562 ioc->res_bitshift = bitshiftcnt + bits_wanted;
582 tpide = ptr_to_pide(ioc, res_ptr, 0);
601 ioc->res_bitshift = bits;
609 prefetch(ioc->res_map);
610 ioc->res_hint = (unsigned long *) ioc->res_map;
611 ioc->res_bitshift = 0;
612 spin_unlock_irqrestore(&ioc->res_lock, flags);
616 ioc->res_hint = res_ptr;
617 spin_unlock_irqrestore(&ioc->res_lock, flags);
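
sba_search_bitmap() (lines 483-617) scans the resource map from res_hint for a run of free bits, with alignment and wrap-around handling. A deliberately reduced first-fit model of the single-bit case; the map size and names are illustrative:

    #include <stdio.h>
    #include <limits.h>

    #define MAP_WORDS 4

    static unsigned long res_map[MAP_WORDS];

    /* Return the index of the first clear bit and claim it, or -1 if full. */
    static long find_free_bit(void)
    {
        const unsigned bits = sizeof(unsigned long) * CHAR_BIT;
        for (unsigned w = 0; w < MAP_WORDS; w++) {
            if (res_map[w] == ~0UL)
                continue;                       /* word fully allocated */
            for (unsigned b = 0; b < bits; b++)
                if (!(res_map[w] & (1UL << b))) {
                    res_map[w] |= 1UL << b;     /* claim it */
                    return (long)(w * bits + b);
                }
        }
        return -1;
    }

    int main(void)
    {
        res_map[0] = ~0UL;                      /* first word exhausted */
        printf("first free bit: %ld\n", find_free_bit());
        return 0;
    }

The real routine additionally keeps res_hint/res_bitshift so the next search resumes where the last one stopped, and takes res_lock around the scan.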
624 * @ioc: IO MMU structure which owns the pdir we are interested in.
631 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
648 pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
649 if (unlikely(pide >= (ioc->res_size << 3))) {
650 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
651 if (unlikely(pide >= (ioc->res_size << 3))) {
660 spin_lock_irqsave(&ioc->saved_lock, flags);
661 if (ioc->saved_cnt > 0) {
663 int cnt = ioc->saved_cnt;
665 d = &(ioc->saved[ioc->saved_cnt - 1]);
667 spin_lock(&ioc->res_lock);
669 sba_mark_invalid(ioc, d->iova, d->size);
670 sba_free_range(ioc, d->iova, d->size);
673 ioc->saved_cnt = 0;
674 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
675 spin_unlock(&ioc->res_lock);
677 spin_unlock_irqrestore(&ioc->saved_lock, flags);
679 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
680 if (unlikely(pide >= (ioc->res_size << 3))) {
683 __func__, ioc->ioc_hpa, ioc->res_size,
690 __func__, ioc->ioc_hpa, ioc->res_size,
698 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
699 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
702 prefetchw(&(ioc->pdir_base[pide]));
706 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
707 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
713 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
714 ioc->res_bitshift );
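
sba_alloc_range() (lines 648-699) is a retry ladder: search with the hint, retry from the map start, then drain the delayed-free list and search once more before giving up. The control flow modelled on a one-word map; everything here is a stand-in for the real structures, and the first two searches are collapsed into one:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long res_map = ~0UL;            /* fully allocated */
    static unsigned long delayed_free = 1UL << 3;   /* parked on saved[] */

    static long search(void)
    {
        for (unsigned b = 0; b < 64; b++)
            if (!(res_map & (1UL << b))) {
                res_map |= 1UL << b;
                return b;
            }
        return -1;
    }

    static long alloc_one(void)
    {
        long pide = search();
        if (pide < 0) {
            res_map &= ~delayed_free;   /* drain the delayed frees, */
            delayed_free = 0;           /* as the saved[] loop does  */
            pide = search();            /* and retry once            */
            if (pide < 0) {
                fprintf(stderr, "out of mapping resources\n");
                exit(1);
            }
        }
        return pide;
    }

    int main(void)
    {
        printf("allocated pide %ld\n", alloc_one());
        return 0;
    }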
722 * @ioc: IO MMU structure which owns the pdir we are interested in.
726 * clear bits in the ioc's resource map
729 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
731 unsigned long iovp = SBA_IOVP(ioc, iova);
734 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
824 * @ioc: IO MMU structure which owns the pdir we are interested in.
839 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
841 u32 iovp = (u32) SBA_IOVP(ioc,iova);
851 if (!(ioc->pdir_base[off] >> 60)) {
852 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
858 ASSERT(off < ioc->pdir_size);
869 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
876 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
886 ASSERT(ioc->pdir_base[off] >> 63);
889 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
891 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
898 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
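
sba_mark_invalid() (lines 839-898) never simply zeroes a pdir entry: it either clears the valid bit (bit 63) together with the low byte, or rewrites the entry to point at the prefetch spill page so stale hardware prefetches stay harmless, then purges the IOTLB via IOC_PCOM. The bit manipulation in isolation; the spill-page value is invented:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pdir_entry = 0x8000000012345000ULL;    /* a valid mapping */
        uint64_t prefetch_spill_page = 0x7f000ULL;      /* hypothetical */

        /* Variant 1 (line 869): clear the valid bit and the low byte. */
        uint64_t cleared = pdir_entry & ~0x80000000000000FFULL;

        /* Variant 2 (line 876): point the entry at the spill page. */
        uint64_t spilled = 0x80000000000000FFULL | prefetch_spill_page;

        printf("cleared = %#llx\nspilled = %#llx\n",
               (unsigned long long)cleared, (unsigned long long)spilled);
        return 0;
    }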
916 struct ioc *ioc;
945 ioc = GET_IOC(dev);
946 ASSERT(ioc);
948 prefetch(ioc->res_hint);
960 spin_lock_irqsave(&ioc->res_lock, flags);
961 if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
963 spin_unlock_irqrestore(&ioc->res_lock, flags);
966 pide = sba_alloc_range(ioc, dev, size);
974 pdir_start = &(ioc->pdir_base[pide]);
991 spin_lock_irqsave(&ioc->res_lock, flags);
992 sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
993 spin_unlock_irqrestore(&ioc->res_lock, flags);
995 return SBA_IOVA(ioc, iovp, offset);
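
Taken together, lines 945-995 are the single-buffer map path: allocate a run of pdir slots, point them at the buffer's pages, and return ibase | iovp | offset. An end-to-end userspace model under simplified assumptions (4 KB IOMMU pages, a 16-entry pdir, no locking, a single valid bit instead of the full entry format):

    #include <stdio.h>
    #include <stdint.h>

    #define IOVP_SHIFT   12
    #define IOVP_SIZE    (1UL << IOVP_SHIFT)
    #define PDIR_ENTRIES 16

    static uint64_t pdir[PDIR_ENTRIES];
    static unsigned long res_map;               /* one bit per pdir slot */
    static const uint64_t ibase = 0x40000000ULL;

    static long alloc_range(int n)              /* stand-in for sba_alloc_range */
    {
        for (int i = 0; i + n <= PDIR_ENTRIES; i++) {
            unsigned long mask = ((1UL << n) - 1) << i;
            if (!(res_map & mask)) { res_map |= mask; return i; }
        }
        return -1;
    }

    static uint64_t map_single(uint64_t paddr, unsigned long size)
    {
        unsigned long offset = paddr & (IOVP_SIZE - 1);
        int npages = (int)((offset + size + IOVP_SIZE - 1) >> IOVP_SHIFT);
        long pide = alloc_range(npages);
        if (pide < 0)
            return 0;                           /* model of the error path */
        for (int i = 0; i < npages; i++)        /* valid bit + page frame */
            pdir[pide + i] = (1ULL << 63)
                | ((paddr & ~(uint64_t)(IOVP_SIZE - 1)) + (uint64_t)i * IOVP_SIZE);
        return ibase | ((uint64_t)pide << IOVP_SHIFT) | offset;
    }

    int main(void)
    {
        printf("iova = %#llx\n",
               (unsigned long long)map_single(0x12345678ULL, 8192));
        return 0;
    }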
1008 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1010 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1015 addr = phys_to_virt(ioc->pdir_base[off] &
1020 addr = phys_to_virt(ioc->pdir_base[off] &
1043 struct ioc *ioc;
1050 ioc = GET_IOC(dev);
1051 ASSERT(ioc);
1054 if (likely((iova & ioc->imask) != ioc->ibase)) {
1079 sba_mark_clean(ioc, iova, size);
1083 spin_lock_irqsave(&ioc->saved_lock, flags);
1084 d = &(ioc->saved[ioc->saved_cnt]);
1087 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1088 int cnt = ioc->saved_cnt;
1089 spin_lock(&ioc->res_lock);
1091 sba_mark_invalid(ioc, d->iova, d->size);
1092 sba_free_range(ioc, d->iova, d->size);
1095 ioc->saved_cnt = 0;
1096 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1097 spin_unlock(&ioc->res_lock);
1099 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1101 spin_lock_irqsave(&ioc->res_lock, flags);
1102 sba_mark_invalid(ioc, iova, size);
1103 sba_free_range(ioc, iova, size);
1104 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1105 spin_unlock_irqrestore(&ioc->res_lock, flags);
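
The unmap path (lines 1083-1099) batches freed ranges: each one is parked in ioc->saved[] and the expensive invalidate-plus-IOTLB-purge runs once per DELAYED_RESOURCE_CNT frees rather than on every unmap. The pattern without locks or hardware:

    #include <stdio.h>

    #define DELAYED_RESOURCE_CNT 8

    struct saved { unsigned long iova, size; };

    static struct saved saved[DELAYED_RESOURCE_CNT];
    static int saved_cnt;

    static void purge_batch(void)   /* stands in for the invalidate + PCOM read */
    {
        printf("purging %d parked ranges\n", saved_cnt);
    }

    static void unmap(unsigned long iova, unsigned long size)
    {
        saved[saved_cnt].iova = iova;
        saved[saved_cnt].size = size;
        if (++saved_cnt >= DELAYED_RESOURCE_CNT) {
            purge_batch();          /* one flush covers the whole batch */
            saved_cnt = 0;
        }
    }

    int main(void)
    {
        for (unsigned long i = 0; i < 20; i++)
            unmap(i << 12, 4096);
        return 0;
    }

Per the driver's own comment, the READ_REG of IOC_PCOM at line 1096 is what "flushes purges": reading back forces the posted purge writes to complete.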
1126 struct ioc *ioc;
1129 ioc = GET_IOC(dev);
1130 ASSERT(ioc);
1135 page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
1136 numa_node_id() : ioc->node, flags,
1170 *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
1208 * @ioc: IO MMU structure which owns the pdir we are interested in.
1218 struct ioc *ioc,
1250 dma_sg->dma_address = pide | ioc->ibase;
1251 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1300 * @ioc: IO MMU structure which owns the pdir we are interested in.
1313 sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1423 idx = sba_alloc_range(ioc, dev, dma_len);
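
sba_coalesce_chunks() (lines 1313-1423) folds runs of contiguous scatterlist entries into single DMA chunks before allocating IOVA ranges (line 1423). A toy of the core merging idea; the struct and the contiguity test are simplified inventions:

    #include <stdio.h>

    struct seg { unsigned long addr, len; };

    /* Merge contiguous neighbours in place; return the new entry count. */
    static int coalesce(struct seg *sg, int nents)
    {
        int out = 0;
        for (int i = 1; i < nents; i++) {
            if (sg[out].addr + sg[out].len == sg[i].addr)
                sg[out].len += sg[i].len;   /* contiguous: grow the chunk */
            else
                sg[++out] = sg[i];          /* gap: start a new chunk */
        }
        return out + 1;
    }

    int main(void)
    {
        struct seg sg[] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 },
                            { 0x8000, 0x800 } };
        int n = coalesce(sg, 3);
        for (int i = 0; i < n; i++)
            printf("chunk %d: %#lx + %#lx\n", i, sg[i].addr, sg[i].len);
        return 0;
    }

The real routine also respects the device's DMA segment-length limits when deciding how far a chunk may grow.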
1453 struct ioc *ioc;
1463 ioc = GET_IOC(dev);
1464 ASSERT(ioc);
1468 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1484 spin_lock_irqsave(&ioc->res_lock, flags);
1485 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1487 sba_dump_sg(ioc, sglist, nents);
1490 spin_unlock_irqrestore(&ioc->res_lock, flags);
1493 prefetch(ioc->res_hint);
1503 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1517 filled = sba_fill_pdir(ioc, sglist, nents);
1520 spin_lock_irqsave(&ioc->res_lock, flags);
1521 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1523 sba_dump_sg(ioc, sglist, nents);
1526 spin_unlock_irqrestore(&ioc->res_lock, flags);
1550 struct ioc *ioc;
1558 ioc = GET_IOC(dev);
1559 ASSERT(ioc);
1561 spin_lock_irqsave(&ioc->res_lock, flags);
1562 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1563 spin_unlock_irqrestore(&ioc->res_lock, flags);
1577 spin_lock_irqsave(&ioc->res_lock, flags);
1578 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1579 spin_unlock_irqrestore(&ioc->res_lock, flags);
1591 ioc_iova_init(struct ioc *ioc)
1605 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1606 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1608 ioc->iov_size = ~ioc->imask + 1;
1611 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1612 ioc->iov_size >> 20);
1624 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1626 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1627 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1628 get_order(ioc->pdir_size));
1629 if (!ioc->pdir_base)
1632 memset(ioc->pdir_base, 0, ioc->pdir_size);
1635 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1637 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1638 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1653 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1654 ioc->pdir_size /= 2;
1655 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1682 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1683 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1687 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1688 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1691 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1692 READ_REG(ioc->ioc_hpa + IOC_IBASE);
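
ioc_iova_init() (lines 1605-1692) derives everything from two registers: the IOV space size is ~imask + 1, and the pdir needs one 8-byte entry per IOMMU page of that space. The arithmetic with example values; the imask below is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PDIR_ENTRY_SIZE 8   /* bytes per pdir entry, as in the driver */

    int main(void)
    {
        uint64_t imask     = 0xFFFFFFFFC0000000ULL; /* hypothetical: 1 GB IOV space */
        uint64_t iov_size  = ~imask + 1;            /* line 1608 */
        uint64_t iovp_size = 4096;                  /* one IOMMU page */
        uint64_t pdir_size = (iov_size / iovp_size) * PDIR_ENTRY_SIZE; /* line 1626 */

        printf("iov_size  = %llu MB\n", (unsigned long long)(iov_size >> 20));
        printf("pdir_size = %llu KB\n", (unsigned long long)(pdir_size >> 10));
        return 0;
    }

With these numbers a 1 GB IOV space costs a 2 MB pdir, which is why the driver halves pdir_size at line 1654 when only half of the space is used for PCI DMA (lines 1653-1655).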
1696 ioc_resource_init(struct ioc *ioc)
1698 spin_lock_init(&ioc->res_lock);
1700 spin_lock_init(&ioc->saved_lock);
1704 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1705 ioc->res_size >>= 3; /* convert bit count to byte count */
1706 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1708 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1709 get_order(ioc->res_size));
1710 if (!ioc->res_map)
1713 memset(ioc->res_map, 0, ioc->res_size);
1715 ioc->res_hint = (unsigned long *) ioc->res_map;
1719 ioc->res_map[0] = 0x1;
1720 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1724 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1725 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1730 ioc->res_size, (void *) ioc->res_map);
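
ioc_resource_init() (lines 1698-1730) sizes the bitmap at one bit per pdir entry, then reserves the first and last slots (lines 1719-1725). The size derivation, continuing the previous example's 2 MB pdir assumption:

    #include <stdio.h>
    #include <stdint.h>

    #define PDIR_ENTRY_SIZE 8

    int main(void)
    {
        uint64_t pdir_size = 2ULL << 20;                    /* 2 MB, as above */
        uint64_t entries   = pdir_size / PDIR_ENTRY_SIZE;   /* line 1704 */
        uint64_t res_size  = entries >> 3;          /* bits -> bytes, line 1705 */

        printf("entries  = %llu\n", (unsigned long long)entries);
        printf("res_size = %llu KB\n", (unsigned long long)(res_size >> 10));
        return 0;
    }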
1734 ioc_sac_init(struct ioc *ioc)
1752 controller->iommu = ioc;
1758 ioc->sac_only_dev = sac;
1762 ioc_zx1_init(struct ioc *ioc)
1767 if (ioc->rev < 0x20)
1771 ioc->dma_mask = (0x1UL << 39) - 1;
1780 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1782 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1786 typedef void (initfunc)(struct ioc *);
1801 static struct ioc * __init
1804 struct ioc *ioc;
1807 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1808 if (!ioc)
1811 ioc->next = ioc_list;
1812 ioc_list = ioc;
1814 ioc->handle = handle;
1815 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1817 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1818 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1819 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1822 if (ioc->func_id == info->func_id) {
1823 ioc->name = info->name;
1825 (info->init)(ioc);
1835 if (!ioc->name) {
1836 ioc->name = kmalloc(24, GFP_KERNEL);
1837 if (ioc->name)
1838 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1839 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1841 ioc->name = "Unknown";
1844 ioc_iova_init(ioc);
1845 ioc_resource_init(ioc);
1846 ioc_sac_init(ioc);
1853 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1854 hpa, ioc->iov_size >> 20, ioc->ibase);
1856 return ioc;
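
ioc_init() (lines 1801-1856) probes the hardware by reading IOC_FUNC_ID and matching it against a table of per-device init hooks (the initfunc typedef at line 1786). The dispatch shape, with an invented ID and hook:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ioc { uint32_t func_id; const char *name; };

    typedef void initfunc(struct ioc *);    /* as at line 1786 */

    static void zx1_init(struct ioc *ioc)
    {
        printf("%s: applying device-specific setup\n", ioc->name);
    }

    static const struct {
        uint32_t    func_id;
        const char *name;
        initfunc   *init;
    } ioc_iommu_info[] = {
        { 0x122e, "zx1", zx1_init },        /* hypothetical ID */
    };

    static void ioc_probe(struct ioc *ioc)
    {
        for (size_t i = 0; i < sizeof ioc_iommu_info / sizeof *ioc_iommu_info; i++)
            if (ioc->func_id == ioc_iommu_info[i].func_id) {
                ioc->name = ioc_iommu_info[i].name;
                ioc_iommu_info[i].init(ioc);
                return;
            }
        ioc->name = "Unknown";              /* the fallback at line 1841 */
    }

    int main(void)
    {
        struct ioc ioc = { .func_id = 0x122e };
        ioc_probe(&ioc);
        return 0;
    }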
1866 struct ioc *ioc;
1869 for (ioc = ioc_list; ioc; ioc = ioc->next)
1871 return ioc;
1879 struct ioc *ioc = v;
1882 return ioc->next;
1893 struct ioc *ioc = v;
1894 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1898 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1900 if (ioc->node != MAX_NUMNODES)
1901 seq_printf(s, "NUMA node : %d\n", ioc->node);
1903 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1906 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1909 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1915 min = max = ioc->avg_search[0];
1917 avg += ioc->avg_search[i];
1918 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1919 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
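
The statistics block at lines 1915-1919 runs a single pass over the avg_search[] sample ring for min, max, and mean. The same loop standalone; SBA_SEARCH_SAMPLE and the sample values are illustrative:

    #include <stdio.h>

    #define SBA_SEARCH_SAMPLE 8

    int main(void)
    {
        long avg_search[SBA_SEARCH_SAMPLE] = { 4, 9, 2, 7, 5, 5, 3, 8 };
        long min, max, avg = 0;

        min = max = avg_search[0];
        for (int i = 0; i < SBA_SEARCH_SAMPLE; i++) {
            avg += avg_search[i];
            if (avg_search[i] > max) max = avg_search[i];
            if (avg_search[i] < min) min = avg_search[i];
        }
        avg /= SBA_SEARCH_SAMPLE;
        printf("min %ld max %ld avg %ld\n", min, max, avg);
        return 0;
    }

The masking at line 699 (avg_idx &= SBA_SEARCH_SAMPLE - 1) is why the sample count must be a power of two.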
1970 struct ioc *ioc;
1988 for (ioc = ioc_list; ioc; ioc = ioc->next)
1989 if (ioc->handle == handle) {
1990 PCI_CONTROLLER(bus)->iommu = ioc;
2003 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2008 ioc->node = MAX_NUMNODES;
2020 ioc->node = node;
2024 #define sba_map_ioc_to_node(ioc, handle)
2030 struct ioc *ioc;
2062 ioc = ioc_init(hpa, device->handle);
2063 if (!ioc)
2067 sba_map_ioc_to_node(ioc, device->handle);