Lines Matching refs:ioc

214 struct ioc {
242 struct ioc *next; /* Linked list of discovered iocs */
244 unsigned int hw_path; /* the hardware path this ioc is associated with */
249 static struct ioc *ioc_list;
284 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
288 idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
293 ioc->res_hint = res_idx + (size >> 3); \
299 u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
300 u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
301 CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
302 res_ptr = (u##size *)&(ioc)->res_map[0]; \
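
The two macros above implement a first-fit scan over the bitmap resource map: the search starts at a rotating hint (res_hint), runs to the end of the map, and on a miss restarts from res_map[0]. A minimal userspace sketch of the same technique, assuming a fixed 8-bit map width and hypothetical names (res_map, res_hint, find_free_byte):

    #include <stdint.h>

    #define RES_SIZE 64u           /* bytes in the toy resource map */

    static uint8_t res_map[RES_SIZE];
    static unsigned int res_hint;  /* byte index where the next search begins */

    /*
     * First-fit search for a free slot whose bits match `mask`, scanning
     * from the hint to the end and then wrapping to the start, mirroring
     * the two CCIO_SEARCH_LOOP passes in CCIO_FIND_FREE_MAPPING.
     * Returns the byte index, or -1 if the map is exhausted.
     */
    static int find_free_byte(uint8_t mask)
    {
        unsigned int idx;

        for (idx = res_hint; idx < RES_SIZE; idx++) {
            if ((res_map[idx] & mask) == 0) {   /* slot is free */
                res_map[idx] |= mask;           /* claim it */
                res_hint = idx + 1;             /* rotate the hint forward */
                return (int)idx;
            }
        }
        for (idx = 0; idx < res_hint; idx++) {  /* wrapped second pass */
            if ((res_map[idx] & mask) == 0) {
                res_map[idx] |= mask;
                res_hint = idx + 1;
                return (int)idx;
            }
        }
        return -1;                              /* map full */
    }

The driver widens the same loop to 16-, 32- and 64-bit strides depending on the allocation size, which is why the macros are parameterized on `size`.
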
320 * ccio_alloc_range - Allocate pages in the ioc's resource map.
321 * @ioc: The I/O Controller.
326 * This function searches the resource map of the ioc to locate a range
330 ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
364 CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
366 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
369 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
371 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
374 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
387 __func__, res_idx, ioc->res_hint);
396 ioc->avg_search[ioc->avg_idx++] = cr_start;
397 ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
398 ioc->used_pages += pages_needed;
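
The avg_search/avg_idx pair above is a classic power-of-two ring buffer: the post-increment index is wrapped with a mask rather than a modulo, which only works because CCIO_SEARCH_SAMPLE is a power of two. A minimal sketch of the idiom (SEARCH_SAMPLE and record_sample are illustrative names):

    #include <stdint.h>

    #define SEARCH_SAMPLE 8u          /* must be a power of two for the mask */

    static uint64_t avg_search[SEARCH_SAMPLE];
    static unsigned int avg_idx;

    /* Record one search-cost sample, silently overwriting the oldest. */
    static void record_sample(uint64_t cycles)
    {
        avg_search[avg_idx++] = cycles;
        avg_idx &= SEARCH_SAMPLE - 1; /* cheap wrap: same as % SEARCH_SAMPLE */
    }
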
406 #define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
407 u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
412 * ccio_free_range - Free pages from the ioc's resource map.
413 * @ioc: The I/O Controller.
421 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
434 ioc->used_pages -= pages_mapped;
441 CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
443 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
446 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
448 CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
451 CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
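
Freeing is the exact inverse of the search: CCIO_FREE_MAPPINGS re-derives the pointer from res_idx and clears the same mask it set (the driver's macro also sanity-checks that every bit being freed was in fact set). Continuing the byte-wide sketch from above:

    #include <assert.h>
    #include <stdint.h>

    /* Release a slot claimed by find_free_byte() in the sketch above. */
    static void free_byte(uint8_t *res_map, unsigned int idx, uint8_t mask)
    {
        assert((res_map[idx] & mask) == mask); /* every freed bit must be set */
        res_map[idx] &= (uint8_t)~mask;
    }
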
608 * @ioc: The I/O Controller.
617 ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
619 u32 chain_size = 1 << ioc->chainid_shift;
625 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
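
The purge walks the I/O TLB in chain-sized strides: the iovp is aligned down to a chain boundary and one CMD_TLB_PURGE command is written per chain covering the range. A hedged standalone sketch, where write_reg() stands in for the driver's WRITE_U32() and the opcode value is assumed:

    #include <stddef.h>
    #include <stdint.h>

    #define CMD_TLB_PURGE 33u   /* illustrative opcode, check the real header */

    /* Stand-in for the driver's WRITE_U32() MMIO accessor. */
    static void write_reg(uint32_t val, volatile uint32_t *addr)
    {
        *addr = val;
    }

    /*
     * Purge every TLB chain covering [iovp, iovp + byte_cnt): align the
     * address down to a chain boundary, then step in chain_size strides.
     */
    static void clear_io_tlb(volatile uint32_t *io_command, uint64_t iovp,
                             size_t byte_cnt, unsigned int chainid_shift)
    {
        uint64_t chain_size = 1ULL << chainid_shift;
        uint64_t end = iovp + byte_cnt;

        iovp &= ~(chain_size - 1);
        for (; iovp < end; iovp += chain_size)
            write_reg(CMD_TLB_PURGE | (uint32_t)iovp, io_command);
    }
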
633 * @ioc: The I/O Controller.
650 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
661 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
663 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
677 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
718 struct ioc *ioc;
726 ioc = GET_IOC(dev);
727 if (!ioc)
737 spin_lock_irqsave(&ioc->res_lock, flags);
740 ioc->msingle_calls++;
741 ioc->msingle_pages += size >> IOVP_SHIFT;
744 idx = ccio_alloc_range(ioc, dev, size);
747 pdir_start = &(ioc->pdir_base[idx]);
768 spin_unlock_irqrestore(&ioc->res_lock, flags);
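
The single-buffer map path above has a fixed shape: take the resource-map lock, carve an index range out of the bitmap, point at pdir_base[idx], and write one I/O pdir entry per page. A toy userspace rendition of that shape (the pdir entry format and the bump allocator are assumptions, not the hardware's layout):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    #define IOVP_SHIFT 12                 /* 4 KiB I/O pages, as in the driver */
    #define IOVP_SIZE  (1UL << IOVP_SHIFT)

    static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t pdir_base[4096];      /* toy I/O page directory */
    static unsigned int next_idx;         /* toy bump allocator, no freeing */

    /* Illustrative pdir entry: page-aligned address plus a fake valid bit. */
    static uint64_t make_pdir_entry(uintptr_t paddr)
    {
        return ((uint64_t)paddr & ~(uint64_t)(IOVP_SIZE - 1)) | 1;
    }

    /* Lock, reserve a pdir index range, fill one entry per page, unlock. */
    static uint64_t map_single(void *addr, size_t size)
    {
        uintptr_t paddr = (uintptr_t)addr;  /* stands in for virt_to_phys() */
        size_t npages = (size + IOVP_SIZE - 1) >> IOVP_SHIFT;
        unsigned int idx;
        uint64_t *pdir_ptr;

        pthread_mutex_lock(&res_lock);
        idx = next_idx;                 /* driver calls ccio_alloc_range() */
        next_idx += npages;             /* no exhaustion handling in this toy */
        pdir_ptr = &pdir_base[idx];
        while (npages--) {
            *pdir_ptr++ = make_pdir_entry(paddr);
            paddr += IOVP_SIZE;
        }
        pthread_mutex_unlock(&res_lock);

        return (uint64_t)idx << IOVP_SHIFT; /* returned I/O virtual address */
    }

The real path also folds the buffer's sub-page offset into the page count and the returned address, and applies cache hint bits per entry; both are omitted here.
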
797 struct ioc *ioc;
802 ioc = GET_IOC(dev);
803 if (!ioc) {
804 WARN_ON(!ioc);
815 spin_lock_irqsave(&ioc->res_lock, flags);
818 ioc->usingle_calls++;
819 ioc->usingle_pages += size >> IOVP_SHIFT;
822 ccio_mark_invalid(ioc, iova, size);
823 ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
824 spin_unlock_irqrestore(&ioc->res_lock, flags);
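
Unmap is the mirror image, and the ordering matters: the pdir entries are stamped invalid (and the I/O TLB purged) before the bitmap range is released, so a reallocated index can never briefly expose a stale translation. A sketch of the shape, reusing the toy state from the map_single() sketch above:

    /* Invalidate each pdir entry covering the range. */
    static void mark_invalid(uint64_t iova, size_t size)
    {
        unsigned int idx = (unsigned int)(iova >> IOVP_SHIFT);
        size_t npages = (size + IOVP_SIZE - 1) >> IOVP_SHIFT;

        while (npages--)
            pdir_base[idx++] = 0; /* driver clears the valid bits, then purges */
    }

    /* Lock; invalidate translations first, only then free the bitmap range. */
    static void unmap_single(uint64_t iova, size_t size)
    {
        pthread_mutex_lock(&res_lock);
        mark_invalid(iova, size);
        /* ...ccio_free_range() would clear the resource-map bits here... */
        pthread_mutex_unlock(&res_lock);
    }
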
906 struct ioc *ioc;
914 ioc = GET_IOC(dev);
915 if (!ioc)
932 spin_lock_irqsave(&ioc->res_lock, flags);
935 ioc->msg_calls++;
946 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
956 filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);
958 spin_unlock_irqrestore(&ioc->res_lock, flags);
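
The scatter-gather path makes two passes over the list: iommu_coalesce_chunks() first merges virtually contiguous entries and reserves an IOVA range per merged chunk (note that ccio_alloc_range is passed in as a callback), then iommu_fill_pdir() walks the list again writing pdir entries. A toy rendition of the first pass (struct chunk and the callback type are illustrative, not the driver's real types):

    #include <stddef.h>
    #include <stdint.h>

    struct chunk {
        uintptr_t vaddr;    /* CPU address of this piece */
        size_t    len;
        uint64_t  iova;     /* filled in once a range is reserved */
    };

    /* Allocator callback, mirroring how ccio_alloc_range is passed in. */
    typedef uint64_t (*range_alloc_fn)(size_t len);

    /* Pass 1: merge adjacent pieces in place, then reserve IOVA per chunk. */
    static int coalesce_chunks(struct chunk *c, int n, range_alloc_fn alloc)
    {
        int out = 0;

        for (int i = 0; i < n; i++) {
            if (out > 0 && c[out - 1].vaddr + c[out - 1].len == c[i].vaddr)
                c[out - 1].len += c[i].len;     /* contiguous: grow previous */
            else
                c[out++] = c[i];                /* gap: start a new chunk */
        }
        for (int i = 0; i < out; i++)
            c[i].iova = alloc(c[i].len);        /* one range per merged chunk */
        return out;                             /* pass 2 fills the pdir */
    }
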
986 struct ioc *ioc;
989 ioc = GET_IOC(dev);
990 if (!ioc) {
991 WARN_ON(!ioc);
999 ioc->usg_calls++;
1005 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
1032 struct ioc *ioc = ioc_list;
1034 while (ioc != NULL) {
1035 unsigned int total_pages = ioc->res_size << 3;
1041 seq_printf(m, "%s\n", ioc->name);
1044 (ioc->cujo20_bug ? "yes" : "no"));
1051 total_pages - ioc->used_pages, ioc->used_pages,
1052 (int)(ioc->used_pages * 100 / total_pages));
1056 ioc->res_size, total_pages);
1059 min = max = ioc->avg_search[0];
1061 avg += ioc->avg_search[j];
1062 if(ioc->avg_search[j] > max)
1063 max = ioc->avg_search[j];
1064 if(ioc->avg_search[j] < min)
1065 min = ioc->avg_search[j];
1072 ioc->msingle_calls, ioc->msingle_pages,
1073 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1076 min = ioc->usingle_calls - ioc->usg_calls;
1077 max = ioc->usingle_pages - ioc->usg_pages;
1082 ioc->msg_calls, ioc->msg_pages,
1083 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1086 ioc->usg_calls, ioc->usg_pages,
1087 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1090 ioc = ioc->next;
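
The proc output reduces the sample ring to min/avg/max and reports throughput as pages per thousand calls; the multiply-by-1000 is fixed-point arithmetic, since the kernel avoids floating point. A sketch of both reductions (function names are illustrative):

    #include <stdint.h>

    /* Reduce a sample buffer to min/avg/max, as the proc handler does. */
    static void search_stats(const uint64_t *s, unsigned int n,
                             uint64_t *min, uint64_t *avg, uint64_t *max)
    {
        uint64_t sum = 0;

        *min = *max = s[0];
        for (unsigned int j = 0; j < n; j++) {
            sum += s[j];
            if (s[j] > *max) *max = s[j];
            if (s[j] < *min) *min = s[j];
        }
        *avg = sum / n;
    }

    /* "avg pages per 1000 calls": integer fixed point, no floats needed. */
    static unsigned int pages_per_kcall(unsigned long pages, unsigned long calls)
    {
        return calls ? (unsigned int)((pages * 1000) / calls) : 0;
    }
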
1098 struct ioc *ioc = ioc_list;
1100 while (ioc != NULL) {
1101 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1102 ioc->res_size, false);
1104 ioc = ioc->next;
1113 * ccio_find_ioc - Find the ioc in the ioc_list
1114 * @hw_path: The hardware path of the ioc.
1116 * This function searches the ioc_list for an ioc that matches
1119 static struct ioc * ccio_find_ioc(int hw_path)
1122 struct ioc *ioc;
1124 ioc = ioc_list;
1126 if (ioc->hw_path == hw_path)
1127 return ioc;
1129 ioc = ioc->next;
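
ccio_find_ioc is a plain walk of the singly linked discovery list keyed on hardware path. The equivalent, condensed (a minimal struct with only the fields this walk touches):

    #include <stddef.h>

    struct ioc {
        unsigned int hw_path;   /* hardware path this IOC answers for */
        struct ioc  *next;      /* linked list of discovered IOCs */
    };

    /* Return the first IOC on the list matching hw_path, or NULL. */
    static struct ioc *find_ioc(struct ioc *head, unsigned int hw_path)
    {
        for (struct ioc *ioc = head; ioc != NULL; ioc = ioc->next)
            if (ioc->hw_path == hw_path)
                return ioc;
        return NULL;
    }
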
1161 struct ioc *ioc = ccio_get_iommu(dev);
1164 ioc->cujo20_bug = 1;
1165 res_ptr = ioc->res_map;
1168 while (idx < ioc->res_size) {
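
The Cujo 2.0 workaround never patches the fault at runtime; it pre-marks the affected IOVA pages as busy in the resource map so ccio_alloc_range can never hand them out. A sketch of that shape (the start index and stride are illustrative; the driver derives them from the chip's bad-page addresses):

    #include <stdint.h>

    /* Illustrative placement of the bad pages, not the chip's real values. */
    #define BAD_FIRST_BYTE   0u
    #define BAD_STRIDE_BYTES 64u

    /* Permanently reserve the poisoned stride: 8 I/O pages per map byte. */
    static void reserve_bad_pages(uint8_t *res_map, unsigned int res_size)
    {
        for (unsigned int idx = BAD_FIRST_BYTE; idx < res_size;
             idx += BAD_STRIDE_BYTES)
            res_map[idx] |= 0xff;
    }
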
1219 * @ioc: The I/O Controller.
1226 ccio_ioc_init(struct ioc *ioc)
1273 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1275 BUG_ON(ioc->pdir_size > 8 * 1024 * 1024); /* max pdir size <= 8MB */
1278 BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
1281 __func__, ioc->ioc_regs,
1286 ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
1287 get_order(ioc->pdir_size));
1288 if(NULL == ioc->pdir_base) {
1291 memset(ioc->pdir_base, 0, ioc->pdir_size);
1293 BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
1294 DBG_INIT(" base %p\n", ioc->pdir_base);
1297 ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
1298 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1300 ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
1301 get_order(ioc->res_size));
1302 if(NULL == ioc->res_map) {
1305 memset(ioc->res_map, 0, ioc->res_size);
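
The two allocations are tied together by simple arithmetic: one u64 pdir entry per I/O page, then one resource-map bit per pdir entry (hence the >> 3). A worked example with an assumed 1 GiB IOVA space and 4 KiB I/O pages:

    #include <stdio.h>

    /*
     * entries = 1 GiB / 4 KiB        = 262144 pdir entries
     * pdir    = 262144 * sizeof(u64) = 2 MiB  (well under the 8 MiB cap)
     * res_map = 262144 bits / 8      = 32 KiB (one bit per pdir entry)
     */
    int main(void)
    {
        unsigned long iova_space = 1UL << 30;   /* 1 GiB, an assumption */
        unsigned long iovp_size  = 1UL << 12;   /* 4 KiB I/O page */
        unsigned long pdir_size  =
            (iova_space / iovp_size) * sizeof(unsigned long long);
        unsigned long res_size   =
            (pdir_size / sizeof(unsigned long long)) >> 3;

        printf("pdir = %lu KiB\n", pdir_size >> 10);  /* 2048 KiB */
        printf("res  = %lu KiB\n", res_size >> 10);   /* 32 KiB   */
        return 0;
    }
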
1308 ioc->res_hint = 16;
1311 spin_lock_init(&ioc->res_lock);
1317 ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
1318 DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
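
The chain id shift splits the IOVA space evenly across the TLB chains: get_order(iova_space_size) + PAGE_SHIFT is log2 of the space in bytes, and subtracting CCIO_CHAINID_SHIFT (log2 of the chain count) leaves log2 of each chain's span. A worked example, assuming an 8-bit chain id:

    #include <stdio.h>

    /* log2 of each chain's span: total-space log2 minus chain-count log2. */
    static unsigned int chainid_shift(unsigned int space_log2,
                                      unsigned int chainid_bits)
    {
        return space_log2 - chainid_bits;
    }

    int main(void)
    {
        /* 1 GiB space (2^30) over 2^8 chains -> shift 22, 4 MiB per chain. */
        printf("chainid_shift = %u\n", chainid_shift(30, 8));
        return 0;
    }
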
1323 WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
1324 &ioc->ioc_regs->io_chain_id_mask);
1326 WRITE_U32(virt_to_phys(ioc->pdir_base),
1327 &ioc->ioc_regs->io_pdir_base);
1332 WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);
1337 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
1338 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);
1341 WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
1342 &ioc->ioc_regs->io_command);
1379 static int __init ccio_init_resources(struct ioc *ioc)
1381 struct resource *res = ioc->mmio_region;
1385 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1387 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1388 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1441 * Some other issues: one of the resources in the ioc may be unassigned.
1448 struct ioc *ioc = ccio_get_iommu(dev);
1449 if (!ioc)
1452 parent = ioc->mmio_region;
1464 &ioc->ioc_regs->io_io_low);
1466 &ioc->ioc_regs->io_io_high);
1470 &ioc->ioc_regs->io_io_low_hv);
1472 &ioc->ioc_regs->io_io_high_hv);
1485 struct ioc *ioc = ccio_get_iommu(dev);
1487 if (!ioc) {
1489 } else if ((ioc->mmio_region->start <= res->start) &&
1490 (res->end <= ioc->mmio_region->end)) {
1491 parent = ioc->mmio_region;
1492 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1493 (res->end <= (ioc->mmio_region + 1)->end)) {
1494 parent = ioc->mmio_region + 1;
1518 struct ioc *ioc, **ioc_p = &ioc_list;
1521 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1522 if (ioc == NULL) {
1527 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1529 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
1535 *ioc_p = ioc;
1537 ioc->hw_path = dev->hw_path;
1538 ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
1539 if (!ioc->ioc_regs) {
1540 kfree(ioc);
1543 ccio_ioc_init(ioc);
1544 if (ccio_init_resources(ioc)) {
1545 iounmap(ioc->ioc_regs);
1546 kfree(ioc);
1555 hba->iommu = ioc;
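
Two idioms in the probe path are worth calling out: the error unwind releases resources in reverse order of acquisition (iounmap before kfree), and `struct ioc **ioc_p` lets a new IOC be appended to ioc_list without special-casing an empty list. A sketch of the append idiom, using the minimal struct from the find_ioc() sketch above:

    /* Walk ioc_p to the terminating NULL link, then plant the node there. */
    static void ioc_list_append(struct ioc **list, struct ioc *ioc)
    {
        struct ioc **ioc_p = list;

        while (*ioc_p)
            ioc_p = &(*ioc_p)->next;
        ioc->next = NULL;
        *ioc_p = ioc;
    }

Because ioc_p always points at the link to rewrite (either the list head or some node's next field), the empty and non-empty cases are the same code path.
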