Lines Matching refs:tbl

45 struct iommu_table *tbl = data;
46 *val = bitmap_weight(tbl->it_map, tbl->it_size);
51 static void iommu_debugfs_add(struct iommu_table *tbl)
56 sprintf(name, "%08lx", tbl->it_index);
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
64 debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
65 debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
68 static void iommu_debugfs_del(struct iommu_table *tbl)
72 sprintf(name, "%08lx", tbl->it_index);
76 static void iommu_debugfs_add(struct iommu_table *tbl){}
77 static void iommu_debugfs_del(struct iommu_table *tbl){}
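
For context, a minimal sketch of how the "weight" attribute seen at lines 45-46 and 59 is plausibly wired together. The getter name and the exact pairing are assumptions, not quotes from the file; DEFINE_DEBUGFS_ATTRIBUTE() plus debugfs_create_file_unsafe() is the standard idiom for such a read-only attribute.

#include <linux/bitmap.h>
#include <linux/debugfs.h>

/* Sketch only: the getter name is assumed. */
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;

	*val = bitmap_weight(tbl->it_map, tbl->it_size);	/* cf. line 46 */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get,
			 NULL, "%llu\n");
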
214 struct iommu_table *tbl,
247 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
250 pool = &(tbl->large_pool);
252 pool = &(tbl->pools[pool_nr]);
272 if (limit + tbl->it_offset > mask) {
273 limit = mask - tbl->it_offset + 1;
280 pool = &(tbl->pools[0]);
288 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
289 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
298 } else if (pass <= tbl->nr_pools) {
301 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
302 pool = &tbl->pools[pool_nr];
308 } else if (pass == tbl->nr_pools + 1) {
311 pool = &tbl->large_pool;
332 pool->hint = (end + tbl->it_blocksize - 1) &
333 ~(tbl->it_blocksize - 1);
345 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
355 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
360 entry += tbl->it_offset; /* Offset into real TCE table */
361 ret = entry << tbl->it_page_shift; /* Set the return dma address */
364 build_fail = tbl->it_ops->set(tbl, entry, npages,
366 IOMMU_PAGE_MASK(tbl), direction, attrs);
368 /* tbl->it_ops->set() only returns non-zero for transient errors.
374 __iommu_free(tbl, ret, npages);
379 if (tbl->it_ops->flush)
380 tbl->it_ops->flush(tbl);
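
Lines 345-380 form the core allocation path: reserve npages in the bitmap, turn the entry number into a bus address, program the TCEs via it_ops->set(), and flush if the back end requires it. A worked example of the address arithmetic at lines 360-361, with values assumed purely for illustration:

/*
 * Assumed values: it_page_shift == 12, it_offset == 0.  Bitmap slot 0x100
 * then becomes bus address 0x100000; callers OR the sub-page offset back
 * in afterwards (lines 532 and 885).
 */
unsigned long entry = 0x100 + tbl->it_offset;			/* 0x100    */
dma_addr_t ret = (dma_addr_t)entry << tbl->it_page_shift;	/* 0x100000 */
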
388 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
393 entry = dma_addr >> tbl->it_page_shift;
394 free_entry = entry - tbl->it_offset;
396 if (((free_entry + npages) > tbl->it_size) ||
397 (entry < tbl->it_offset)) {
402 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
403 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
404 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
405 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
406 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
416 static struct iommu_pool *get_pool(struct iommu_table *tbl,
420 unsigned long largepool_start = tbl->large_pool.start;
424 p = &tbl->large_pool;
426 unsigned int pool_nr = entry / tbl->poolsize;
428 BUG_ON(pool_nr > tbl->nr_pools);
429 p = &tbl->pools[pool_nr];
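
get_pool() (lines 416-429) is the inverse of the pool carving done later in iommu_init_table() (lines 746-766): the first three quarters of the window are split evenly across nr_pools small pools and the remaining quarter is the large pool. A worked example, with geometry assumed only to make the arithmetic concrete:

/* Assumed geometry: 1 GB window, 4 KB TCE pages, 4 small pools. */
unsigned long it_size  = (1UL << 30) >> 12;	/* 262144 entries                */
unsigned long poolsize = (it_size * 3 / 4) / 4;	/*  49152 entries per small pool */

/* entry 100000: below large_pool.start (4 * 49152 == 196608) -> pools[100000 / 49152] == pools[2] */
/* entry 200000: at or above 196608                           -> the large pool                    */
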
435 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
442 entry = dma_addr >> tbl->it_page_shift;
443 free_entry = entry - tbl->it_offset;
445 pool = get_pool(tbl, free_entry);
447 if (!iommu_free_check(tbl, dma_addr, npages))
450 tbl->it_ops->clear(tbl, entry, npages);
453 bitmap_clear(tbl->it_map, free_entry, npages);
457 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
460 __iommu_free(tbl, dma_addr, npages);
466 if (tbl->it_ops->flush)
467 tbl->it_ops->flush(tbl);
470 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
484 if ((nelems == 0) || !tbl)
509 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
511 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
513 align = PAGE_SHIFT - tbl->it_page_shift;
514 entry = iommu_range_alloc(dev, tbl, npages, &handle,
515 mask >> tbl->it_page_shift, align);
523 dev_info(dev, "iommu_alloc failed, tbl %p "
524 "vaddr %lx npages %lu\n", tbl, vaddr,
530 entry += tbl->it_offset;
531 dma_addr = entry << tbl->it_page_shift;
532 dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
538 build_fail = tbl->it_ops->set(tbl, entry, npages,
539 vaddr & IOMMU_PAGE_MASK(tbl),
577 if (tbl->it_ops->flush)
578 tbl->it_ops->flush(tbl);
600 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
602 IOMMU_PAGE_SIZE(tbl));
603 __iommu_free(tbl, vaddr, npages);
613 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
621 if (!tbl)
632 IOMMU_PAGE_SIZE(tbl));
633 __iommu_free(tbl, dma_handle, npages);
641 if (tbl->it_ops->flush)
642 tbl->it_ops->flush(tbl);
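
ppc_iommu_map_sg()/ppc_iommu_unmap_sg() (lines 470-642) are the scatter/gather front end over the same bitmap allocator. A hedged usage sketch follows; the trailing mask/direction/attrs argument order and the failure return are inferred from the fragments above and the dma_iommu callers, not quoted from the header.

int nents_mapped;

nents_mapped = ppc_iommu_map_sg(dev, tbl, sgl, nents,
				dma_get_mask(dev), DMA_TO_DEVICE, 0);
if (nents_mapped <= 0)		/* assumed failure convention */
	return -EIO;

/* ... hand the (possibly shortened) sgl to the device ... */

ppc_iommu_unmap_sg(tbl, sgl, nents, DMA_TO_DEVICE, 0);
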
645 static void iommu_table_clear(struct iommu_table *tbl)
654 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
659 if (tbl->it_ops->get) {
663 for (index = 0; index < tbl->it_size; index++) {
664 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
669 __set_bit(index, tbl->it_map);
674 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
678 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
679 index < tbl->it_size; index++)
680 __clear_bit(index, tbl->it_map);
686 static void iommu_table_reserve_pages(struct iommu_table *tbl,
697 if (tbl->it_offset == 0)
698 set_bit(0, tbl->it_map);
700 if (res_start < tbl->it_offset)
701 res_start = tbl->it_offset;
703 if (res_end > (tbl->it_offset + tbl->it_size))
704 res_end = tbl->it_offset + tbl->it_size;
708 tbl->it_reserved_start = tbl->it_offset;
709 tbl->it_reserved_end = tbl->it_offset;
713 tbl->it_reserved_start = res_start;
714 tbl->it_reserved_end = res_end;
716 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
717 set_bit(i - tbl->it_offset, tbl->it_map);
724 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
732 BUG_ON(!tbl->it_ops);
735 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
737 tbl->it_map = vzalloc_node(sz, nid);
738 if (!tbl->it_map) {
743 iommu_table_reserve_pages(tbl, res_start, res_end);
746 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
747 tbl->nr_pools = IOMMU_NR_POOLS;
749 tbl->nr_pools = 1;
752 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
754 for (i = 0; i < tbl->nr_pools; i++) {
755 p = &tbl->pools[i];
757 p->start = tbl->poolsize * i;
759 p->end = p->start + tbl->poolsize;
762 p = &tbl->large_pool;
764 p->start = tbl->poolsize * i;
766 p->end = tbl->it_size;
768 iommu_table_clear(tbl);
776 iommu_debugfs_add(tbl);
778 return tbl;
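
As a usage sketch of the constructor at lines 724-778: a platform back end fills in the geometry and it_ops before calling iommu_init_table(). Everything named my_* below is hypothetical, the geometry values are assumed, and nid stands for the bus's NUMA node.

static struct iommu_table_ops my_tce_ops = {	/* hypothetical ops table */
	.set   = my_tce_build,		/* programs TCEs (cf. it_ops->set, line 364)  */
	.clear = my_tce_free,		/* clears TCEs  (cf. it_ops->clear, line 450) */
};

struct iommu_table *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);

if (!tbl)
	return -ENOMEM;

tbl->it_offset     = 0;			/* first TCE entry of the window */
tbl->it_size       = (1UL << 30) >> 12;	/* 1 GB window of 4 KB TCE pages */
tbl->it_page_shift = 12;
tbl->it_ops        = &my_tce_ops;
kref_init(&tbl->it_kref);		/* refcount used at lines 829-843 */

/* No pre-reserved range in this sketch, hence res_start == res_end == 0. */
iommu_init_table(tbl, nid, 0, 0);
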
781 bool iommu_table_in_use(struct iommu_table *tbl)
786 if (tbl->it_offset == 0)
790 if (!tbl->it_reserved_start && !tbl->it_reserved_end)
791 return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
793 end = tbl->it_reserved_start - tbl->it_offset;
794 if (find_next_bit(tbl->it_map, end, start) != end)
797 start = tbl->it_reserved_end - tbl->it_offset;
798 end = tbl->it_size;
799 return find_next_bit(tbl->it_map, end, start) != end;
804 struct iommu_table *tbl;
806 tbl = container_of(kref, struct iommu_table, it_kref);
808 if (tbl->it_ops->free)
809 tbl->it_ops->free(tbl);
811 if (!tbl->it_map) {
812 kfree(tbl);
816 iommu_debugfs_del(tbl);
819 if (iommu_table_in_use(tbl))
823 vfree(tbl->it_map);
826 kfree(tbl);
829 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
831 if (kref_get_unless_zero(&tbl->it_kref))
832 return tbl;
838 int iommu_tce_table_put(struct iommu_table *tbl)
840 if (WARN_ON(!tbl))
843 return kref_put(&tbl->it_kref, iommu_table_free);
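
Lines 804-843 define the table's lifetime: iommu_tce_table_get() takes a reference only while the table is still live (kref_get_unless_zero), and iommu_tce_table_put() drops it, freeing the table through iommu_table_free() on the last put. A minimal usage sketch, with the error code chosen arbitrarily:

struct iommu_table *t = iommu_tce_table_get(table_group->tables[0]);

if (!t)
	return -ENODEV;		/* table already on its way out */

/* ... safe to use t here: it cannot be freed under us ... */

iommu_tce_table_put(t);		/* may drop the last reference and free the table */
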
852 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
867 if (tbl) {
868 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
870 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
872 align = PAGE_SHIFT - tbl->it_page_shift;
874 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
875 mask >> tbl->it_page_shift, align,
880 dev_info(dev, "iommu_alloc failed, tbl %p "
881 "vaddr %p npages %d\n", tbl, vaddr,
885 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
891 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
899 if (tbl) {
901 IOMMU_PAGE_SIZE(tbl));
902 iommu_free(tbl, dma_handle, npages);
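
iommu_map_page()/iommu_unmap_page() (lines 852-902) back the streaming DMA ops. The sketch below assumes the conventional page-plus-offset argument order and DMA_MAPPING_ERROR as the failure value; neither is quoted directly from the fragments above.

dma_addr_t dma;

dma = iommu_map_page(dev, tbl, virt_to_page(buf), offset_in_page(buf),
		     len, dma_get_mask(dev), DMA_TO_DEVICE, 0);
if (dma == DMA_MAPPING_ERROR)	/* assumed failure value */
	return -EIO;

/* ... device DMAs to/from 'dma' ... */

iommu_unmap_page(tbl, dma, len, DMA_TO_DEVICE, 0);
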
910 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
919 int tcesize = (1 << tbl->it_page_shift);
935 if (!tbl)
946 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
948 io_order = get_iommu_order(size, tbl);
949 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
950 mask >> tbl->it_page_shift, io_order, 0);
960 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
963 if (tbl) {
967 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
968 iommu_free(tbl, dma_handle, nio_pages);
1035 void iommu_flush_tce(struct iommu_table *tbl)
1038 if (tbl->it_ops->flush)
1039 tbl->it_ops->flush(tbl);
1078 struct iommu_table *tbl,
1085 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
1088 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1096 void iommu_tce_kill(struct iommu_table *tbl,
1099 if (tbl->it_ops->tce_kill)
1100 tbl->it_ops->tce_kill(tbl, entry, pages);
1105 static int iommu_take_ownership(struct iommu_table *tbl)
1107 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1117 if (!tbl->it_ops->xchg_no_kill)
1120 spin_lock_irqsave(&tbl->large_pool.lock, flags);
1121 for (i = 0; i < tbl->nr_pools; i++)
1122 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1124 if (iommu_table_in_use(tbl)) {
1128 memset(tbl->it_map, 0xff, sz);
1131 for (i = 0; i < tbl->nr_pools; i++)
1132 spin_unlock(&tbl->pools[i].lock);
1133 spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1138 static void iommu_release_ownership(struct iommu_table *tbl)
1140 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1142 spin_lock_irqsave(&tbl->large_pool.lock, flags);
1143 for (i = 0; i < tbl->nr_pools; i++)
1144 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1146 memset(tbl->it_map, 0, sz);
1148 iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1149 tbl->it_reserved_end);
1151 for (i = 0; i < tbl->nr_pools; i++)
1152 spin_unlock(&tbl->pools[i].lock);
1153 spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1209 struct iommu_table *tbl = table_group->tables[0];
1214 if (tbl->it_page_shift != page_shift ||
1215 tbl->it_size != (window_size >> page_shift) ||
1216 tbl->it_indirect_levels != levels - 1)
1219 *ptbl = iommu_tce_table_get(tbl);
1224 int num, struct iommu_table *tbl)
1226 return tbl == table_group->tables[num] ? 0 : -EPERM;
1239 struct iommu_table *tbl = table_group->tables[i];
1241 if (!tbl || !tbl->it_map)
1244 rc = iommu_take_ownership(tbl);
1260 struct iommu_table *tbl = table_group->tables[i];
1262 if (!tbl)
1265 iommu_table_clear(tbl);
1266 if (tbl->it_map)
1267 iommu_release_ownership(tbl);