Lines Matching refs:iommu

2 /* iommu.c: Generic sparc64 IOMMU support.
15 #include <linux/iommu-helper.h>
17 #include <asm/iommu-common.h>
23 #include <asm/iommu.h>
52 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
53 if (iommu->iommu_flushinv) {
54 iommu_write(iommu->iommu_flushinv, ~(u64)0);
59 tag = iommu->iommu_tags;
66 (void) iommu_read(iommu->write_complete_reg);
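
Note: the fragments above (lines 52-66) are from iommu_flushall(). When the hardware has a flush-invalidate-all register (iommu_flushinv), one write of all-ones clears the whole IOTLB; otherwise the code steps through the per-entry tag registers starting at iommu_tags and then reads write_complete_reg so the posted MMIO writes are known to have landed. The following is a minimal userspace model of that shape only; the register names, the entry count, and the mmio_* helpers are stand-ins, not the sparc64 register layout.

#include <stdint.h>

/* Stand-in MMIO helpers: a real driver uses reads/writes on ioremapped
 * registers; here the "registers" are plain memory so the model can run. */
static inline void mmio_write(volatile uint64_t *reg, uint64_t val) { *reg = val; }
static inline uint64_t mmio_read(volatile uint64_t *reg) { return *reg; }

struct flush_regs {
	volatile uint64_t *flushinv;        /* optional flush-everything register */
	volatile uint64_t tags[16];         /* per-entry tag registers (count is illustrative) */
	volatile uint64_t write_complete;   /* reading it waits for prior writes  */
};

static void flushall_model(struct flush_regs *r)
{
	if (r->flushinv) {
		/* One write invalidates every IOTLB entry. */
		mmio_write(r->flushinv, ~(uint64_t)0);
	} else {
		/* Otherwise clear the tag entries one by one... */
		for (int entry = 0; entry < 16; entry++)
			mmio_write(&r->tags[entry], 0);
		/* ...and read back to ensure the posted writes completed. */
		(void) mmio_read(&r->write_complete);
	}
}
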
80 #define IOPTE_IS_DUMMY(iommu, iopte) \
81 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
83 static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
88 val |= iommu->dummy_page_pa;
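
Note: IOPTE_IS_DUMMY and iopte_make_dummy (lines 80-88) show how unused translation entries are handled. Rather than being cleared, an entry keeps its flag bits but has its page-frame field redirected at one shared, zeroed "dummy" page, so a device still DMAing through a stale mapping hits harmless memory, and "is this slot free?" becomes a comparison against dummy_page_pa. A small model of the check and the update follows; the 64-bit entry layout and the IOPTE_PAGE_MASK value are illustrative, not the real sparc64 bit positions.

#include <stdint.h>
#include <stdbool.h>

#define IOPTE_PAGE_MASK 0x00000000ffffe000ULL  /* illustrative page-frame field */

struct iommu_model {
	uint64_t dummy_page_pa;   /* physical address of the shared dummy page */
};

static bool iopte_is_dummy_model(const struct iommu_model *m, uint64_t iopte)
{
	return (iopte & IOPTE_PAGE_MASK) == m->dummy_page_pa;
}

static void iopte_make_dummy_model(const struct iommu_model *m, uint64_t *iopte)
{
	uint64_t val = *iopte;

	val &= ~IOPTE_PAGE_MASK;   /* drop the old page frame                 */
	val |= m->dummy_page_pa;   /* keep the flags, point at the dummy page */
	*iopte = val;
}
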
93 int iommu_table_init(struct iommu *iommu, int tsbsize,
103 spin_lock_init(&iommu->lock);
104 iommu->ctx_lowest_free = 1;
105 iommu->tbl.table_map_base = dma_offset;
106 iommu->dma_addr_mask = dma_addr_mask;
111 iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
112 if (!iommu->tbl.map)
115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
127 iommu->dummy_page = (unsigned long) page_address(page);
128 memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
129 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
138 iommu->page_table = (iopte_t *)page_address(page);
141 iopte_make_dummy(iommu, &iommu->page_table[i]);
146 free_page(iommu->dummy_page);
147 iommu->dummy_page = 0UL;
150 kfree(iommu->tbl.map);
151 iommu->tbl.map = NULL;
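
Note: lines 93-151 are iommu_table_init(): initialise the lock, reserve context 0 (ctx_lowest_free starts at 1), record table_map_base and dma_addr_mask, allocate the allocation bitmap on the device's NUMA node, seed the pool allocator with iommu_tbl_pool_init(), allocate the dummy page and the TSB, and finally point every IOPTE at the dummy page. The tail of the group (lines 146-151) is the usual reverse-order error unwind. A compact userspace sketch of that allocate-then-unwind shape is below; the struct, helpers, and allocation calls are hypothetical stand-ins, not the kernel API.

#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

struct tbl_model {
	unsigned long *map;        /* allocation bitmap, one bit per entry */
	void          *dummy_page; /* shared target for unused entries     */
	uint64_t      *page_table; /* the TSB: one 64-bit IOPTE per entry  */
};

static int table_init_model(struct tbl_model *t, size_t nentries, size_t page_size)
{
	t->map = calloc((nentries + 63) / 64, sizeof(*t->map));
	if (!t->map)
		return -ENOMEM;

	t->dummy_page = calloc(1, page_size);
	if (!t->dummy_page)
		goto out_free_map;

	t->page_table = calloc(nentries, sizeof(*t->page_table));
	if (!t->page_table)
		goto out_free_dummy;

	/* Every entry starts out pointing at the dummy page. */
	for (size_t i = 0; i < nentries; i++)
		t->page_table[i] = (uint64_t)(uintptr_t)t->dummy_page;
	return 0;

out_free_dummy:
	free(t->dummy_page);
	t->dummy_page = NULL;
out_free_map:
	free(t->map);
	t->map = NULL;
	return -ENOMEM;
}
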
157 struct iommu *iommu,
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
167 return iommu->page_table + entry;
170 static int iommu_alloc_ctx(struct iommu *iommu)
172 int lowest = iommu->ctx_lowest_free;
173 int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
176 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
183 __set_bit(n, iommu->ctx_bitmap);
188 static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
191 __clear_bit(ctx, iommu->ctx_bitmap);
192 if (ctx < iommu->ctx_lowest_free)
193 iommu->ctx_lowest_free = ctx;
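
Note: iommu_alloc_ctx()/iommu_free_ctx() (lines 170-193) are a small bitmap allocator for hardware DMA contexts: search upward from ctx_lowest_free, wrap around and retry from bit 1 if the end is reached (context 0 means "no context"), and on free remember the lowest released bit so later searches start early. A self-contained model follows, with a plain C replacement for find_next_zero_bit; the 4096-context bound is illustrative (the listing uses IOMMU_NUM_CTXS).

#include <stdint.h>
#include <stdio.h>

#define NUM_CTXS 4096

struct ctx_alloc {
	uint64_t bitmap[NUM_CTXS / 64];
	unsigned int lowest_free;        /* initialise to 1: context 0 is reserved */
};

/* Find the first clear bit in [start, limit), or limit if none. */
static unsigned int next_zero_bit(const uint64_t *bm, unsigned int limit,
				  unsigned int start)
{
	for (unsigned int i = start; i < limit; i++)
		if (!(bm[i / 64] & (1ULL << (i % 64))))
			return i;
	return limit;
}

static unsigned int ctx_alloc(struct ctx_alloc *c)
{
	unsigned int n = next_zero_bit(c->bitmap, NUM_CTXS, c->lowest_free);

	if (n == NUM_CTXS) {
		/* Wrap: retry from 1 up to where the first pass started. */
		n = next_zero_bit(c->bitmap, c->lowest_free, 1);
		if (n == c->lowest_free) {
			fprintf(stderr, "out of contexts\n");
			return 0;            /* 0 means "no context" */
		}
	}
	c->bitmap[n / 64] |= 1ULL << (n % 64);
	return n;
}

static void ctx_free(struct ctx_alloc *c, unsigned int ctx)
{
	if (!ctx)
		return;
	c->bitmap[ctx / 64] &= ~(1ULL << (ctx % 64));
	if (ctx < c->lowest_free)
		c->lowest_free = ctx;
}

The same invariants are visible in the listing: ctx_lowest_free is set to 1 in iommu_table_init() (line 104), allocation is attempted only when the hardware supports per-context flushing (iommu_ctxflush), and context 0 is treated as "no context" throughout.
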
202 struct iommu *iommu;
221 iommu = dev->archdata.iommu;
223 iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
230 *dma_addrp = (iommu->tbl.table_map_base +
231 ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
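
Note: the expression at lines 230-231, bus_addr = table_map_base + ((iopte - page_table) << IO_PAGE_SHIFT), together with its inverse entry = (bus_addr - table_map_base) >> IO_PAGE_SHIFT used by the unmap, map_sg-cleanup, and sync paths further down, is the entire translation between a slot in the IOMMU page table and the DMA address handed to the device. A tiny model of both directions follows; IO_PAGE_SHIFT = 13 (8 KB IO pages) is an assumption for the model rather than something shown in the listing.

#include <stdint.h>
#include <stddef.h>

#define IO_PAGE_SHIFT 13   /* assumed 8 KB IOMMU page size */

struct map_table {
	uint64_t  table_map_base;  /* first DMA address covered by the table */
	uint64_t *page_table;      /* IOPTE array                            */
};

/* Entry slot -> DMA (bus) address seen by the device. */
static uint64_t entry_to_dma(const struct map_table *t, const uint64_t *iopte)
{
	return t->table_map_base +
	       ((uint64_t)(iopte - t->page_table) << IO_PAGE_SHIFT);
}

/* DMA address -> entry index, as used by the unmap and sync paths. */
static size_t dma_to_entry(const struct map_table *t, uint64_t bus_addr)
{
	return (size_t)((bus_addr - t->table_map_base) >> IO_PAGE_SHIFT);
}

Every later fragment that subtracts tbl.table_map_base or adds an offset to iommu->page_table (lines 296-297, 406-407, 504, 564-566, 635-637, 684, 720) is one of these two conversions.
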
250 struct iommu *iommu;
254 iommu = dev->archdata.iommu;
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
268 struct iommu *iommu;
276 iommu = dev->archdata.iommu;
286 base = alloc_npages(dev, iommu, npages);
287 spin_lock_irqsave(&iommu->lock, flags);
289 if (iommu->iommu_ctxflush)
290 ctx = iommu_alloc_ctx(iommu);
291 spin_unlock_irqrestore(&iommu->lock, flags);
296 bus_addr = (iommu->tbl.table_map_base +
297 ((base - iommu->page_table) << IO_PAGE_SHIFT));
313 iommu_free_ctx(iommu, ctx);
320 static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
327 iommu->iommu_ctxflush) {
370 (void) iommu_read(iommu->write_complete_reg);
390 struct iommu *iommu;
401 iommu = dev->archdata.iommu;
406 base = iommu->page_table +
407 ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
410 spin_lock_irqsave(&iommu->lock, flags);
414 if (iommu->iommu_ctxflush)
419 strbuf_flush(strbuf, iommu, bus_addr, ctx,
424 iopte_make_dummy(iommu, base + i);
426 iommu_free_ctx(iommu, ctx);
427 spin_unlock_irqrestore(&iommu->lock, flags);
429 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
443 struct iommu *iommu;
448 iommu = dev->archdata.iommu;
450 if (nelems == 0 || !iommu)
453 spin_lock_irqsave(&iommu->lock, flags);
456 if (iommu->iommu_ctxflush)
457 ctx = iommu_alloc_ctx(iommu);
476 base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
487 /* Allocate iommu entries for that segment */
490 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
496 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
497 " npages %lx\n", iommu, paddr, npages);
501 base = iommu->page_table + entry;
504 dma_addr = iommu->tbl.table_map_base +
545 spin_unlock_irqrestore(&iommu->lock, flags);
564 entry = (vaddr - iommu->tbl.table_map_base)
566 base = iommu->page_table + entry;
569 iopte_make_dummy(iommu, base + j);
571 iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
579 spin_unlock_irqrestore(&iommu->lock, flags);
587 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
591 if (iommu->iommu_ctxflush) {
594 struct iommu_map_table *tbl = &iommu->tbl;
597 base = iommu->page_table +
612 struct iommu *iommu;
616 iommu = dev->archdata.iommu;
619 ctx = fetch_sg_ctx(iommu, sglist);
621 spin_lock_irqsave(&iommu->lock, flags);
635 entry = ((dma_handle - iommu->tbl.table_map_base)
637 base = iommu->page_table + entry;
641 strbuf_flush(strbuf, iommu, dma_handle, ctx,
645 iopte_make_dummy(iommu, base + i);
647 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
652 iommu_free_ctx(iommu, ctx);
654 spin_unlock_irqrestore(&iommu->lock, flags);
661 struct iommu *iommu;
665 iommu = dev->archdata.iommu;
671 spin_lock_irqsave(&iommu->lock, flags);
679 if (iommu->iommu_ctxflush &&
682 struct iommu_map_table *tbl = &iommu->tbl;
684 iopte = iommu->page_table +
690 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
692 spin_unlock_irqrestore(&iommu->lock, flags);
699 struct iommu *iommu;
705 iommu = dev->archdata.iommu;
711 spin_lock_irqsave(&iommu->lock, flags);
715 if (iommu->iommu_ctxflush &&
718 struct iommu_map_table *tbl = &iommu->tbl;
720 iopte = iommu->page_table + ((sglist[0].dma_address -
736 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
738 spin_unlock_irqrestore(&iommu->lock, flags);
743 struct iommu *iommu = dev->archdata.iommu;
748 if (device_mask < iommu->dma_addr_mask)
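
Note: the final fragment (lines 743-748) is the dma_supported check, which reduces to a single comparison: the device's DMA mask must cover every address the IOMMU can hand out, i.e. it must be at least the dma_addr_mask recorded during iommu_table_init(). As a one-line model (the function name here is a stand-in):

#include <stdint.h>

/* A device can use this IOMMU only if its mask covers dma_addr_mask. */
static int dma_supported_model(uint64_t device_mask, uint64_t dma_addr_mask)
{
	return device_mask >= dma_addr_mask ? 1 : 0;
}
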