Lines Matching defs:iommu (definitions and uses of the identifier iommu in the Intel IOMMU debugfs code)

17 #include "iommu.h"
116 struct intel_iommu *iommu;
122 for_each_active_iommu(iommu, drhd) {
130 iommu->name, drhd->reg_base_addr);
136 raw_spin_lock_irqsave(&iommu->register_lock, flag);
138 value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
144 value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
149 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
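
The fragments at lines 116-149 look like the register-dump show routine: walk every active DMAR unit, take the per-IOMMU register lock, and print the 32-bit and 64-bit register banks. A minimal sketch of how they plausibly fit together follows; the format strings and the seq_file boilerplate are assumptions, and the sketch relies on the includes of the listed file itself (see "iommu.h" at line 17). Only the locking and the dmar_readl()/dmar_readq() accesses come from the listing.

/* Hedged sketch, not the verbatim kernel routine. */
static int iommu_regset_show_sketch(struct seq_file *m, void *unused)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        unsigned long flag;
        u64 value;
        int i;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
                           iommu->name, drhd->reg_base_addr);

                /* Sample the MMIO registers under the per-IOMMU register lock. */
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
                for (i = 0; i < ARRAY_SIZE(iommu_regs_32); i++) {
                        value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
                        seq_printf(m, "0x%02x\t0x%016llx\n",
                                   iommu_regs_32[i].offset, value);
                }
                for (i = 0; i < ARRAY_SIZE(iommu_regs_64); i++) {
                        value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
                        seq_printf(m, "0x%02x\t0x%016llx\n",
                                   iommu_regs_64[i].offset, value);
                }
                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        }
        rcu_read_unlock();

        return 0;
}
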
217 static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
239 context = iommu_context_addr(iommu, bus, devfn, 0);
248 tbl_wlk.rt_entry = &iommu->root_entry[bus];
252 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
263 static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
267 spin_lock(&iommu->lock);
268 seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
269 (u64)virt_to_phys(iommu->root_entry));
278 ctx_tbl_walk(m, iommu, bus);
279 spin_unlock(&iommu->lock);
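
Lines 217-279 belong to the root/context table walk: root_tbl_walk() prints the root table address and iterates all 256 bus numbers under iommu->lock, while ctx_tbl_walk() looks up each (bus, devfn) context entry and skips absent ones. A compressed sketch of that control flow; the printing of the entries and the scalable-mode PASID-directory walk (triggered when DMA_RTADDR_SMT is set in DMAR_RTADDR_REG) are elided.

/* Hedged sketch of the walk, entry printing elided. */
static void ctx_tbl_walk_sketch(struct seq_file *m, struct intel_iommu *iommu,
                                u16 bus)
{
        struct context_entry *context;
        u16 devfn;

        for (devfn = 0; devfn < 256; devfn++) {
                context = iommu_context_addr(iommu, bus, devfn, 0);
                if (!context)
                        return;
                if (!context_present(context))
                        continue;

                /*
                 * Record the root entry for this bus and the context entry
                 * for this devfn, then print them.  In scalable mode the
                 * context entry points at a PASID directory, which is
                 * walked as well.
                 */
        }
}

static void root_tbl_walk_sketch(struct seq_file *m, struct intel_iommu *iommu)
{
        u16 bus;

        spin_lock(&iommu->lock);
        seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
                   (u64)virt_to_phys(iommu->root_entry));

        /* One context table per bus number. */
        for (bus = 0; bus < 256; bus++)
                ctx_tbl_walk_sketch(m, iommu, bus);
        spin_unlock(&iommu->lock);
}
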
285 struct intel_iommu *iommu;
289 for_each_active_iommu(iommu, drhd) {
290 sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
293 iommu->name);
296 root_tbl_walk(m, iommu);
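
Lines 285-296 are the top-level show routine driving that walk: units whose global status register does not report translation enabled (DMA_GSTS_TES) are skipped with a message. A short sketch; the message wording is an assumption.

static int dmar_translation_struct_sketch(struct seq_file *m, void *unused)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        u32 sts;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
                if (!(sts & DMA_GSTS_TES)) {
                        /* Translation not enabled on this unit; skip the walk. */
                        seq_printf(m, "DMA Remapping is not enabled on %s\n",
                                   iommu->name);
                        continue;
                }
                root_tbl_walk_sketch(m, iommu);
        }
        rcu_read_unlock();

        return 0;
}
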
353 struct intel_iommu *iommu;
361 for_each_active_iommu(iommu, drhd) {
366 if (seg != iommu->segment)
369 sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
372 iommu->name);
375 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
381 * The iommu->lock is held across the callback, which will
389 spin_lock(&iommu->lock);
391 context = iommu_context_addr(iommu, bus, devfn, 0);
443 iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
456 spin_unlock(&iommu->lock);
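
Lines 353-456 dump the translation structures for one specific device (and, via the per-PASID file noted at the end of the listing, one PASID). Below is a sketch of the per-IOMMU loop, with hypothetical seg/bus/devfn parameters standing in for whatever the real callback derives from the device; the actual page-table and PASID-table walks and the message wording are elided or assumed.

static void dev_translation_sketch(struct seq_file *m, u16 seg, u8 bus, u8 devfn)
{
        struct context_entry *context;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool scalable;
        u32 sts;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                /* Only the DMAR unit covering this PCI segment matters. */
                if (seg != iommu->segment)
                        continue;

                sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
                if (!(sts & DMA_GSTS_TES)) {
                        seq_printf(m, "DMA Remapping is not enabled on %s\n",
                                   iommu->name);
                        continue;
                }

                scalable = !!(dmar_readq(iommu->reg + DMAR_RTADDR_REG) &
                              DMA_RTADDR_SMT);

                /*
                 * iommu->lock is held across the walk so the context and
                 * PASID structures cannot change underneath it (it also
                 * blocks domain attach/detach for the device).
                 */
                spin_lock(&iommu->lock);
                context = iommu_context_addr(iommu, bus, devfn, 0);
                if (!context || !context_present(context)) {
                        seq_printf(m, "No context entry for %04x:%02x:%02x.%x\n",
                                   iommu->segment, bus, PCI_SLOT(devfn),
                                   PCI_FUNC(devfn));
                } else if (scalable) {
                        /* walk the PASID directory/table for this entry */
                } else {
                        /* walk the legacy second-level page table */
                }
                spin_unlock(&iommu->lock);
        }
        rcu_read_unlock();
}
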
483 struct intel_iommu *iommu)
485 int index, shift = qi_shift(iommu);
489 if (ecap_smts(iommu->ecap))
496 desc = iommu->qi->desc + offset;
497 if (ecap_smts(iommu->ecap))
501 iommu->qi->desc_status[index]);
505 iommu->qi->desc_status[index]);
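
Lines 483-505 print the invalidation queue descriptors. The detail worth calling out is that a descriptor is two quadwords (128 bits) in legacy mode and four quadwords (256 bits) in scalable mode, which is why qi_shift() scales the byte offset and why the ecap_smts() branch prints qw2/qw3 as well. A sketch, assuming QI_LENGTH entries and the column layout:

static void invalidation_queue_entry_sketch(struct seq_file *m,
                                            struct intel_iommu *iommu)
{
        int index, shift = qi_shift(iommu);  /* 16- or 32-byte descriptor stride */
        struct qi_desc *desc;
        int offset;

        for (index = 0; index < QI_LENGTH; index++) {
                offset = index << shift;
                desc = iommu->qi->desc + offset;
                if (ecap_smts(iommu->ecap))
                        /* scalable mode: four quadwords plus software status */
                        seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
                                   index, desc->qw0, desc->qw1, desc->qw2,
                                   desc->qw3, iommu->qi->desc_status[index]);
                else
                        /* legacy mode: two quadwords plus software status */
                        seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
                                   index, desc->qw0, desc->qw1,
                                   iommu->qi->desc_status[index]);
        }
}
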
512 struct intel_iommu *iommu;
518 for_each_active_iommu(iommu, drhd) {
519 qi = iommu->qi;
520 shift = qi_shift(iommu);
522 if (!qi || !ecap_qis(iommu->ecap))
525 seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);
530 dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
531 dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
532 invalidation_queue_entry_show(m, iommu);
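
Lines 512-532 wrap that per-entry dump in a loop over the active units: anything without queued invalidation (no iommu->qi or no ecap_qis()) is skipped, and the hardware head and tail registers are shifted down by qi_shift() to turn byte offsets into entry indexes. A sketch; the q_lock field name and the message text are assumptions.

static int invalidation_queue_sketch(struct seq_file *m, void *unused)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        unsigned long flags;
        struct q_inval *qi;
        int shift;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                qi = iommu->qi;
                shift = qi_shift(iommu);

                /* Nothing to dump if queued invalidation is absent or unused. */
                if (!qi || !ecap_qis(iommu->ecap))
                        continue;

                seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

                raw_spin_lock_irqsave(&qi->q_lock, flags); /* q_lock: assumed name */
                seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
                           (u64)virt_to_phys(qi->desc),
                           dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
                           dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
                invalidation_queue_entry_sketch(m, iommu);
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
        }
        rcu_read_unlock();

        return 0;
}
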
544 struct intel_iommu *iommu)
554 ri_entry = &iommu->ir_table->base[idx];
568 struct intel_iommu *iommu)
578 pi_entry = &iommu->ir_table->base[idx];
600 struct intel_iommu *iommu;
605 for_each_active_iommu(iommu, drhd) {
606 if (!ecap_ir_support(iommu->ecap))
610 iommu->name);
612 sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
613 if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
614 irta = virt_to_phys(iommu->ir_table->base);
616 ir_tbl_remap_entry_show(m, iommu);
625 for_each_active_iommu(iommu, drhd) {
626 if (!cap_pi_support(iommu->cap))
630 iommu->name);
632 if (iommu->ir_table) {
633 irta = virt_to_phys(iommu->ir_table->base);
635 ir_tbl_posted_entry_show(m, iommu);
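
Lines 544-635 cover the interrupt remapping dump: one pass over the active units prints the remapped entries for units with ecap_ir_support() and DMA_GSTS_IRES set, and a second pass prints the posted entries for units with cap_pi_support(); both passes report the physical address of iommu->ir_table->base. A sketch of the outer loops; the per-entry printers (ir_tbl_remap_entry_show()/ir_tbl_posted_entry_show(), which index iommu->ir_table->base[]) are elided and the message strings are assumptions.

static int ir_translation_struct_sketch(struct seq_file *m, void *unused)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        u64 irta;
        u32 sts;

        rcu_read_lock();
        /* Pass 1: remapped interrupt entries. */
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
                           iommu->name);

                sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
                if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
                        irta = virt_to_phys(iommu->ir_table->base);
                        seq_printf(m, " IR table address: %llx\n", irta);
                        /* walk iommu->ir_table->base[] for remapped entries */
                } else {
                        seq_puts(m, "Interrupt Remapping is not enabled\n");
                }
        }

        /* Pass 2: posted interrupt entries. */
        for_each_active_iommu(iommu, drhd) {
                if (!cap_pi_support(iommu->cap))
                        continue;

                seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
                           iommu->name);

                if (iommu->ir_table) {
                        irta = virt_to_phys(iommu->ir_table->base);
                        seq_printf(m, " PI table address: %llx\n", irta);
                        /* walk iommu->ir_table->base[] for posted entries */
                }
        }
        rcu_read_unlock();

        return 0;
}
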
648 static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
654 iommu->name, drhd->reg_base_addr);
656 ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
667 struct intel_iommu *iommu;
670 for_each_active_iommu(iommu, drhd)
671 latency_show_one(m, iommu, drhd);
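
Lines 648-671 print the per-IOMMU invalidation latency statistics: each active unit gets a header line, then a snapshot rendered by dmar_latency_snapshot() into the shared debug_buf. A sketch; the error handling and message wording are assumptions.

static void latency_show_one_sketch(struct seq_file *m, struct intel_iommu *iommu,
                                    struct dmar_drhd_unit *drhd)
{
        int ret;

        seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
                   iommu->name, drhd->reg_base_addr);

        /* Render the latency counters for this unit into debug_buf. */
        ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
        if (ret < 0)
                seq_puts(m, "Failed to get latency snapshot\n");
        else
                seq_puts(m, debug_buf);
}

static int latency_sketch(struct seq_file *m, void *v)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd)
                latency_show_one_sketch(m, iommu, drhd);
        rcu_read_unlock();

        return 0;
}
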
687 struct intel_iommu *iommu;
705 for_each_active_iommu(iommu, drhd) {
706 dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
707 dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
708 dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
714 for_each_active_iommu(iommu, drhd)
715 dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
720 for_each_active_iommu(iommu, drhd)
721 dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
726 for_each_active_iommu(iommu, drhd)
727 dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
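
Lines 687-727 are the write side of the latency file: the value written from userspace selects what to track, with 0 disabling all counters and the following values enabling IOTLB, device-TLB, and IEC invalidation latency tracking respectively. A sketch of that switch; the user-buffer parsing and the exact case numbering are assumptions inferred from the enable/disable calls in the listing.

static ssize_t dmar_perf_latency_write_sketch(struct file *filp,
                                              const char __user *ubuf,
                                              size_t cnt, loff_t *ppos)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int counting;
        char buf[64];

        if (cnt > 63)
                cnt = 63;
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = 0;
        if (kstrtoint(buf, 0, &counting))
                return -EINVAL;

        switch (counting) {
        case 0:         /* stop all latency counting */
                rcu_read_lock();
                for_each_active_iommu(iommu, drhd) {
                        dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
                        dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
                        dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
                }
                rcu_read_unlock();
                break;
        case 1:         /* count IOTLB invalidation latency */
                rcu_read_lock();
                for_each_active_iommu(iommu, drhd)
                        dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
                rcu_read_unlock();
                break;
        /* cases 2 and 3 enable DMAR_LATENCY_INV_DEVTLB and
         * DMAR_LATENCY_INV_IEC in the same way */
        default:
                return -EINVAL;
        }

        *ppos += cnt;
        return cnt;
}
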
768 * /sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct
788 * /sys/kernel/debug/iommu/intel/0000:00:01.0/1/domain_translation_struct
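
The two comment fragments above record where the per-device dump files live in debugfs: a domain_translation_struct file per device, plus a per-PASID variant under a numbered subdirectory. A small userspace usage example (assuming debugfs is mounted at /sys/kernel/debug and that this particular device path exists on the system):

#include <stdio.h>

int main(void)
{
        /* Example path; substitute the BDF of the device of interest. */
        const char *path =
                "/sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
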