Lines matching refs: iommu (Intel VT-d interrupt remapping code)

23 #include "iommu.h"
33 struct intel_iommu *iommu;
40 struct intel_iommu *iommu;
47 struct intel_iommu *iommu;
74 * ->iommu->register_lock
83 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
87 static bool ir_pre_enabled(struct intel_iommu *iommu)
89 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
92 static void clear_ir_pre_enabled(struct intel_iommu *iommu)
94 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
97 static void init_ir_status(struct intel_iommu *iommu)
101 gsts = readl(iommu->reg + DMAR_GSTS_REG);
103 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
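
The three fragments above (lines 87-103) belong to the small helpers that track whether firmware or a previous kernel (kexec) left interrupt remapping already enabled. A minimal reconstruction sketch follows; the gsts local and the DMA_GSTS_IRES status-bit test are assumptions filled in around the matched lines, not taken from them.

	static bool ir_pre_enabled(struct intel_iommu *iommu)
	{
		return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
	}

	static void clear_ir_pre_enabled(struct intel_iommu *iommu)
	{
		iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
	}

	static void init_ir_status(struct intel_iommu *iommu)
	{
		u32 gsts;

		/* Remember if hardware already has IR enabled (e.g. after kexec). */
		gsts = readl(iommu->reg + DMAR_GSTS_REG);
		if (gsts & DMA_GSTS_IRES)	/* assumed status bit */
			iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
	}
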
106 static int alloc_irte(struct intel_iommu *iommu,
109 struct ir_table *table = iommu->ir_table;
122 if (mask > ecap_max_handle_mask(iommu->ecap)) {
125 ecap_max_handle_mask(iommu->ecap));
133 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
135 irq_iommu->iommu = iommu;
146 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
156 return qi_submit_sync(iommu, &desc, 1, 0);
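
The matches at lines 146 and 156 are the entry and exit of qi_flush_iec(), which invalidates interrupt entry cache entries through the queued-invalidation interface. A sketch under the assumption that the descriptor is encoded with the driver's QI_IEC_* macros and the qw0..qw3 descriptor layout implied by the four-argument qi_submit_sync() call:

	static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
	{
		struct qi_desc desc;

		/* Selective IEC invalidation of the IRTE(s) starting at 'index'. */
		desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
			   | QI_IEC_SELECTIVE;
		desc.qw1 = 0;
		desc.qw2 = 0;
		desc.qw3 = 0;

		return qi_submit_sync(iommu, &desc, 1, 0);
	}
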
162 struct intel_iommu *iommu;
172 iommu = irq_iommu->iommu;
175 irte = &iommu->ir_table->base[index];
190 __iommu_flush_cache(iommu, irte, sizeof(*irte));
192 rc = qi_flush_iec(iommu, index, 0);
194 /* Update iommu mode according to the IRTE mode */
206 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
207 return ir_hpet[i].iommu;
217 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
218 return ir_ioapic[i].iommu;
227 return drhd ? drhd->iommu->ir_domain : NULL;
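
Lines 206-227 come from the lookup helpers that translate an HPET id, an IO-APIC id, or a PCI device into the intel_iommu (or its ir_domain) that remaps its interrupts. A sketch assuming the usual MAX_HPET_TBS/MAX_IO_APICS array bounds and dmar_find_matched_drhd_unit() for the device case:

	static struct intel_iommu *map_hpet_to_iommu(u8 hpet_id)
	{
		int i;

		for (i = 0; i < MAX_HPET_TBS; i++)
			if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
				return ir_hpet[i].iommu;

		return NULL;
	}

	static struct intel_iommu *map_ioapic_to_iommu(int apic)
	{
		int i;

		for (i = 0; i < MAX_IO_APICS; i++)
			if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
				return ir_ioapic[i].iommu;

		return NULL;
	}

	static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
	{
		struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);

		return drhd ? drhd->iommu->ir_domain : NULL;
	}
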
233 struct intel_iommu *iommu;
239 iommu = irq_iommu->iommu;
242 start = iommu->ir_table->base + index;
249 bitmap_release_region(iommu->ir_table->bitmap, index,
252 return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
311 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
336 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
421 static int iommu_load_old_irte(struct intel_iommu *iommu)
430 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
444 memcpy(iommu->ir_table->base, old_ir_table, size);
446 __iommu_flush_cache(iommu, iommu->ir_table->base, size);
453 if (iommu->ir_table->base[i].present)
454 bitmap_set(iommu->ir_table->bitmap, i, 1);
463 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
469 addr = virt_to_phys((void *)iommu->ir_table->base);
471 raw_spin_lock_irqsave(&iommu->register_lock, flags);
473 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
477 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
479 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
481 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
487 if (!cap_esirtps(iommu->cap))
488 qi_global_iec(iommu);
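
Lines 463-488 are from iommu_set_irq_remapping(), which points the hardware at the interrupt remapping table and then flushes the interrupt entry cache unless the capability says the set-root-table operation already invalidates it. A hedged reconstruction; the IRTA encoding macros (IR_X2APIC_MODE, INTR_REMAP_TABLE_REG_SIZE) and the DMA_GSTS_IRTPS wait condition are filled in as assumptions:

	static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
	{
		unsigned long flags;
		u64 addr;
		u32 sts;

		addr = virt_to_phys((void *)iommu->ir_table->base);

		raw_spin_lock_irqsave(&iommu->register_lock, flags);

		/* Program table address, x2APIC (EIM) mode and table size. */
		dmar_writeq(iommu->reg + DMAR_IRTA_REG,
			    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

		/* Latch the interrupt remap table pointer. */
		writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, (sts & DMA_GSTS_IRTPS), sts);
		raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

		/*
		 * Globally invalidate the interrupt entry cache unless the
		 * hardware does so itself when the table pointer is set.
		 */
		if (!cap_esirtps(iommu->cap))
			qi_global_iec(iommu);
	}
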
491 static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
496 raw_spin_lock_irqsave(&iommu->register_lock, flags);
499 iommu->gcmd |= DMA_GCMD_IRE;
500 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
501 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
506 iommu->gcmd &= ~DMA_GCMD_CFI;
507 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
508 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
522 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
525 static int intel_setup_irq_remapping(struct intel_iommu *iommu)
532 if (iommu->ir_table)
539 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
543 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
549 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
553 fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
557 iommu->ir_domain =
561 iommu);
562 if (!iommu->ir_domain) {
563 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
567 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
568 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
571 if (cap_caching_mode(iommu->cap))
572 iommu->ir_domain->msi_parent_ops = &virt_dmar_msi_parent_ops;
574 iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
578 iommu->ir_table = ir_table;
584 if (!iommu->qi) {
588 dmar_fault(-1, iommu);
589 dmar_disable_qi(iommu);
591 if (dmar_enable_qi(iommu)) {
597 init_ir_status(iommu);
599 if (ir_pre_enabled(iommu)) {
602 iommu->name);
603 clear_ir_pre_enabled(iommu);
604 iommu_disable_irq_remapping(iommu);
605 } else if (iommu_load_old_irte(iommu))
607 iommu->name);
610 iommu->name);
613 iommu_set_irq_remapping(iommu, eim_mode);
618 irq_domain_remove(iommu->ir_domain);
619 iommu->ir_domain = NULL;
629 iommu->ir_table = NULL;
634 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
638 if (iommu && iommu->ir_table) {
639 if (iommu->ir_domain) {
640 fn = iommu->ir_domain->fwnode;
642 irq_domain_remove(iommu->ir_domain);
644 iommu->ir_domain = NULL;
646 free_pages((unsigned long)iommu->ir_table->base,
648 bitmap_free(iommu->ir_table->bitmap);
649 kfree(iommu->ir_table);
650 iommu->ir_table = NULL;
657 static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
662 if (!ecap_ir_support(iommu->ecap))
669 if (!cap_esirtps(iommu->cap))
670 qi_global_iec(iommu);
672 raw_spin_lock_irqsave(&iommu->register_lock, flags);
674 sts = readl(iommu->reg + DMAR_GSTS_REG);
678 iommu->gcmd &= ~DMA_GCMD_IRE;
679 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
681 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
685 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
700 struct intel_iommu *iommu;
702 for_each_iommu(iommu, drhd) {
703 if (ecap_ir_support(iommu->ecap)) {
704 iommu_disable_irq_remapping(iommu);
705 intel_teardown_irq_remapping(iommu);
716 struct intel_iommu *iommu;
744 for_each_iommu(iommu, drhd)
745 if (!ecap_ir_support(iommu->ecap))
757 for_each_iommu(iommu, drhd) {
758 if (eim && !ecap_eim_support(iommu->ecap)) {
759 pr_info("%s does not support EIM\n", iommu->name);
769 for_each_iommu(iommu, drhd) {
770 if (intel_setup_irq_remapping(iommu)) {
772 iommu->name);
790 struct intel_iommu *iommu;
804 for_each_iommu(iommu, drhd)
805 if (!cap_pi_support(iommu->cap)) {
816 struct intel_iommu *iommu;
822 for_each_iommu(iommu, drhd) {
823 if (!ir_pre_enabled(iommu))
824 iommu_enable_irq_remapping(iommu);
845 struct intel_iommu *iommu,
868 if (ir_hpet[count].iommu == iommu &&
871 else if (ir_hpet[count].iommu == NULL && free == -1)
879 ir_hpet[free].iommu = iommu;
890 struct intel_iommu *iommu,
913 if (ir_ioapic[count].iommu == iommu &&
916 else if (ir_ioapic[count].iommu == NULL && free == -1)
926 ir_ioapic[free].iommu = iommu;
929 scope->enumeration_id, drhd->address, iommu->seq_id);
935 struct intel_iommu *iommu)
949 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
951 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
958 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
963 if (ir_hpet[i].iommu == iommu)
964 ir_hpet[i].iommu = NULL;
967 if (ir_ioapic[i].iommu == iommu)
968 ir_ioapic[i].iommu = NULL;
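
Lines 958-968 form ir_remove_ioapic_hpet_scope(), the teardown counterpart of the scope parsers above: it clears every ir_hpet[]/ir_ioapic[] slot that still points at the IOMMU being removed. A short sketch with the same assumed array bounds as before:

	static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
	{
		int i;

		for (i = 0; i < MAX_HPET_TBS; i++)
			if (ir_hpet[i].iommu == iommu)
				ir_hpet[i].iommu = NULL;

		for (i = 0; i < MAX_IO_APICS; i++)
			if (ir_ioapic[i].iommu == iommu)
				ir_ioapic[i].iommu = NULL;
	}
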
978 struct intel_iommu *iommu;
982 for_each_iommu(iommu, drhd) {
985 if (!ecap_ir_support(iommu->ecap))
988 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
1001 pr_err(FW_BUG "ioapic %d has no mapping iommu, "
1029 struct intel_iommu *iommu = NULL;
1034 for_each_iommu(iommu, drhd) {
1035 if (!ecap_ir_support(iommu->ecap))
1038 iommu_disable_irq_remapping(iommu);
1052 struct intel_iommu *iommu = NULL;
1054 for_each_iommu(iommu, drhd)
1055 if (iommu->qi)
1056 dmar_reenable_qi(iommu);
1061 for_each_iommu(iommu, drhd) {
1062 if (!ecap_ir_support(iommu->ecap))
1065 /* Set up interrupt remapping for iommu.*/
1066 iommu_set_irq_remapping(iommu, eim);
1067 iommu_enable_irq_remapping(iommu);
1313 struct intel_iommu *iommu = domain->host_data;
1320 if (!info || !iommu)
1334 index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
1405 struct intel_iommu *iommu = NULL;
1408 iommu = map_ioapic_to_iommu(fwspec->param[0]);
1410 iommu = map_hpet_to_iommu(fwspec->param[0]);
1412 return iommu && d == iommu->ir_domain;
1441 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1446 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
1450 if (eim && !ecap_eim_support(iommu->ecap)) {
1452 iommu->reg_phys, iommu->ecap);
1456 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1458 iommu->reg_phys);
1465 ret = intel_setup_irq_remapping(iommu);
1468 iommu->name);
1469 intel_teardown_irq_remapping(iommu);
1470 ir_remove_ioapic_hpet_scope(iommu);
1472 iommu_enable_irq_remapping(iommu);
1481 struct intel_iommu *iommu = dmaru->iommu;
1485 if (iommu == NULL)
1487 if (!ecap_ir_support(iommu->ecap))
1490 !cap_pi_support(iommu->cap))
1494 if (!iommu->ir_table)
1495 ret = dmar_ir_add(dmaru, iommu);
1497 if (iommu->ir_table) {
1498 if (!bitmap_empty(iommu->ir_table->bitmap,
1502 iommu_disable_irq_remapping(iommu);
1503 intel_teardown_irq_remapping(iommu);
1504 ir_remove_ioapic_hpet_scope(iommu);