Lines Matching defs:iommu

28 #include <linux/iommu.h>
33 #include "iommu.h"
35 #include "../iommu-pages.h"
68 static void free_iommu(struct intel_iommu *iommu);
462 if (dmaru->iommu)
463 free_iommu(dmaru->iommu);
502 drhd->iommu->node = node;
940 x86_init.iommu.iommu_init = intel_iommu_init;
953 static void unmap_iommu(struct intel_iommu *iommu)
955 iounmap(iommu->reg);
956 release_mem_region(iommu->reg_phys, iommu->reg_size);
960 * map_iommu: map the iommu's registers
961 * @iommu: the iommu to map
964 * Memory map the iommu's registers. Start w/ a single page, and
967 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
972 iommu->reg_phys = phys_addr;
973 iommu->reg_size = drhd->reg_size;
975 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
981 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
982 if (!iommu->reg) {
988 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
989 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
991 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
998 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
999 cap_max_fault_reg_offset(iommu->cap));
1001 if (map_size > iommu->reg_size) {
1002 iounmap(iommu->reg);
1003 release_mem_region(iommu->reg_phys, iommu->reg_size);
1004 iommu->reg_size = map_size;
1005 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1006 iommu->name)) {
1011 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1012 if (!iommu->reg) {
1019 if (cap_ecmds(iommu->cap)) {
1023 iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
1032 iounmap(iommu->reg);
1034 release_mem_region(iommu->reg_phys, iommu->reg_size);
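The map_iommu() fragments above follow a probe-then-grow MMIO pattern: reserve and map a single page, read DMAR_CAP_REG/DMAR_ECAP_REG, and if the capability-derived IOTLB or fault-register offsets fall outside that first page, tear the window down and remap it at the larger size. A minimal sketch of that pattern follows; names ending in _sketch are illustrative, only request_mem_region()/ioremap() and their teardown calls are real kernel APIs.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mm.h>

/*
 * Sketch of the "map one page, then grow if the registers extend past it"
 * pattern used by map_iommu(). 'needed' stands in for the size derived
 * from CAP/ECAP after the first mapping.
 */
static void __iomem *map_regs_sketch(phys_addr_t phys, size_t needed,
                                     size_t *mapped, const char *name)
{
        size_t size = PAGE_SIZE;
        void __iomem *regs;

        if (!request_mem_region(phys, size, name))
                return NULL;
        regs = ioremap(phys, size);
        if (!regs)
                goto release;

        /* Grow the window if the registers extend past the first page. */
        if (needed > size) {
                iounmap(regs);
                release_mem_region(phys, size);
                size = needed;
                if (!request_mem_region(phys, size, name))
                        return NULL;
                regs = ioremap(phys, size);
                if (!regs)
                        goto release;
        }

        *mapped = size;
        return regs;

release:
        release_mem_region(phys, size);
        return NULL;
}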
1041 struct intel_iommu *iommu;
1052 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1053 if (!iommu)
1056 iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1058 if (iommu->seq_id < 0) {
1060 err = iommu->seq_id;
1063 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1065 err = map_iommu(iommu, drhd);
1067 pr_err("Failed to map %s\n", iommu->name);
1071 if (!cap_sagaw(iommu->cap) &&
1072 (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1074 iommu->name);
1079 agaw = iommu_calculate_agaw(iommu);
1081 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1082 iommu->seq_id);
1087 msagaw = iommu_calculate_max_sagaw(iommu);
1089 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1090 iommu->seq_id);
1095 iommu->agaw = agaw;
1096 iommu->msagaw = msagaw;
1097 iommu->segment = drhd->segment;
1098 iommu->device_rbtree = RB_ROOT;
1099 spin_lock_init(&iommu->device_rbtree_lock);
1100 mutex_init(&iommu->iopf_lock);
1101 iommu->node = NUMA_NO_NODE;
1103 ver = readl(iommu->reg + DMAR_VER_REG);
1105 iommu->name,
1108 (unsigned long long)iommu->cap,
1109 (unsigned long long)iommu->ecap);
1112 sts = readl(iommu->reg + DMAR_GSTS_REG);
1114 iommu->gcmd |= DMA_GCMD_IRE;
1116 iommu->gcmd |= DMA_GCMD_TE;
1118 iommu->gcmd |= DMA_GCMD_QIE;
1120 if (alloc_iommu_pmu(iommu))
1121 pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1123 raw_spin_lock_init(&iommu->register_lock);
1129 if (pasid_supported(iommu))
1130 iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
1138 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1140 "%s", iommu->name);
1144 err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1148 iommu_pmu_register(iommu);
1151 drhd->iommu = iommu;
1152 iommu->drhd = drhd;
1157 iommu_device_sysfs_remove(&iommu->iommu);
1159 free_iommu_pmu(iommu);
1160 unmap_iommu(iommu);
1162 ida_free(&dmar_seq_ids, iommu->seq_id);
1164 kfree(iommu);
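The alloc_iommu() fragments above set the unit up in a fixed order (sequence ID, register mapping, AGAW calculation, sysfs entry, core registration, PMU) and unwind in reverse order on failure via goto labels. A compressed sketch of that goto-unwind idiom, with hypothetical step/undo helper names standing in for map_iommu(), iommu_device_sysfs_add(), iommu_device_register() and their counterparts:

/*
 * Sketch of the reverse-order error unwinding seen in alloc_iommu().
 * The *_sketch helpers are placeholders, not kernel functions.
 */
static int bring_up_sketch(struct intel_iommu *iommu)
{
        int err;

        err = map_step_sketch(iommu);           /* e.g. map_iommu() */
        if (err)
                return err;

        err = sysfs_step_sketch(iommu);         /* e.g. iommu_device_sysfs_add() */
        if (err)
                goto err_unmap;

        err = register_step_sketch(iommu);      /* e.g. iommu_device_register() */
        if (err)
                goto err_sysfs;

        return 0;

err_sysfs:
        sysfs_undo_sketch(iommu);               /* e.g. iommu_device_sysfs_remove() */
err_unmap:
        unmap_step_sketch(iommu);               /* e.g. unmap_iommu() */
        return err;
}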
1168 static void free_iommu(struct intel_iommu *iommu)
1170 if (intel_iommu_enabled && !iommu->drhd->ignored) {
1171 iommu_pmu_unregister(iommu);
1172 iommu_device_unregister(&iommu->iommu);
1173 iommu_device_sysfs_remove(&iommu->iommu);
1176 free_iommu_pmu(iommu);
1178 if (iommu->irq) {
1179 if (iommu->pr_irq) {
1180 free_irq(iommu->pr_irq, iommu);
1181 dmar_free_hwirq(iommu->pr_irq);
1182 iommu->pr_irq = 0;
1184 free_irq(iommu->irq, iommu);
1185 dmar_free_hwirq(iommu->irq);
1186 iommu->irq = 0;
1189 if (iommu->qi) {
1190 iommu_free_page(iommu->qi->desc);
1191 kfree(iommu->qi->desc_status);
1192 kfree(iommu->qi);
1195 if (iommu->reg)
1196 unmap_iommu(iommu);
1198 ida_free(&dmar_seq_ids, iommu->seq_id);
1199 kfree(iommu);
1241 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1243 unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1244 u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1245 struct qi_desc *desc = iommu->qi->desc + head;
1262 head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1263 head <<= qi_shift(iommu);
1264 desc = iommu->qi->desc + head;
1272 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1278 struct q_inval *qi = iommu->qi;
1279 int shift = qi_shift(iommu);
1284 fault = readl(iommu->reg + DMAR_FSTS_REG);
1286 qi_dump_fault(iommu, fault);
1294 head = readl(iommu->reg + DMAR_IQH_REG);
1305 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1316 head = readl(iommu->reg + DMAR_IQH_REG);
1319 tail = readl(iommu->reg + DMAR_IQT_REG);
1326 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1329 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1346 dev = device_rbtree_find(iommu, ite_sid);
1356 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
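The DMAR_IQH_REG head value read in qi_dump_fault() is a byte offset into the descriptor ring, so the code shifts it down to a descriptor index, steps back one slot modulo QI_LENGTH, and shifts it back up to find the descriptor submitted just before the head. A standalone sketch of that arithmetic, assuming a 4-bit shift (16-byte legacy descriptors) and a QI_LENGTH of 256:

#include <stdio.h>

#define QI_LENGTH_SKETCH 256    /* descriptors per ring, assumed */

/* Byte offset of the descriptor submitted just before 'head'. */
static unsigned int prev_desc_offset(unsigned int head, int shift)
{
        unsigned int index = head >> shift;                         /* byte offset -> slot */

        index = (index + QI_LENGTH_SKETCH - 1) % QI_LENGTH_SKETCH;  /* step back, wrapping */
        return index << shift;                                      /* slot -> byte offset */
}

int main(void)
{
        /* head at slot 0: the previous descriptor is the last slot of the ring */
        printf("%#x\n", prev_desc_offset(0x0, 4));      /* 0xff0 */
        printf("%#x\n", prev_desc_offset(0x20, 4));     /* 0x10  */
        return 0;
}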
1370 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1373 struct q_inval *qi = iommu->qi;
1390 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1394 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1398 dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1418 shift = qi_shift(iommu);
1424 trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1447 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1457 rc = qi_check_fault(iommu, index, wait_index);
1476 dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1480 dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1484 dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1493 void qi_global_iec(struct intel_iommu *iommu)
1503 qi_submit_sync(iommu, &desc, 1, 0);
1506 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1517 qi_submit_sync(iommu, &desc, 1, 0);
1520 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1528 if (cap_write_drain(iommu->cap))
1531 if (cap_read_drain(iommu->cap))
1541 qi_submit_sync(iommu, &desc, 1, 0);
1544 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1555 if (!(iommu->gcmd & DMA_GCMD_TE))
1572 qi_submit_sync(iommu, &desc, 1, 0);
1576 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1613 qi_submit_sync(iommu, &desc, 1, 0);
1617 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1629 if (!(iommu->gcmd & DMA_GCMD_TE))
1666 qi_submit_sync(iommu, &desc, 1, 0);
1669 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1676 qi_submit_sync(iommu, &desc, 1, 0);
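Each qi_flush_*() helper above fills a struct qi_desc (four 64-bit words in the kernel's definition) and hands it to qi_submit_sync(), which queues it and blocks until the hardware has consumed it. A hedged sketch of that calling pattern; the qw0 encoding below is a placeholder, not the real VT-d descriptor layout:

/*
 * Sketch of the pattern shared by the qi_flush_*() helpers: build one
 * descriptor, submit synchronously. INV_TYPE_SKETCH is a placeholder for
 * the real type/granularity encoding used by the individual helpers.
 */
#define INV_TYPE_SKETCH 0

static void flush_one_sketch(struct intel_iommu *iommu)
{
        struct qi_desc desc = {};

        desc.qw0 = INV_TYPE_SKETCH;     /* invalidation type + granularity */
        desc.qw1 = 0;                   /* address / domain / PASID payload */

        /* one descriptor, no submission flags, wait for completion */
        qi_submit_sync(iommu, &desc, 1, 0);
}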
1682 void dmar_disable_qi(struct intel_iommu *iommu)
1688 if (!ecap_qis(iommu->ecap))
1691 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1693 sts = readl(iommu->reg + DMAR_GSTS_REG);
1700 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1701 readl(iommu->reg + DMAR_IQH_REG)) &&
1705 iommu->gcmd &= ~DMA_GCMD_QIE;
1706 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1708 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1711 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1717 static void __dmar_enable_qi(struct intel_iommu *iommu)
1721 struct q_inval *qi = iommu->qi;
1731 if (ecap_smts(iommu->ecap))
1734 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1737 writel(0, iommu->reg + DMAR_IQT_REG);
1739 dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1741 iommu->gcmd |= DMA_GCMD_QIE;
1742 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1745 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1747 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1755 int dmar_enable_qi(struct intel_iommu *iommu)
1761 if (!ecap_qis(iommu->ecap))
1767 if (iommu->qi)
1770 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1771 if (!iommu->qi)
1774 qi = iommu->qi;
1780 order = ecap_smts(iommu->ecap) ? 1 : 0;
1781 desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
1784 iommu->qi = NULL;
1794 iommu->qi = NULL;
1800 __dmar_enable_qi(iommu);
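dmar_enable_qi() sizes the descriptor ring from the scalable-mode capability: legacy descriptors are 16 bytes and scalable-mode descriptors 32 bytes, so with an assumed QI_LENGTH of 256 the ring needs one page (order 0) or two pages (order 1), matching the `order = ecap_smts(iommu->ecap) ? 1 : 0` line above. A standalone sketch of that sizing:

#include <stdio.h>

#define QI_LENGTH_SKETCH  256   /* descriptors per ring, assumed */
#define PAGE_SIZE_SKETCH  4096

static int qi_ring_order(int scalable_mode)
{
        size_t desc_size = scalable_mode ? 32 : 16;     /* bytes per descriptor */
        size_t ring_bytes = QI_LENGTH_SKETCH * desc_size;
        int order = 0;

        while (((size_t)PAGE_SIZE_SKETCH << order) < ring_bytes)
                order++;
        return order;
}

int main(void)
{
        printf("legacy ring:   order %d\n", qi_ring_order(0));  /* 0 -> one page  */
        printf("scalable ring: order %d\n", qi_ring_order(1));  /* 1 -> two pages */
        return 0;
}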
1805 /* iommu interrupt handling. Most of it is MSI-like. */
1920 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1922 if (iommu->irq == irq)
1924 else if (iommu->pr_irq == irq)
1926 else if (iommu->perf_irq == irq)
1934 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1935 int reg = dmar_msi_reg(iommu, data->irq);
1939 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1940 writel(0, iommu->reg + reg);
1942 readl(iommu->reg + reg);
1943 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1948 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1949 int reg = dmar_msi_reg(iommu, data->irq);
1953 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1954 writel(DMA_FECTL_IM, iommu->reg + reg);
1956 readl(iommu->reg + reg);
1957 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1962 struct intel_iommu *iommu = irq_get_handler_data(irq);
1963 int reg = dmar_msi_reg(iommu, irq);
1966 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1967 writel(msg->data, iommu->reg + reg + 4);
1968 writel(msg->address_lo, iommu->reg + reg + 8);
1969 writel(msg->address_hi, iommu->reg + reg + 12);
1970 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1975 struct intel_iommu *iommu = irq_get_handler_data(irq);
1976 int reg = dmar_msi_reg(iommu, irq);
1979 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1980 msg->data = readl(iommu->reg + reg + 4);
1981 msg->address_lo = readl(iommu->reg + reg + 8);
1982 msg->address_hi = readl(iommu->reg + reg + 12);
1983 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
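The dmar_msi_*() handlers above address each MSI register block as four consecutive 32-bit registers: a control/mask register at the base (unmasked by writing 0, masked with DMA_FECTL_IM), then data, address-low and address-high at +4, +8 and +12. A descriptive sketch of that layout, inferred only from the offsets used above; the driver itself works with raw offsets rather than a struct:

#include <linux/types.h>

/* Layout implied by the dmar_msi_{write,read,mask,unmask}() accesses. */
struct dmar_msi_block_sketch {
        u32 control;    /* +0:  mask/unmask (DMA_FECTL_IM)  */
        u32 data;       /* +4:  msg->data                   */
        u32 addr_lo;    /* +8:  msg->address_lo             */
        u32 addr_hi;    /* +12: msg->address_hi             */
};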
1986 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
2017 dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
2025 struct intel_iommu *iommu = dev_id;
2033 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2034 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2043 reg = cap_fault_reg_offset(iommu->cap);
2056 data = readl(iommu->reg + reg +
2066 data = readl(iommu->reg + reg +
2071 guest_addr = dmar_readq(iommu->reg + reg +
2077 writel(DMA_FRCD_F, iommu->reg + reg +
2080 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2084 dmar_fault_do_one(iommu, type, fault_reason,
2089 if (fault_index >= cap_num_fault_regs(iommu->cap))
2091 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2095 iommu->reg + DMAR_FSTS_REG);
2098 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
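dmar_fault() walks the fault recording registers starting at cap_fault_reg_offset() and wraps around once the index reaches cap_num_fault_regs(), dropping the register lock to log each fault and re-taking it for the next record. Assuming the architectural 16-byte stride per fault recording register (the stride is not visible in the lines above), the byte offset of record N works out as in this standalone sketch:

#include <stdio.h>

#define FAULT_REC_LEN_SKETCH 16 /* bytes per fault recording register, assumed */

/* Byte offset of fault record 'index', wrapping after 'num_regs' records. */
static unsigned int fault_rec_offset(unsigned int fault_reg_base,
                                     unsigned int index, unsigned int num_regs)
{
        return fault_reg_base + (index % num_regs) * FAULT_REC_LEN_SKETCH;
}

int main(void)
{
        /* e.g. base 0x200 with 8 records: record 9 wraps back to record 1 */
        printf("%#x\n", fault_rec_offset(0x200, 9, 8)); /* 0x210 */
        return 0;
}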
2102 int dmar_set_interrupt(struct intel_iommu *iommu)
2109 if (iommu->irq)
2112 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2114 iommu->irq = irq;
2120 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2129 struct intel_iommu *iommu;
2134 for_each_iommu(iommu, drhd) {
2138 if (iommu->irq || iommu->node != cpu_to_node(cpu))
2141 ret = dmar_set_interrupt(iommu);
2152 dmar_fault(iommu->irq, iommu);
2153 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2154 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
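The fragments above show the per-DRHD bring-up of fault handling: allocate and request the fault interrupt, call dmar_fault() once to drain any faults latched before the handler existed, then clear DMAR_FSTS_REG. A condensed sketch of that per-unit sequence, using only calls that appear in the listing and omitting error handling:

/*
 * Sketch of the per-unit fault-handling bring-up mirrored from the
 * fragments above.
 */
static void enable_fault_handling_sketch(struct intel_iommu *iommu)
{
        u32 fault_status;

        if (dmar_set_interrupt(iommu))          /* dmar_alloc_hwirq() + request_irq(dmar_fault) */
                return;

        /* drain faults that were pending before the handler was installed */
        dmar_fault(iommu->irq, iommu);

        /* clear any latched fault status bits */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);
}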
2163 int dmar_reenable_qi(struct intel_iommu *iommu)
2165 if (!ecap_qis(iommu->ecap))
2168 if (!iommu->qi)
2174 dmar_disable_qi(iommu);
2180 __dmar_enable_qi(iommu);