Lines Matching defs:iommu

27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
126 * released by the iommu subsystem after being returned. The caller
130 struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
136 spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
137 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
140 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
145 static int device_rbtree_insert(struct intel_iommu *iommu,
151 spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
152 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
153 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
162 struct intel_iommu *iommu = info->iommu;
165 spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
166 rb_erase(&info->node, &iommu->device_rbtree);
167 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
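
The three helpers above (130/145/166) keep per-IOMMU device_domain_info nodes in an rb-tree keyed by the PCI requester ID, under device_rbtree_lock. rb_find() and rb_find_add() only need two comparison callbacks; a minimal sketch of what they look like follows the shape of the driver, but treat it as illustrative rather than the exact implementation.

    /* Sketch: compare a u16 RID key against the RID stored in a tree node. */
    static int device_rid_cmp_key(const void *key, const struct rb_node *node)
    {
            struct device_domain_info *info =
                    rb_entry(node, struct device_domain_info, node);
            u16 rid = PCI_DEVID(info->bus, info->devfn);
            const u16 *key_rid = key;

            if (*key_rid < rid)
                    return -1;
            if (*key_rid > rid)
                    return 1;
            return 0;
    }

    /* Sketch: node-vs-node comparison used by rb_find_add() on insert. */
    static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
    {
            struct device_domain_info *info =
                    rb_entry(lhs, struct device_domain_info, node);
            u16 rid = PCI_DEVID(info->bus, info->devfn);

            return device_rid_cmp_key(&rid, rhs);
    }
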
173 * 2. It maps to each iommu if successful.
174 * 3. Each iommu maps to this domain if successful.
200 struct intel_iommu *iommu; /* the corresponding iommu */
230 static bool translation_pre_enabled(struct intel_iommu *iommu)
232 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
235 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
237 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
240 static void init_translation_status(struct intel_iommu *iommu)
244 gsts = readl(iommu->reg + DMAR_GSTS_REG);
246 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
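
For context, the pre-enabled check at 240/244/246 just reads the global status register and latches a flag when the TE (translation enable) bit is already set, e.g. after kexec or when firmware left remapping on. A minimal reconstruction of the elided body, assuming the standard DMA_GSTS_TES bit definition:

    static void init_translation_status(struct intel_iommu *iommu)
    {
            u32 gsts = readl(iommu->reg + DMAR_GSTS_REG);

            /* A previous kernel or firmware left DMA remapping enabled. */
            if (gsts & DMA_GSTS_TES)
                    iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
    }
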
266 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
269 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
313 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
317 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
318 sl_sagaw = cap_sagaw(iommu->cap);
321 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
325 if (!ecap_slts(iommu->ecap))
331 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
336 sagaw = __iommu_calculate_sagaw(iommu);
346 * Calculate max SAGAW for each iommu.
348 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
350 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
354 * calculate agaw for each iommu.
358 int iommu_calculate_agaw(struct intel_iommu *iommu)
360 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
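
The AGAW helpers at 313-360 boil down to: build the set of supported address widths (first-level 4/5-level paging and/or the second-level SAGAW capability bits), then pick the widest one that does not exceed the requested width. A sketch of the selection loop, assuming the width_to_agaw() helper from the driver header:

    static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
    {
            unsigned long sagaw = __iommu_calculate_sagaw(iommu);
            int agaw;

            /* Walk down from the requested width to the first supported AGAW. */
            for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
                    if (test_bit(agaw, &sagaw))
                            break;
            }

            return agaw;    /* -1 when no suitable width is supported */
    }
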
363 static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
365 return sm_supported(iommu) ?
366 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
373 struct intel_iommu *iommu;
380 if (!iommu_paging_structure_coherency(info->iommu)) {
390 for_each_active_iommu(iommu, drhd) {
391 if (!iommu_paging_structure_coherency(iommu)) {
403 struct intel_iommu *iommu;
411 for_each_active_iommu(iommu, drhd) {
412 if (iommu != skip) {
414 if (!cap_fl1gp_support(iommu->cap))
417 mask &= cap_super_page_val(iommu->cap);
498 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
501 struct root_entry *root = &iommu->root_entry[bus];
509 if (!alloc && context_copied(iommu, bus, devfn))
513 if (sm_supported(iommu)) {
527 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
531 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
534 __iommu_flush_cache(iommu, entry, sizeof(*entry));
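
iommu_context_addr() at 498 resolves (bus, devfn) to a context entry: root_entry[bus] points at a context table, which is allocated on demand and flushed when the hardware is not coherent. The sketch below covers the legacy-mode lookup only (scalable mode splits the root entry into lower/upper halves and uses 16-byte context entries); the helper name is hypothetical:

    /* Sketch, legacy mode: root_entry[bus] -> 4 KiB context table[devfn]. */
    static struct context_entry *ctx_lookup_legacy(struct intel_iommu *iommu,
                                                   u8 bus, u8 devfn)
    {
            struct root_entry *root = &iommu->root_entry[bus];
            struct context_entry *table;

            if (!(root->lo & 1))            /* present bit clear: no table yet */
                    return NULL;

            table = phys_to_virt(root->lo & VTD_PAGE_MASK);
            return &table[devfn];
    }
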
585 /* we know that this iommu should be at offset 0xa000 from vtbar */
596 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
598 if (!iommu || iommu->drhd->ignored)
617 struct intel_iommu *iommu;
639 for_each_iommu(iommu, drhd) {
673 iommu = NULL;
675 if (iommu_is_dummy(iommu, dev))
676 iommu = NULL;
680 return iommu;
690 static void free_context_table(struct intel_iommu *iommu)
695 if (!iommu->root_entry)
699 context = iommu_context_addr(iommu, i, 0, 0);
703 if (!sm_supported(iommu))
706 context = iommu_context_addr(iommu, i, 0x80, 0);
711 iommu_free_page(iommu->root_entry);
712 iommu->root_entry = NULL;
716 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
740 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
752 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
755 rt_entry = &iommu->root_entry[bus];
761 if (sm_supported(iommu))
768 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0);
778 if (!sm_supported(iommu)) {
818 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
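
dmar_fault_dump_ptes() at 740 prints the root and context entries for the faulting source-id and then calls pgtable_walk() to descend the page table toward the faulting IOVA. A sketch of that walk using the pfn_level_offset()/dma_pte_addr() helpers; the _sketch suffix marks it as illustrative:

    static void pgtable_walk_sketch(unsigned long pfn, struct dma_pte *parent,
                                    int level)
    {
            while (level > 0) {
                    struct dma_pte *pte = &parent[pfn_level_offset(pfn, level)];

                    if (!dma_pte_present(pte)) {
                            pr_info("PTE not present at level %d\n", level);
                            break;
                    }
                    pr_info("pte level: %d, pte value: 0x%016llx\n",
                            level, pte->val);

                    if (level == 1 || dma_pte_superpage(pte))
                            break;          /* reached a leaf */

                    parent = phys_to_virt(dma_pte_addr(pte));
                    level--;
            }
    }
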
1096 /* iommu handling */
1097 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1101 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
1104 iommu->name);
1108 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1109 iommu->root_entry = root;
1114 static void iommu_set_root_entry(struct intel_iommu *iommu)
1120 addr = virt_to_phys(iommu->root_entry);
1121 if (sm_supported(iommu))
1124 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1125 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1127 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1130 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1133 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1139 if (cap_esrtps(iommu->cap))
1142 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1143 if (sm_supported(iommu))
1144 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1145 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1148 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1153 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1156 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1157 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1160 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1163 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1167 static void __iommu_flush_context(struct intel_iommu *iommu,
1187 iommu->name, type);
1192 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1193 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1196 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1199 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1203 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1206 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1225 iommu->name, type);
1229 if (cap_write_drain(iommu->cap))
1232 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1235 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1236 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1239 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1242 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1255 struct intel_iommu *iommu, u8 bus, u8 devfn)
1262 if (info->iommu == iommu && info->bus == bus &&
1372 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1382 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1387 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1390 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1391 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1393 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1396 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1399 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1402 static void iommu_enable_translation(struct intel_iommu *iommu)
1407 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1408 iommu->gcmd |= DMA_GCMD_TE;
1409 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1412 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1415 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1418 static void iommu_disable_translation(struct intel_iommu *iommu)
1423 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1424 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1427 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1428 iommu->gcmd &= ~DMA_GCMD_TE;
1429 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1432 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1435 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
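
The register sequences above (set root table pointer, write-buffer flush, translation enable/disable) all use the same handshake: update the cached iommu->gcmd, write it to DMAR_GCMD_REG while holding register_lock, then poll DMAR_GSTS_REG with IOMMU_WAIT_OP() until the matching status bit confirms the change. A sketch of the enable path, using the DMA_GCMD_TE/DMA_GSTS_TES pair:

    static void iommu_enable_translation_sketch(struct intel_iommu *iommu)
    {
            unsigned long flags;
            u32 sts;

            raw_spin_lock_irqsave(&iommu->register_lock, flags);

            iommu->gcmd |= DMA_GCMD_TE;
            writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

            /* Spin until hardware reports translation is actually enabled. */
            IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                          (sts & DMA_GSTS_TES), sts);

            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
    }
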
1438 static int iommu_init_domains(struct intel_iommu *iommu)
1442 ndomains = cap_ndoms(iommu->cap);
1444 iommu->name, ndomains);
1446 spin_lock_init(&iommu->lock);
1448 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
1449 if (!iommu->domain_ids)
1458 set_bit(0, iommu->domain_ids);
1467 if (sm_supported(iommu))
1468 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1473 static void disable_dmar_iommu(struct intel_iommu *iommu)
1475 if (!iommu->domain_ids)
1479 * All iommu domains must have been detached from the devices,
1482 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
1486 if (iommu->gcmd & DMA_GCMD_TE)
1487 iommu_disable_translation(iommu);
1490 static void free_dmar_iommu(struct intel_iommu *iommu)
1492 if (iommu->domain_ids) {
1493 bitmap_free(iommu->domain_ids);
1494 iommu->domain_ids = NULL;
1497 if (iommu->copied_tables) {
1498 bitmap_free(iommu->copied_tables);
1499 iommu->copied_tables = NULL;
1503 free_context_table(iommu);
1506 if (pasid_supported(iommu)) {
1507 if (ecap_prs(iommu->ecap))
1508 intel_svm_finish_prq(iommu);
1553 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
1566 spin_lock(&iommu->lock);
1567 curr = xa_load(&domain->iommu_array, iommu->seq_id);
1570 spin_unlock(&iommu->lock);
1575 ndomains = cap_ndoms(iommu->cap);
1576 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1578 pr_err("%s: No free domain ids\n", iommu->name);
1582 set_bit(num, iommu->domain_ids);
1585 info->iommu = iommu;
1586 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
1594 spin_unlock(&iommu->lock);
1598 clear_bit(info->did, iommu->domain_ids);
1600 spin_unlock(&iommu->lock);
1605 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
1612 spin_lock(&iommu->lock);
1613 info = xa_load(&domain->iommu_array, iommu->seq_id);
1615 clear_bit(info->did, iommu->domain_ids);
1616 xa_erase(&domain->iommu_array, iommu->seq_id);
1621 spin_unlock(&iommu->lock);
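
domain_attach_iommu()/domain_detach_iommu() at 1553/1605 track, per IOMMU, a refcounted { iommu, did, refcnt } record in domain->iommu_array (an xarray indexed by iommu->seq_id), allocating the domain ID from iommu->domain_ids on first attach and releasing it on last detach. A condensed sketch of the detach side under iommu->lock; error handling and special-case domains are omitted:

    static void domain_detach_iommu_sketch(struct dmar_domain *domain,
                                           struct intel_iommu *iommu)
    {
            struct iommu_domain_info *info;

            spin_lock(&iommu->lock);
            info = xa_load(&domain->iommu_array, iommu->seq_id);
            if (info && --info->refcnt == 0) {
                    /* Last device on this IOMMU: give the domain ID back. */
                    clear_bit(info->did, iommu->domain_ids);
                    xa_erase(&domain->iommu_array, iommu->seq_id);
                    kfree(info);
            }
            spin_unlock(&iommu->lock);
    }
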
1654 struct intel_iommu *iommu,
1658 domain_lookup_dev_info(domain, iommu, bus, devfn);
1659 u16 did = domain_id_iommu(domain, iommu);
1671 spin_lock(&iommu->lock);
1673 context = iommu_context_addr(iommu, bus, devfn, 1);
1678 if (context_present(context) && !context_copied(iommu, bus, devfn))
1690 if (context_copied(iommu, bus, devfn)) {
1693 if (did_old < cap_ndoms(iommu->cap)) {
1694 iommu->flush.flush_context(iommu, did_old,
1698 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
1702 clear_context_copied(iommu, bus, devfn);
1710 * Skip top levels of page tables for iommu which has
1713 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1733 context_set_address_width(context, iommu->msagaw);
1739 if (!ecap_coherent(iommu->ecap))
1748 if (cap_caching_mode(iommu->cap)) {
1749 iommu->flush.flush_context(iommu, 0,
1753 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1755 iommu_flush_write_buffer(iommu);
1761 spin_unlock(&iommu->lock);
1770 struct intel_iommu *iommu = info->iommu;
1773 return domain_context_mapping_one(domain, iommu,
1781 struct intel_iommu *iommu = info->iommu;
1785 return domain_context_mapping_one(domain, iommu, bus, devfn);
1960 struct intel_iommu *iommu = info->iommu;
1964 spin_lock(&iommu->lock);
1965 context = iommu_context_addr(iommu, bus, devfn, 0);
1967 spin_unlock(&iommu->lock);
1974 __iommu_flush_cache(iommu, context, sizeof(*context));
1975 spin_unlock(&iommu->lock);
1976 iommu->flush.flush_context(iommu,
1982 iommu->flush.flush_iotlb(iommu,
1991 static int domain_setup_first_level(struct intel_iommu *iommu,
2001 * Skip top levels of page tables for iommu which has
2004 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2020 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2021 domain_id_iommu(domain, iommu),
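
The "Skip top levels of page tables" comments at 1710 and 2001 cover the case where this IOMMU supports a narrower AGAW than the domain was built with: the code simply descends from domain->pgd one level per missing AGAW step before programming the context or PASID entry. A hypothetical helper showing that descent:

    /* Hypothetical helper: find the sub-table that fits this IOMMU's AGAW. */
    static struct dma_pte *domain_pgd_for_iommu(struct dmar_domain *domain,
                                                struct intel_iommu *iommu)
    {
            struct dma_pte *pgd = domain->pgd;
            int agaw;

            for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
                    pgd = phys_to_virt(dma_pte_addr(pgd));
                    if (!dma_pte_present(pgd))
                            return NULL;    /* skipped level never populated */
            }
            return pgd;
    }
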
2109 struct intel_iommu *iommu = info->iommu;
2113 ret = domain_attach_iommu(domain, iommu);
2119 domain_detach_iommu(domain, iommu);
2131 if (!sm_supported(iommu))
2134 ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
2136 ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
2138 ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
2145 if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
2203 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2206 * Start from the sane iommu hardware state.
2211 if (!iommu->qi) {
2215 dmar_fault(-1, iommu);
2220 dmar_disable_qi(iommu);
2223 if (dmar_enable_qi(iommu)) {
2227 iommu->flush.flush_context = __iommu_flush_context;
2228 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2230 iommu->name);
2232 iommu->flush.flush_context = qi_flush_context;
2233 iommu->flush.flush_iotlb = qi_flush_iotlb;
2234 pr_info("%s: Using Queued invalidation\n", iommu->name);
2238 static int copy_context_table(struct intel_iommu *iommu,
2260 __iommu_flush_cache(iommu, new_ce,
2290 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
2304 if (did >= 0 && did < cap_ndoms(iommu->cap))
2305 set_bit(did, iommu->domain_ids);
2307 set_context_copied(iommu, bus, devfn);
2313 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2322 static int copy_translation_tables(struct intel_iommu *iommu)
2332 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2334 new_ext = !!sm_supported(iommu);
2345 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
2346 if (!iommu->copied_tables)
2365 ret = copy_context_table(iommu, &old_rt[bus],
2369 iommu->name, bus);
2374 spin_lock(&iommu->lock);
2383 iommu->root_entry[bus].lo = val;
2390 iommu->root_entry[bus].hi = val;
2393 spin_unlock(&iommu->lock);
2397 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
2410 struct intel_iommu *iommu;
2417 for_each_iommu(iommu, drhd) {
2419 iommu_disable_translation(iommu);
2428 if (pasid_supported(iommu)) {
2429 u32 temp = 2 << ecap_pss(iommu->ecap);
2435 intel_iommu_init_qi(iommu);
2437 ret = iommu_init_domains(iommu);
2441 init_translation_status(iommu);
2443 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
2444 iommu_disable_translation(iommu);
2445 clear_translation_pre_enabled(iommu);
2447 iommu->name);
2455 ret = iommu_alloc_root_entry(iommu);
2459 if (translation_pre_enabled(iommu)) {
2462 ret = copy_translation_tables(iommu);
2474 iommu->name);
2475 iommu_disable_translation(iommu);
2476 clear_translation_pre_enabled(iommu);
2479 iommu->name);
2483 if (!ecap_pass_through(iommu->ecap))
2485 intel_svm_check(iommu);
2493 for_each_active_iommu(iommu, drhd) {
2494 iommu_flush_write_buffer(iommu);
2495 iommu_set_root_entry(iommu);
2511 for_each_iommu(iommu, drhd) {
2518 iommu_disable_protect_mem_regions(iommu);
2522 iommu_flush_write_buffer(iommu);
2525 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
2531 ret = intel_svm_enable_prq(iommu);
2537 ret = dmar_set_interrupt(iommu);
2545 for_each_active_iommu(iommu, drhd) {
2546 disable_dmar_iommu(iommu);
2547 free_dmar_iommu(iommu);
2597 struct intel_iommu *iommu = NULL;
2600 for_each_active_iommu(iommu, drhd) {
2601 if (iommu->qi) {
2602 ret = dmar_reenable_qi(iommu);
2608 for_each_iommu(iommu, drhd) {
2615 iommu_disable_protect_mem_regions(iommu);
2619 iommu_flush_write_buffer(iommu);
2620 iommu_set_root_entry(iommu);
2621 iommu_enable_translation(iommu);
2622 iommu_disable_protect_mem_regions(iommu);
2631 struct intel_iommu *iommu;
2633 for_each_active_iommu(iommu, drhd) {
2634 iommu->flush.flush_context(iommu, 0, 0, 0,
2636 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2644 struct intel_iommu *iommu = NULL;
2649 for_each_active_iommu(iommu, drhd) {
2650 iommu_disable_translation(iommu);
2652 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2654 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2655 readl(iommu->reg + DMAR_FECTL_REG);
2656 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2657 readl(iommu->reg + DMAR_FEDATA_REG);
2658 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2659 readl(iommu->reg + DMAR_FEADDR_REG);
2660 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2661 readl(iommu->reg + DMAR_FEUADDR_REG);
2663 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2671 struct intel_iommu *iommu = NULL;
2682 for_each_active_iommu(iommu, drhd) {
2684 raw_spin_lock_irqsave(&iommu->register_lock, flag);
2686 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2687 iommu->reg + DMAR_FECTL_REG);
2688 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2689 iommu->reg + DMAR_FEDATA_REG);
2690 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2691 iommu->reg + DMAR_FEADDR_REG);
2692 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2693 iommu->reg + DMAR_FEUADDR_REG);
2695 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2921 struct intel_iommu *iommu = dmaru->iommu;
2923 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
2927 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
2929 iommu->name);
2933 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
2934 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
2936 iommu->name);
2943 if (iommu->gcmd & DMA_GCMD_TE)
2944 iommu_disable_translation(iommu);
2946 ret = iommu_init_domains(iommu);
2948 ret = iommu_alloc_root_entry(iommu);
2952 intel_svm_check(iommu);
2959 iommu_disable_protect_mem_regions(iommu);
2963 intel_iommu_init_qi(iommu);
2964 iommu_flush_write_buffer(iommu);
2967 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
2968 ret = intel_svm_enable_prq(iommu);
2973 ret = dmar_set_interrupt(iommu);
2977 iommu_set_root_entry(iommu);
2978 iommu_enable_translation(iommu);
2980 iommu_disable_protect_mem_regions(iommu);
2984 disable_dmar_iommu(iommu);
2986 free_dmar_iommu(iommu);
2993 struct intel_iommu *iommu = dmaru->iommu;
2997 if (iommu == NULL)
3003 disable_dmar_iommu(iommu);
3004 free_dmar_iommu(iommu);
3057 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
3077 return !(satcu->atc_required && !sm_supported(iommu));
3222 struct intel_iommu *iommu = NULL;
3225 for_each_iommu(iommu, drhd)
3226 iommu_disable_translation(iommu);
3232 struct intel_iommu *iommu = NULL;
3240 for_each_iommu(iommu, drhd)
3241 iommu_disable_protect_mem_regions(iommu);
3253 return container_of(iommu_dev, struct intel_iommu, iommu);
3259 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3260 u32 ver = readl(iommu->reg + DMAR_VER_REG);
3269 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3270 return sysfs_emit(buf, "%llx\n", iommu->reg_phys);
3277 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3278 return sysfs_emit(buf, "%llx\n", iommu->cap);
3285 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3286 return sysfs_emit(buf, "%llx\n", iommu->ecap);
3293 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3294 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap));
3301 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
3303 bitmap_weight(iommu->domain_ids,
3304 cap_ndoms(iommu->cap)));
3319 .name = "intel-iommu",
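
The sysfs attributes at 3259-3304 all follow one pattern: recover the intel_iommu from the struct device embedded in its iommu_device, read or derive one value, and print it with sysfs_emit(). A reconstructed example for the version attribute, assuming the usual DEVICE_ATTR_RO() wiring:

    static ssize_t version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            struct intel_iommu *iommu = dev_to_intel_iommu(dev);
            u32 ver = readl(iommu->reg + DMAR_VER_REG);

            return sysfs_emit(buf, "%d:%d\n",
                              DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
    }
    static DEVICE_ATTR_RO(version);
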
3366 struct intel_iommu *iommu __maybe_unused;
3370 for_each_active_iommu(iommu, drhd) {
3415 struct intel_iommu *iommu;
3460 for_each_iommu(iommu, drhd)
3461 iommu_disable_protect_mem_regions(iommu);
3496 for_each_active_iommu(iommu, drhd) {
3504 if (cap_caching_mode(iommu->cap) &&
3509 iommu_device_sysfs_add(&iommu->iommu, NULL,
3511 "%s", iommu->name);
3512 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
3514 iommu_pmu_register(iommu);
3526 for_each_iommu(iommu, drhd) {
3527 if (!drhd->ignored && !translation_pre_enabled(iommu))
3528 iommu_enable_translation(iommu);
3530 iommu_disable_protect_mem_regions(iommu);
3555 * NB - intel-iommu lacks any sort of reference counting for the users of
3577 struct intel_iommu *iommu = info->iommu;
3582 if (sm_supported(iommu))
3583 intel_pasid_tear_down_entry(iommu, dev,
3597 domain_detach_iommu(info->domain, iommu);
3679 struct intel_iommu *iommu = info->iommu;
3685 if (!nested_supported(iommu) || flags)
3693 if (nested_parent && !nested_supported(iommu))
3695 if (user_data || (dirty_tracking && !ssads_supported(iommu)))
3740 struct intel_iommu *iommu = info->iommu;
3743 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
3746 if (domain->dirty_ops && !ssads_supported(iommu))
3749 /* check if this iommu agaw is sufficient for max mapped address */
3750 addr_width = agaw_to_width(iommu->agaw);
3751 if (addr_width > cap_mgaw(iommu->cap))
3752 addr_width = cap_mgaw(iommu->cap);
3761 while (iommu->agaw < dmar_domain->agaw) {
3772 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
3773 context_copied(iommu, info->bus, info->devfn))
3817 pr_err("%s: iommu width (%d) is not "
3932 if (!ecap_sc_support(info->iommu->ecap)) {
3956 intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
3993 return ecap_sc_support(info->iommu->ecap);
3995 return ssads_supported(info->iommu);
4005 struct intel_iommu *iommu;
4009 iommu = device_lookup_iommu(dev, &bus, &devfn);
4010 if (!iommu || !iommu->iommu.ops)
4024 info->segment = iommu->segment;
4028 info->iommu = iommu;
4030 if (ecap_dev_iotlb_support(iommu->ecap) &&
4032 dmar_ats_supported(pdev, iommu)) {
4043 if (ecap_dit(iommu->ecap))
4047 if (sm_supported(iommu)) {
4048 if (pasid_supported(iommu)) {
4055 if (info->ats_supported && ecap_prs(iommu->ecap) &&
4063 ret = device_rbtree_insert(iommu, info);
4068 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
4075 if (!context_copied(iommu, info->bus, info->devfn)) {
4084 return &iommu->iommu;
4098 struct intel_iommu *iommu = info->iommu;
4100 mutex_lock(&iommu->iopf_lock);
4103 mutex_unlock(&iommu->iopf_lock);
4105 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
4106 !context_copied(iommu, info->bus, info->devfn))
4184 struct intel_iommu *iommu;
4189 iommu = info->iommu;
4190 if (!iommu)
4193 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
4220 struct intel_iommu *iommu;
4229 iommu = info->iommu;
4230 if (!iommu)
4241 ret = iopf_queue_add_device(iommu->iopf_queue, dev);
4247 iopf_queue_remove_device(iommu->iopf_queue, dev);
4259 struct intel_iommu *iommu = info->iommu;
4274 iopf_queue_remove_device(iommu->iopf_queue, dev);
4313 return translation_pre_enabled(info->iommu) && !info->domain;
4347 struct intel_iommu *iommu = info->iommu;
4362 domain_detach_iommu(dmar_domain, iommu);
4365 intel_pasid_tear_down_entry(iommu, dev, pasid, false);
4374 struct intel_iommu *iommu = info->iommu;
4379 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
4385 if (context_copied(iommu, info->bus, info->devfn))
4396 ret = domain_attach_iommu(dmar_domain, iommu);
4405 ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
4407 ret = domain_setup_first_level(iommu, dmar_domain,
4410 ret = intel_pasid_setup_second_level(iommu, dmar_domain,
4428 domain_detach_iommu(dmar_domain, iommu);
4437 struct intel_iommu *iommu = info->iommu;
4445 vtd->cap_reg = iommu->cap;
4446 vtd->ecap_reg = iommu->ecap;
4462 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
4820 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
4837 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
4840 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
4859 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
4865 if (!cap_ecmds(iommu->cap))
4868 raw_spin_lock_irqsave(&iommu->register_lock, flags);
4870 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
4883 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
4884 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));
4886 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
4896 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
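
ecmd_submit_sync() at 4859 drives the VT-d enhanced command interface: bail out unless cap_ecmds is set, refuse if a command is still in progress in DMAR_ECRSP_REG, write operand B to DMAR_ECEO_REG, write the opcode plus operand A to DMAR_ECMD_REG, then poll the response register until the in-progress bit clears. A condensed sketch of that flow; the DMA_ECMD_ECRSP_IP mask name is an assumption here, and decoding of the returned status is omitted:

    static int ecmd_submit_sync_sketch(struct intel_iommu *iommu, u8 ecmd,
                                       u64 oa, u64 ob)
    {
            unsigned long flags;
            u64 res;
            int ret = 0;

            if (!cap_ecmds(iommu->cap))
                    return -ENODEV;

            raw_spin_lock_irqsave(&iommu->register_lock, flags);

            res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
            if (res & DMA_ECMD_ECRSP_IP) {  /* assumed "in progress" bit */
                    ret = -EBUSY;
                    goto unlock;
            }

            /* Operand B first, then the command doorbell with operand A. */
            dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
            dmar_writeq(iommu->reg + DMAR_ECMD_REG,
                        ecmd | (oa << DMA_ECMD_OA_SHIFT));

            /* Wait for hardware to finish processing the command. */
            IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
                          !(res & DMA_ECMD_ECRSP_IP), res);
    unlock:
            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
            return ret;
    }
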