Lines Matching defs:iommu

15 #include <linux/iommu.h>
21 #include "iommu.h"
62 pages = alloc_pages_node(info->iommu->node,
74 if (!ecap_coherent(info->iommu->ecap))
149 entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
164 if (!ecap_coherent(info->iommu->ecap)) {
192 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
203 qi_submit_sync(iommu, &desc, 1, 0);
207 devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
231 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
233 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
236 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
242 spin_lock(&iommu->lock);
245 spin_unlock(&iommu->lock);
252 spin_unlock(&iommu->lock);
254 if (!ecap_coherent(iommu->ecap))
257 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
260 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
262 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
265 if (!cap_caching_mode(iommu->cap))
266 devtlb_invalidation_with_pasid(iommu, dev, pasid);
273 static void pasid_flush_caches(struct intel_iommu *iommu,
277 if (!ecap_coherent(iommu->ecap))
280 if (cap_caching_mode(iommu->cap)) {
281 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
282 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
284 iommu_flush_write_buffer(iommu);
292 int intel_pasid_setup_first_level(struct intel_iommu *iommu,
298 if (!ecap_flts(iommu->ecap)) {
300 iommu->name);
304 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
306 iommu->name);
310 spin_lock(&iommu->lock);
313 spin_unlock(&iommu->lock);
318 spin_unlock(&iommu->lock);
334 pasid_set_address_width(pte, iommu->agaw);
335 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
341 spin_unlock(&iommu->lock);
343 pasid_flush_caches(iommu, pte, pasid, did);
349 * Skip top levels of page tables for iommu which has less agaw
353 struct intel_iommu *iommu,
358 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
370 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
384 if (!ecap_slts(iommu->ecap)) {
386 iommu->name);
391 agaw = iommu_skip_agaw(domain, iommu, &pgd);
398 did = domain_id_iommu(domain, iommu);
400 spin_lock(&iommu->lock);
403 spin_unlock(&iommu->lock);
408 spin_unlock(&iommu->lock);
418 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
423 spin_unlock(&iommu->lock);
425 pasid_flush_caches(iommu, pte, pasid, did);
433 int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
440 spin_lock(&iommu->lock);
444 spin_unlock(&iommu->lock);
454 spin_unlock(&iommu->lock);
463 spin_unlock(&iommu->lock);
471 spin_unlock(&iommu->lock);
473 if (!ecap_coherent(iommu->ecap))
490 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
492 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
495 if (!cap_caching_mode(iommu->cap))
496 devtlb_invalidation_with_pasid(iommu, dev, pasid);
504 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
510 spin_lock(&iommu->lock);
513 spin_unlock(&iommu->lock);
518 spin_unlock(&iommu->lock);
524 pasid_set_address_width(pte, iommu->agaw);
527 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
529 spin_unlock(&iommu->lock);
531 pasid_flush_caches(iommu, pte, pasid, did);
539 void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
545 spin_lock(&iommu->lock);
548 spin_unlock(&iommu->lock);
554 spin_unlock(&iommu->lock);
556 if (!ecap_coherent(iommu->ecap))
570 pasid_cache_invalidation_with_pasid(iommu, did, pasid);
571 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
574 if (!cap_caching_mode(iommu->cap))
575 devtlb_invalidation_with_pasid(iommu, dev, pasid);
580  * @iommu: IOMMU which the device belongs to
589 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
595 u16 did = domain_id_iommu(domain, iommu);
604 if (!cap_fl5lp_support(iommu->cap)) {
616 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
618 iommu->name);
622 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
624 iommu->name);
628 spin_lock(&iommu->lock);
631 spin_unlock(&iommu->lock);
635 spin_unlock(&iommu->lock);
662 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
667 spin_unlock(&iommu->lock);
669 pasid_flush_caches(iommu, pte, pasid, did);
682 struct intel_iommu *iommu = info->iommu;
685 spin_lock(&iommu->lock);
686 context = iommu_context_addr(iommu, bus, devfn, false);
688 spin_unlock(&iommu->lock);
693 __iommu_flush_cache(iommu, context, sizeof(*context));
694 spin_unlock(&iommu->lock);
707 * The iommu has been parked in the blocking state. All domains have
711 iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
713 devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
760 struct intel_iommu *iommu = info->iommu;
778 __iommu_flush_cache(iommu, context, sizeof(*context));
786 struct intel_iommu *iommu = info->iommu;
789 spin_lock(&iommu->lock);
790 context = iommu_context_addr(iommu, bus, devfn, true);
792 spin_unlock(&iommu->lock);
796 if (context_present(context) && !context_copied(iommu, bus, devfn)) {
797 spin_unlock(&iommu->lock);
801 if (context_copied(iommu, bus, devfn)) {
803 __iommu_flush_cache(iommu, context, sizeof(*context));
814 iommu->flush.flush_context(iommu, 0,
818 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
819 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
820 devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
827 clear_context_copied(iommu, bus, devfn);
831 spin_unlock(&iommu->lock);
839 if (cap_caching_mode(iommu->cap)) {
840 iommu->flush.flush_context(iommu, 0,
844 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
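
Taken together, the matches above trace one recurring pattern in this file: a PASID table entry is written only while holding iommu->lock, and once the entry is programmed the caches are flushed via pasid_flush_caches(), which consults ecap_coherent() and cap_caching_mode() to pick between PASID-cache plus PASID-IOTLB invalidation and a plain write-buffer flush. The sketch below is an illustration of that flow only, not code from pasid.c: the function name example_setup_pasid() is invented, and intel_pasid_get_entry() is assumed here to be the lookup helper behind the entry-allocation path shown above.

/*
 * Illustrative sketch only (not from pasid.c): the lock/program/flush
 * sequence that the matched lines follow. example_setup_pasid() is an
 * invented name; intel_pasid_get_entry() is assumed to be the PASID
 * table entry lookup helper used by the real setup paths.
 */
#include <linux/spinlock.h>
#include <linux/iommu.h>
#include "iommu.h"
#include "pasid.h"

static int example_setup_pasid(struct intel_iommu *iommu, struct device *dev,
                               u32 pasid, u16 did)
{
        struct pasid_entry *pte;

        /* PASID table entries are only written under iommu->lock. */
        spin_lock(&iommu->lock);
        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte) {
                spin_unlock(&iommu->lock);
                return -ENODEV;
        }

        /* Program the entry from the IOMMU's capabilities. */
        pasid_set_address_width(pte, iommu->agaw);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
        spin_unlock(&iommu->lock);

        /*
         * Flush after the write: pasid_flush_caches() handles the
         * non-coherent case (ecap_coherent()) and, under caching mode,
         * the PASID-cache and PASID-IOTLB invalidations; otherwise it
         * falls back to iommu_flush_write_buffer().
         */
        pasid_flush_caches(iommu, pte, pasid, did);

        return 0;
}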