Lines Matching defs:flags

Cross-reference hits for the symbol `flags` in the AMD IOMMU driver (judging by names such as pdev_get_caps, amd_iommu_report_page_fault and amd_iommu_domain_alloc_user, the file is drivers/iommu/amd/iommu.c). Each entry is the source line number followed by the matching line; note that `flags` means different things in the different clusters below.

329 	return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
335 u32 flags = 0;
338 flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
341 flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
345 flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
348 flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
351 flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
354 return flags;
366 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
395 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
429 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
522 dev_data->flags = pdev_get_caps(to_pci_dev(dev));
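
The hits at 329-354 and 522 are the per-device capability probe: pdev_get_caps() accumulates ATS/PRI/PASID/EXEC/PRIV support bits into a mask that is stored in dev_data->flags at probe time and tested later (366, 395, 429). A minimal sketch of that accumulation, assuming the standard PCI helpers; the helper name and the exact checks are illustrative, not a copy of the driver's probing logic:

    #include <linux/pci.h>
    #include <linux/pci-ats.h>

    /* Illustrative stand-in for pdev_get_caps(); details are assumptions. */
    static u32 example_pdev_get_caps(struct pci_dev *pdev)
    {
        u32 flags = 0;
        int features;

        if (pci_ats_supported(pdev))
            flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
        if (pci_pri_supported(pdev))
            flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

        features = pci_pasid_features(pdev);
        if (features >= 0) {
            flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
            if (features & PCI_PASID_CAP_EXEC)
                flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
            if (features & PCI_PASID_CAP_PRIV)
                flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
        }

        return flags;
    }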
591 int devid, vmg_tag, flags;
597 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
607 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
608 vmg_tag, spa, flags);
611 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
613 vmg_tag, spa, flags);
623 int devid, flags_rmp, vmg_tag, flags;
630 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
640 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
641 vmg_tag, gpa, flags_rmp, flags);
644 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
646 vmg_tag, gpa, flags_rmp, flags);
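
The RMP_HW_ERROR / RMP_PAGE_FAULT hits (591-646) share one reporting shape: the fields are masked out of the raw event words (597, 630), then the message goes through pci_err() when the offending device can be resolved and falls back to pr_err_ratelimited() otherwise. A hedged sketch of that fallback; the helper name and the seg/devid plumbing are assumptions:

    #include <linux/pci.h>

    /* Assumed reporting helper mirroring the pci_err()/pr_err_ratelimited()
     * pair at 607-613. */
    static void example_report_rmp_hw_error(u16 seg, u16 devid, u32 vmg_tag,
                                            u64 spa, u32 flags)
    {
        struct pci_dev *pdev = pci_get_domain_bus_and_slot(seg, PCI_BUS_NUM(devid),
                                                           devid & 0xff);

        if (pdev)
            pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
                    vmg_tag, spa, flags);
        else
            pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
                               seg, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                               vmg_tag, spa, flags);
        pci_dev_put(pdev);
    }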
653 #define IS_IOMMU_MEM_TRANSACTION(flags) \
654 (((flags) & EVENT_FLAG_I) == 0)
656 #define IS_WRITE_REQUEST(flags) \
657 ((flags) & EVENT_FLAG_RW)
661 u64 address, int flags)
677 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
689 IS_WRITE_REQUEST(flags) ?
696 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
697 domain_id, address, flags);
700 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
702 domain_id, address, flags);
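
IS_IOMMU_MEM_TRANSACTION() and IS_WRITE_REQUEST() (653-657) test single bits (EVENT_FLAG_I, EVENT_FLAG_RW) in the event flags; amd_iommu_report_page_fault() uses them at 677-689 to skip interrupt-request faults and to tell the generic fault handler whether the access was a read or a write. A fragment sketching that translation, with dev_data, pdev and the surrounding error handling assumed from context:

    /* Sketch of the translation around 677-689; simplified assumptions. */
    if (IS_IOMMU_MEM_TRANSACTION(flags) && dev_data->domain) {
        /* Forward the fault with an explicit read/write direction. */
        report_iommu_fault(&dev_data->domain->domain, &pdev->dev, address,
                           IS_WRITE_REQUEST(flags) ?
                                   IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
        return;
    }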
713 int type, devid, flags, tag;
724 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
738 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
744 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
746 pasid, address, flags);
751 "address=0x%llx flags=0x%04x]\n",
753 address, flags);
756 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
758 pasid, address, flags);
765 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
766 address, flags);
774 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
776 pasid, address, flags);
787 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
789 pasid, address, flags, tag);
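
Lines 713-789 are the main event-log decoder: the event type and the flags field both come out of event[1] (724), and each type then gets its own dev_err() line, with IO_PAGE_FAULT routed to amd_iommu_report_page_fault() (738). A trimmed skeleton of that dispatch, using the driver's EVENT_TYPE_* constants; only two cases are shown and the pasid/address extraction is omitted:

    /* Trimmed dispatch skeleton matching 724-746. */
    type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
    flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;

    switch (type) {
    case EVENT_TYPE_IO_FAULT:
        amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
        return;
    case EVENT_TYPE_DEV_TAB_ERR:
        dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY ... flags=0x%04x]\n",
                flags);
        break;
    default:
        break;
    }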
1244 unsigned long flags;
1247 raw_spin_lock_irqsave(&iommu->lock, flags);
1249 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1266 unsigned long flags;
1276 raw_spin_lock_irqsave(&iommu->lock, flags);
1285 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1620 unsigned long flags;
1622 spin_lock_irqsave(&domain->lock, flags);
1624 spin_unlock_irqrestore(&domain->lock, flags);
1695 unsigned long flags;
1698 spin_lock_irqsave(&pd_bitmap_lock, flags);
1705 spin_unlock_irqrestore(&pd_bitmap_lock, flags);
1712 unsigned long flags;
1714 spin_lock_irqsave(&pd_bitmap_lock, flags);
1717 spin_unlock_irqrestore(&pd_bitmap_lock, flags);
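
Most of the remaining hits are the usual local-variable idiom: `unsigned long flags` paired with spin_lock_irqsave()/spin_unlock_irqrestore() (or the raw_ variants for iommu->lock), so the caller's interrupt state is saved and restored around the critical section. The pd_bitmap_lock hits at 1695-1717 guard protection-domain ID allocation; a minimal, self-contained sketch of the idiom with hypothetical names (example_pd_lock and example_pd_bitmap stand in for the driver's own symbols):

    #include <linux/spinlock.h>
    #include <linux/bitmap.h>

    static DEFINE_SPINLOCK(example_pd_lock);            /* stands in for pd_bitmap_lock */
    static DECLARE_BITMAP(example_pd_bitmap, 1 << 16);  /* stands in for the ID bitmap  */

    static u16 example_domain_id_alloc(void)
    {
        unsigned long flags;
        unsigned int id;

        spin_lock_irqsave(&example_pd_lock, flags);
        id = find_next_zero_bit(example_pd_bitmap, 1 << 16, 1); /* keep ID 0 reserved */
        if (id < (1 << 16))
            __set_bit(id, example_pd_bitmap);
        else
            id = 0;                                     /* 0 doubles as "no ID free" */
        spin_unlock_irqrestore(&example_pd_lock, flags);

        return id;
    }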
1899 u64 flags = 0;
1927 flags = dev_table[devid].data[1];
1930 flags |= DTE_FLAG_IOTLB;
1948 flags &= ~tmp;
1951 flags &= ~tmp;
1958 flags |= tmp;
1961 flags |= tmp;
1973 flags &= ~DEV_DOMID_MASK;
1974 flags |= domid;
1977 dev_table[devid].data[1] = flags;
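
The hits at 1899-1977 are the device-table-entry update: data[1] of the DTE is read into a local u64 flags (1927), control bits such as DTE_FLAG_IOTLB are set or cleared, the old domain ID is masked off with DEV_DOMID_MASK and the new one spliced in (1973-1974), and the word is written back in a single store (1977). A reduced fragment of that read-modify-write; the dev_table/devid/domid/ats_enabled names are assumed from context and the GCR3 handling done by the real set_dte_entry() is omitted:

    /* Reduced DTE data[1] read-modify-write mirroring 1927-1977. */
    u64 flags = dev_table[devid].data[1];

    if (ats_enabled)
        flags |= DTE_FLAG_IOTLB;    /* device may cache IOTLB entries */

    flags &= ~DEV_DOMID_MASK;       /* drop the old domain ID ...     */
    flags |= domid;                 /* ... and splice in the new one  */

    dev_table[devid].data[1] = flags;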
2083 unsigned long flags;
2086 spin_lock_irqsave(&domain->lock, flags);
2105 spin_unlock_irqrestore(&domain->lock, flags);
2117 unsigned long flags;
2122 spin_lock_irqsave(&domain->lock, flags);
2143 spin_unlock_irqrestore(&domain->lock, flags);
2359 struct device *dev, u32 flags)
2361 bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
2410 amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
2417 if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
2420 return do_iommu_domain_alloc(type, dev, flags);
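
From 2359 on, flags changes meaning again: it is the iommufd allocation-flag word (IOMMU_HWPT_ALLOC_*). amd_iommu_domain_alloc_user() rejects anything beyond dirty tracking before allocating (2417-2420), and do_iommu_domain_alloc() only consumes the dirty-tracking bit (2361). A sketch of that validation, with the actual allocation stubbed out behind an assumed helper:

    #include <linux/err.h>
    #include <linux/iommu.h>
    #include <uapi/linux/iommufd.h>

    /* Sketch of the flags validation at 2410-2420; example_do_alloc() is a
     * hypothetical stand-in for the rest of the allocation path. */
    static struct iommu_domain *
    example_domain_alloc_user(struct device *dev, u32 flags,
                              struct iommu_domain *parent,
                              const struct iommu_user_data *user_data)
    {
        bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;

        if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
            return ERR_PTR(-EOPNOTSUPP);

        return example_do_alloc(dev, dirty_tracking);
    }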
2426 unsigned long flags;
2433 spin_lock_irqsave(&domain->lock, flags);
2437 spin_unlock_irqrestore(&domain->lock, flags);
2605 unsigned long flags;
2608 spin_lock_irqsave(&pdomain->lock, flags);
2610 spin_unlock_irqrestore(&pdomain->lock, flags);
2634 spin_unlock_irqrestore(&pdomain->lock, flags);
2641 unsigned long flags,
2658 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
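
The flags forwarded at 2658 are the generic dirty-read flags; a caller that only wants to peek at dirty bits without clearing them would pass IOMMU_DIRTY_NO_CLEAR. A fragment, with ops/iova/size/dirty assumed from the surrounding helper:

    /* Peek at dirty bits without clearing them (flag from linux/iommu.h). */
    ret = ops->read_and_clear_dirty(ops, iova, size,
                                    IOMMU_DIRTY_NO_CLEAR, dirty);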
2730 unsigned long flags;
2732 spin_lock_irqsave(&dom->lock, flags);
2734 spin_unlock_irqrestore(&dom->lock, flags);
2741 unsigned long flags;
2743 spin_lock_irqsave(&dom->lock, flags);
2746 spin_unlock_irqrestore(&dom->lock, flags);
2828 unsigned long flags;
2838 raw_spin_lock_irqsave(&iommu->lock, flags);
2847 raw_spin_unlock_irqrestore(&iommu->lock, flags);
2942 unsigned long flags;
2945 spin_lock_irqsave(&iommu_table_lock, flags);
2958 spin_unlock_irqrestore(&iommu_table_lock, flags);
2965 spin_lock_irqsave(&iommu_table_lock, flags);
2993 spin_unlock_irqrestore(&iommu_table_lock, flags);
3007 unsigned long flags;
3016 raw_spin_lock_irqsave(&table->lock, flags);
3043 raw_spin_unlock_irqrestore(&table->lock, flags);
3053 unsigned long flags;
3060 raw_spin_lock_irqsave(&table->lock, flags);
3074 raw_spin_unlock_irqrestore(&table->lock, flags);
3097 unsigned long flags;
3103 raw_spin_lock_irqsave(&table->lock, flags);
3105 raw_spin_unlock_irqrestore(&table->lock, flags);
3115 unsigned long flags;
3121 raw_spin_lock_irqsave(&table->lock, flags);
3123 	raw_spin_unlock_irqrestore(&table->lock, flags);
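
The hits from 2828 to 3123 are the interrupt-remapping side: the per-IOMMU raw lock, the global iommu_table_lock and a per-remapping-table raw spinlock all use the same irqsave pattern, the last one taken briefly around IRTE allocation and modification (3016-3123). A self-contained sketch with a hypothetical table type (the driver's own struct irq_remap_table and entry format are not reproduced here):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_irq_table {
        raw_spinlock_t lock;
        u32 *table;
    };

    /* Update one entry under the per-table raw lock, cf. 3103-3123. */
    static void example_modify_entry(struct example_irq_table *t, int index, u32 val)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&t->lock, flags);
        t->table[index] = val;
        raw_spin_unlock_irqrestore(&t->lock, flags);
    }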
3740 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
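
The last hit (3740) is yet another flags: the irq_domain's own flag word, where the remapping domain is marked as an MSI parent so that per-device MSI domains can be built on top of it. In current kernels this goes together with an msi_parent_ops assignment; the exact ops name below is an assumption:

    /* Mark the remapping domain as an (isolated) MSI parent, cf. 3740.
     * amdvi_msi_parent_ops is assumed here; check the driver for the
     * actual ops structure. */
    iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
                               IRQ_DOMAIN_FLAG_ISOLATED_MSI;
    iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;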