Lines Matching defs:iommu

21 #include <linux/iommu-helper.h>
23 #include <linux/amd-iommu.h>
37 #include <asm/iommu.h>
43 #include "../dma-iommu.h"
45 #include "../iommu-pages.h"
79 static void set_dte_entry(struct amd_iommu *iommu,
140 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
143 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
170 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
172 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
174 pci_seg->rlookup_table[devid] = iommu;
198 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
201 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
215 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
219 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
235 struct amd_iommu *iommu;
242 iommu = rlookup_amd_iommu(&pdev->dev);
243 if (!iommu)
246 amd_iommu_set_rlookup_table(iommu, alias);
247 dev_table = get_dev_table(iommu);
255 static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
268 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
273 static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
276 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
292 clone_aliases(iommu, dev);
295 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
299 dev_data = search_dev_data(iommu, devid);
302 dev_data = alloc_dev_data(iommu, devid);
306 if (translation_pre_enabled(iommu))
484 struct amd_iommu *iommu;
495 iommu = rlookup_amd_iommu(dev);
496 if (!iommu)
500 pci_seg = iommu->pci_seg;
507 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
520 dev_data = find_dev_data(iommu, devid);
525 setup_aliases(iommu, dev);
543 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
545 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
546 struct dev_table_entry *dev_table = get_dev_table(iommu);
557 setup_aliases(iommu, dev);
583 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
586 struct dev_table_entry *dev_table = get_dev_table(iommu);
601 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
613 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
625 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
633 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
646 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
658 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
672 static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
679 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
695 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
714 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
723 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
725 struct device *dev = iommu->iommu.dev;
751 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
758 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
760 dump_dte_entry(iommu, devid);
765 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
770 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
783 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
788 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
792 amd_iommu_report_rmp_fault(iommu, event);
795 amd_iommu_report_rmp_hw_error(iommu, event);
801 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
819 static void iommu_poll_events(struct amd_iommu *iommu)
823 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
824 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
827 iommu_print_event(iommu, iommu->evt_buf + head);
831 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
845 static void iommu_poll_ga_log(struct amd_iommu *iommu)
849 if (iommu->ga_log == NULL)
852 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
853 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
859 raw = (u64 *)(iommu->ga_log + head);
866 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
888 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
894 dev_set_msi_domain(dev, iommu->ir_domain);
899 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
907 struct amd_iommu *iommu = (struct amd_iommu *) data;
908 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
913 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
917 iommu->index, evt_type);
918 int_handler(iommu);
922 overflow_handler(iommu);
937 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
990 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
994 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
1007 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
1014 tail = iommu->cmd_buf_tail;
1015 target = iommu->cmd_buf + tail;
1019 iommu->cmd_buf_tail = tail;
1022 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1026 struct amd_iommu *iommu,
1029 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
1163 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1170 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1172 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1186 iommu->cmd_buf_head = readl(iommu->mmio_base +
1192 copy_cmd_to_buffer(iommu, cmd);
1195 iommu->need_sync = sync;
1200 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1207 raw_spin_lock_irqsave(&iommu->lock, flags);
1208 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1209 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1214 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1216 return iommu_queue_command_sync(iommu, cmd, true);
1223 static int iommu_completion_wait(struct amd_iommu *iommu)
1230 if (!iommu->need_sync)
1233 data = atomic64_add_return(1, &iommu->cmd_sem_val);
1234 build_completion_wait(&cmd, iommu, data);
1236 raw_spin_lock_irqsave(&iommu->lock, flags);
1238 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1242 ret = wait_on_sem(iommu, data);
1245 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1250 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1256 return iommu_queue_command(iommu, &cmd);
1259 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1262 u16 last_bdf = iommu->pci_seg->last_bdf;
1265 iommu_flush_dte(iommu, devid);
1267 iommu_completion_wait(iommu);
1274 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1277 u16 last_bdf = iommu->pci_seg->last_bdf;
1283 iommu_queue_command(iommu, &cmd);
1286 iommu_completion_wait(iommu);
1289 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1295 iommu_queue_command(iommu, &cmd);
1297 iommu_completion_wait(iommu);
1300 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1306 iommu_queue_command(iommu, &cmd);
1307 iommu_completion_wait(iommu);
1310 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1316 iommu_queue_command(iommu, &cmd);
1319 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1322 u16 last_bdf = iommu->pci_seg->last_bdf;
1324 if (iommu->irtcachedis_enabled)
1328 iommu_flush_irt(iommu, devid);
1330 iommu_completion_wait(iommu);
1333 void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
1336 amd_iommu_flush_all(iommu);
1338 amd_iommu_flush_dte_all(iommu);
1339 amd_iommu_flush_irt_all(iommu);
1340 amd_iommu_flush_tlb_all(iommu);
1350 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1357 return iommu_queue_command(iommu, &cmd);
1362 struct amd_iommu *iommu = data;
1364 return iommu_flush_dte(iommu, alias);
1372 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1383 device_flush_dte_alias, iommu);
1385 ret = iommu_flush_dte(iommu, dev_data->devid);
1389 pci_seg = iommu->pci_seg;
1392 ret = iommu_flush_dte(iommu, alias);
1414 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1420 ret |= iommu_queue_command(iommu, &cmd);
1540 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1544 iommu_queue_command(iommu, &cmd);
1549 iommu_completion_wait(iommu);
1605 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
1607 set_dte_entry(iommu, dev_data);
1608 clone_aliases(iommu, dev_data->dev);
1630 struct amd_iommu *iommu;
1634 iommu = get_amd_iommu_from_dev(dev);
1639 return iommu_queue_command(iommu, &cmd);
1744 struct amd_iommu *iommu, int pasids)
1747 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
1854 static void set_dte_entry(struct amd_iommu *iommu,
1863 struct dev_table_entry *dev_table = get_dev_table(iommu);
1945 amd_iommu_flush_tlb_domid(iommu, old_domid);
1949 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
1951 struct dev_table_entry *dev_table = get_dev_table(iommu);
1961 amd_iommu_apply_erratum_63(iommu, devid);
1967 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1970 set_dte_entry(iommu, dev_data);
1972 clear_dte_entry(iommu, dev_data->devid);
1974 clone_aliases(iommu, dev_data->dev);
1976 iommu_completion_wait(iommu);
1986 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2001 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
2034 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2047 domain->dev_iommu[iommu->index] += 1;
2064 if (amd_iommu_iopf_add_device(iommu, dev_data))
2080 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2097 domain->dev_iommu[iommu->index] -= 1;
2140 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2168 amd_iommu_iopf_remove_device(iommu, dev_data);
2182 struct amd_iommu *iommu;
2189 iommu = rlookup_amd_iommu(dev);
2190 if (!iommu)
2194 if (!iommu->iommu.ops)
2198 return &iommu->iommu;
2200 ret = iommu_init_device(iommu, dev);
2204 iommu_ignore_device(iommu, dev);
2206 amd_iommu_set_pci_msi_domain(dev, iommu);
2207 iommu_dev = &iommu->iommu;
2217 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
2221 iommu_completion_wait(iommu);
2228 struct amd_iommu *iommu;
2233 iommu = rlookup_amd_iommu(dev);
2234 if (!iommu)
2238 iommu_completion_wait(iommu);
2393 static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2395 return iommu && (iommu->features & FEATURE_HDSUP);
2403 struct amd_iommu *iommu = NULL;
2406 iommu = get_amd_iommu_from_dev(dev);
2415 if (dirty_tracking && !amd_iommu_hd_support(iommu))
2426 if (iommu) {
2428 domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
2429 domain->domain.ops = iommu->iommu.ops->default_domain_ops;
2487 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2503 if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2520 iommu_completion_wait(iommu);
2626 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2628 return amd_iommu_hd_support(iommu);
2644 struct amd_iommu *iommu;
2655 iommu = get_amd_iommu_from_dev_data(dev_data);
2657 dev_table = get_dev_table(iommu);
2706 struct amd_iommu *iommu;
2715 iommu = get_amd_iommu_from_dev(dev);
2716 pci_seg = iommu->pci_seg;
2900 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
2907 if (iommu->irtcachedis_enabled)
2911 data = atomic64_add_return(1, &iommu->cmd_sem_val);
2912 build_completion_wait(&cmd2, iommu, data);
2914 raw_spin_lock_irqsave(&iommu->lock, flags);
2915 ret = __iommu_queue_command_sync(iommu, &cmd, true);
2918 ret = __iommu_queue_command_sync(iommu, &cmd2, false);
2921 wait_on_sem(iommu, data);
2923 raw_spin_unlock_irqrestore(&iommu->lock, flags);
2926 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
2930 struct dev_table_entry *dev_table = get_dev_table(iommu);
2942 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
2945 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2948 "%s: no iommu for devid %x:%x\n",
2984 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
2987 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
2990 set_dte_irq_entry(iommu, devid, table);
2991 iommu_flush_dte(iommu, devid);
2999 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
3001 if (!iommu)
3004 pci_seg = iommu->pci_seg;
3006 set_dte_irq_entry(iommu, alias, table);
3012 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
3023 pci_seg = iommu->pci_seg;
3031 set_remap_table_entry(iommu, devid, table);
3049 set_remap_table_entry(iommu, devid, table);
3060 set_remap_table_entry(iommu, devid, table);
3063 set_remap_table_entry(iommu, alias, table);
3066 iommu_completion_wait(iommu);
3078 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3085 table = alloc_irq_table(iommu, devid, pdev);
3097 if (!iommu->irte_ops->is_allocated(table, index)) {
3107 iommu->irte_ops->set_allocated(table, index - c + 1);
3124 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3132 table = get_irq_table(iommu, devid);
3155 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3160 ret = __modify_irte_ga(iommu, devid, index, irte);
3164 iommu_flush_irt_and_complete(iommu, devid);
3169 static int modify_irte(struct amd_iommu *iommu,
3175 table = get_irq_table(iommu, devid);
3183 iommu_flush_irt_and_complete(iommu, devid);
3188 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3193 table = get_irq_table(iommu, devid);
3198 iommu->irte_ops->clear_allocated(table, index);
3201 iommu_flush_irt_and_complete(iommu, devid);
3234 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3239 modify_irte(iommu, devid, index, irte);
3242 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3247 modify_irte_ga(iommu, devid, index, irte);
3250 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3255 modify_irte(iommu, devid, index, irte);
3258 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3263 modify_irte_ga(iommu, devid, index, irte);
3266 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3273 modify_irte(iommu, devid, index, irte);
3276 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3287 modify_irte_ga(iommu, devid, index, irte);
3375 struct amd_iommu *iommu = data->iommu;
3377 if (!iommu)
3382 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
3426 struct amd_iommu *iommu;
3442 iommu = __rlookup_amd_iommu(seg, devid);
3443 if (!iommu)
3453 table = alloc_irq_table(iommu, devid, NULL);
3462 iommu->irte_ops->set_allocated(table, i);
3473 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3476 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
3508 data->iommu = iommu;
3525 free_irte(iommu, devid, index + i);
3544 free_irte(data->iommu, irte_info->devid, irte_info->index);
3552 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3562 struct amd_iommu *iommu = data->iommu;
3565 if (!iommu)
3568 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3570 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3579 struct amd_iommu *iommu = data->iommu;
3581 if (iommu)
3582 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3589 struct amd_iommu *iommu;
3602 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3604 return iommu && iommu->ir_domain == d;
3636 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3666 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3680 if (ir_data->iommu == NULL)
3683 dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
3728 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3738 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
3750 struct amd_iommu *iommu = ir_data->iommu;
3753 if (!iommu)
3760 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
3792 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
3796 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
3799 iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
3800 fn, &amd_ir_domain_ops, iommu);
3801 if (!iommu->ir_domain) {
3806 irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
3807 iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
3809 iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
3823 if (!ir_data->iommu)
3834 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
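
The densest cluster above (source lines 990 to 1245: wait_on_sem(), copy_cmd_to_buffer(), __iommu_queue_command_sync(), iommu_queue_command_sync() and iommu_completion_wait()) is the driver's command-queue path: a command is copied into a ring buffer, the tail is advanced, and a completion-wait command carrying a sequence number is queued so the caller can poll until everything queued before it has been consumed. The following is a minimal, self-contained userspace sketch of that producer/consumer pattern, not the kernel implementation; every identifier in it (sketch_iommu, sketch_cmd, queue_command, completion_wait, and the inline stand-in for the hardware consumer) is hypothetical.

    /*
     * Minimal userspace sketch (not kernel code) of the command-queue pattern
     * surfaced by the listing: commands go into a power-of-two ring buffer,
     * the tail is advanced, and a COMPLETION_WAIT-style command makes the
     * consumer publish a sequence number that the producer then polls for.
     * All names below are hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CMD_BUFFER_SIZE   4096          /* bytes, power of two              */
    #define CMD_SIZE          16            /* one command entry                */
    #define LOOP_TIMEOUT      100000        /* polling bound for the wait       */

    struct sketch_cmd {
    	uint64_t data[2];               /* opaque 128-bit command           */
    };

    struct sketch_iommu {
    	uint8_t  cmd_buf[CMD_BUFFER_SIZE];
    	uint32_t cmd_buf_head;          /* consumer position (bytes)        */
    	uint32_t cmd_buf_tail;          /* producer position (bytes)        */
    	volatile uint64_t cmd_sem;      /* completion semaphore value       */
    	uint64_t cmd_sem_val;           /* last sequence number handed out  */
    };

    /* Copy one command into the ring and advance the tail. */
    static int queue_command(struct sketch_iommu *iommu, const struct sketch_cmd *cmd)
    {
    	uint32_t next_tail = (iommu->cmd_buf_tail + CMD_SIZE) % CMD_BUFFER_SIZE;

    	/* Ring full: the new tail would run into the consumer's head. */
    	if (next_tail == iommu->cmd_buf_head)
    		return -1;

    	memcpy(iommu->cmd_buf + iommu->cmd_buf_tail, cmd, CMD_SIZE);
    	iommu->cmd_buf_tail = next_tail;
    	/* A real driver would also write the new tail to an MMIO register. */
    	return 0;
    }

    /* Poll the semaphore until the consumer has published 'data'. */
    static int wait_on_sem(struct sketch_iommu *iommu, uint64_t data)
    {
    	for (unsigned long i = 0; i < LOOP_TIMEOUT; i++) {
    		if (iommu->cmd_sem == data)
    			return 0;
    	}
    	return -1;                      /* timed out                        */
    }

    /* Queue a completion-wait style command, then spin until it completes. */
    static int completion_wait(struct sketch_iommu *iommu)
    {
    	struct sketch_cmd cmd = { .data = { 0 } };
    	uint64_t seq = ++iommu->cmd_sem_val;

    	cmd.data[0] = seq;              /* value the consumer will store    */
    	if (queue_command(iommu, &cmd))
    		return -1;

    	/* Stand-in for the hardware: drain the ring and post 'seq'. */
    	iommu->cmd_buf_head = iommu->cmd_buf_tail;
    	iommu->cmd_sem = seq;

    	return wait_on_sem(iommu, seq);
    }

    int main(void)
    {
    	struct sketch_iommu iommu = { 0 };

    	printf("completion_wait: %s\n",
    	       completion_wait(&iommu) == 0 ? "ok" : "timeout");
    	return 0;
    }

The same queue-then-wait shape appears again in the interrupt-remapping entries at source lines 2900 to 2923 (iommu_flush_irt_and_complete()), where a flush command and a completion-wait are queued back to back under iommu->lock before wait_on_sem() is called.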