// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *	    Sohil Mehta <sohil.mehta@intel.com>
 *	    Jacob Pan <jacob.jun.pan@linux.intel.com>
 *	    Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"

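/*
 * State carried through a root/context/PASID table walk. print_tbl_walk()
 * consumes it via m->private to emit one line per present context entry.
 */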
struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};

struct iommu_regset {
	int offset;
	const char *regs;
};

#define DEBUG_BUFFER_SIZE	1024
static char debug_buf[DEBUG_BUFFER_SIZE];

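/*
 * Pair a register's MMIO offset (DMAR_<reg>_REG) with its stringified
 * name, so the dump loops below can print both without duplication.
 */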
#define IOMMU_REGSET_ENTRY(_reg_)					\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }

static const struct iommu_regset iommu_regs_32[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
};

static const struct iommu_regset iommu_regs_64[] = {
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
};

static struct dentry *intel_iommu_debug;

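/* Dump the 32-bit and 64-bit registers of every active IOMMU unit. */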
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the 32-bit and 64-bit hardware
		 * registers by adding each register's offset to the remapped
		 * base (a virtual address).
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0; i < ARRAY_SIZE(iommu_regs_32); i++) {
			value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_32[i].regs, iommu_regs_32[i].offset,
				   value);
		}
		for (i = 0; i < ARRAY_SIZE(iommu_regs_64); i++) {
			value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_64[i].regs, iommu_regs_64[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);

static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy-mode DMAR doesn't support PASID, so default the PASID
	 * to -1 to indicate that it's invalid, and default all
	 * PASID-related fields to 0.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}

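/*
 * A PASID decomposes into a directory index and a table index:
 * pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx. For example, assuming
 * PASID_PDE_SHIFT is 6, PASID 0x47 maps to dir_idx 1 and tbl_idx 7.
 */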
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}

static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}

static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * The scalable mode root entry points to an upper and a
		 * lower scalable mode context table. Each scalable mode
		 * context table has 128 context entries, whereas a legacy
		 * mode context table has 256. So in scalable mode, the
		 * context entries for the first 128 devices live in the
		 * lower context table, while those for the remaining 128
		 * devices live in the upper one. When devfn > 127,
		 * iommu_context_addr() automatically refers to the upper
		 * scalable mode context table, so the caller doesn't have
		 * to worry about the difference between scalable and
		 * non-scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}

static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	u16 bus;

	spin_lock(&iommu->lock);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check whether the root entry is present, because
	 * iommu_context_addr() performs the same check before returning
	 * the context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);
	spin_unlock(&iommu->lock);
}

static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

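/*
 * IOVA size covered by one page-table entry at a given level. Assuming
 * the usual 4KiB page (VTD_PAGE_SHIFT == 12) and 9-bit stride
 * (VTD_STRIDE_SHIFT == 9), a level-1 entry covers 4KiB, a level-2
 * entry 2MiB and a level-3 entry 1GiB.
 */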
static inline unsigned long level_to_directory_size(int level)
{
	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx",
		   iova >> VTD_PAGE_SHIFT, path[5], path[4], path[3]);
	if (path[2]) {
		seq_printf(m, "\t0x%016llx", path[2]);
		if (path[1])
			seq_printf(m, "\t0x%016llx", path[1]);
	}
	seq_putc(m, '\n');
}

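/*
 * path[] caches the PTE value at each paging level while recursing:
 * path[5] is the PML5E, path[4] the PML4E, path[3] the PDPE, path[2]
 * the PDE and path[1] the PTE, matching the column order printed by
 * dump_page_info().
 */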
static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
			       int level, unsigned long start,
			       u64 *path)
{
	int i;

	if (level > 5 || level < 1)
		return;

	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
			i++, pde++, start += level_to_directory_size(level)) {
		if (!dma_pte_present(pde))
			continue;

		path[level] = pde->val;
		if (dma_pte_superpage(pde) || level == 1)
			dump_page_info(m, start, path);
		else
			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
					   level - 1, start, path);
		path[level] = 0;
	}
}
346
347static int domain_translation_struct_show(struct seq_file *m,
348					  struct device_domain_info *info,
349					  ioasid_t pasid)
350{
351	bool scalable, found = false;
352	struct dmar_drhd_unit *drhd;
353	struct intel_iommu *iommu;
354	u16 devfn, bus, seg;
355
356	bus = info->bus;
357	devfn = info->devfn;
358	seg = info->segment;
359
360	rcu_read_lock();
361	for_each_active_iommu(iommu, drhd) {
362		struct context_entry *context;
363		u64 pgd, path[6] = { 0 };
364		u32 sts, agaw;
365
366		if (seg != iommu->segment)
367			continue;
368
369		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
370		if (!(sts & DMA_GSTS_TES)) {
371			seq_printf(m, "DMA Remapping is not enabled on %s\n",
372				   iommu->name);
373			continue;
374		}
375		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
376			scalable = true;
377		else
378			scalable = false;
379
380		/*
381		 * The iommu->lock is held across the callback, which will
382		 * block calls to domain_attach/domain_detach. Hence,
383		 * the domain of the device will not change during traversal.
384		 *
385		 * Traversing page table possibly races with the iommu_unmap()
386		 * interface. This could be solved by RCU-freeing the page
387		 * table pages in the iommu_unmap() path.
388		 */
389		spin_lock(&iommu->lock);
390
391		context = iommu_context_addr(iommu, bus, devfn, 0);
392		if (!context || !context_present(context))
393			goto iommu_unlock;
394
395		if (scalable) {	/* scalable mode */
396			struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
397			struct pasid_dir_entry *dir_tbl, *dir_entry;
398			u16 dir_idx, tbl_idx, pgtt;
399			u64 pasid_dir_ptr;
400
401			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
402
403			/* Dump specified device domain mappings with PASID. */
404			dir_idx = pasid >> PASID_PDE_SHIFT;
405			tbl_idx = pasid & PASID_PTE_MASK;
406
407			dir_tbl = phys_to_virt(pasid_dir_ptr);
408			dir_entry = &dir_tbl[dir_idx];
409
410			pasid_tbl = get_pasid_table_from_pde(dir_entry);
411			if (!pasid_tbl)
412				goto iommu_unlock;
413
414			pasid_tbl_entry = &pasid_tbl[tbl_idx];
415			if (!pasid_pte_is_present(pasid_tbl_entry))
416				goto iommu_unlock;
417
418			/*
419			 * According to PASID Granular Translation Type(PGTT),
420			 * get the page table pointer.
421			 */
422			pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
423			agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;
424
425			switch (pgtt) {
426			case PASID_ENTRY_PGTT_FL_ONLY:
427				pgd = pasid_tbl_entry->val[2];
428				break;
429			case PASID_ENTRY_PGTT_SL_ONLY:
430			case PASID_ENTRY_PGTT_NESTED:
431				pgd = pasid_tbl_entry->val[0];
432				break;
433			default:
434				goto iommu_unlock;
435			}
436			pgd &= VTD_PAGE_MASK;
437		} else { /* legacy mode */
438			pgd = context->lo & VTD_PAGE_MASK;
439			agaw = context->hi & 7;
440		}
441
442		seq_printf(m, "Device %04x:%02x:%02x.%x ",
443			   iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
444
445		if (scalable)
446			seq_printf(m, "with pasid %x @0x%llx\n", pasid, pgd);
447		else
448			seq_printf(m, "@0x%llx\n", pgd);
449
450		seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
451			   "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
452		pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);
453
454		found = true;
455iommu_unlock:
456		spin_unlock(&iommu->lock);
457		if (found)
458			break;
459	}
460	rcu_read_unlock();
461
462	return 0;
463}

static int dev_domain_translation_struct_show(struct seq_file *m, void *unused)
{
	struct device_domain_info *info = (struct device_domain_info *)m->private;

	return domain_translation_struct_show(m, info, IOMMU_NO_PASID);
}
DEFINE_SHOW_ATTRIBUTE(dev_domain_translation_struct);

static int pasid_domain_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dev_pasid_info *dev_pasid = (struct dev_pasid_info *)m->private;
	struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);

	return domain_translation_struct_show(m, info, dev_pasid->pasid);
}
DEFINE_SHOW_ATTRIBUTE(pasid_domain_translation_struct);

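/*
 * A scalable-mode IOMMU (ecap_smts) uses 256-bit invalidation
 * descriptors (qw0-qw3); otherwise descriptors are 128 bits (qw0-qw1).
 * qi_shift() reflects the descriptor size when indexing into the queue.
 */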
static void invalidation_queue_entry_show(struct seq_file *m,
					  struct intel_iommu *iommu)
{
	int index, shift = qi_shift(iommu);
	struct qi_desc *desc;
	int offset;

	if (ecap_smts(iommu->ecap))
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
	else
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");

	for (index = 0; index < QI_LENGTH; index++) {
		offset = index << shift;
		desc = iommu->qi->desc + offset;
		if (ecap_smts(iommu->ecap))
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   desc->qw2, desc->qw3,
				   iommu->qi->desc_status[index]);
		else
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   iommu->qi->desc_status[index]);
	}
}

static int invalidation_queue_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flags;
	struct q_inval *qi;
	int shift;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		qi = iommu->qi;
		if (!qi || !ecap_qis(iommu->ecap))
			continue;

		shift = qi_shift(iommu);
		seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
			   (u64)virt_to_phys(qi->desc),
			   dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
			   dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
		invalidation_queue_entry_show(m, iommu);
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(invalidation_queue);

#ifdef CONFIG_IRQ_REMAP
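/*
 * Dump the present remapped-format IRTEs of an IOMMU's interrupt
 * remapping table. Posted-format entries (p_pst set) are skipped here;
 * ir_tbl_posted_entry_show() prints those instead.
 */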
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For each active IOMMU, walk the interrupt remapping table and print
 * the valid entries, in table format, for both remapped and posted
 * interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

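/*
 * Print one IOMMU's invalidation latency statistics; the snapshot text
 * is formatted by dmar_latency_snapshot() into the shared debug_buf.
 */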
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
			     struct dmar_drhd_unit *drhd)
{
	int ret;

	seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
		   iommu->name, drhd->reg_base_addr);

	ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
	if (ret < 0)
		seq_puts(m, "Failed to get latency snapshot");
	else
		seq_puts(m, debug_buf);
	seq_puts(m, "\n");
}

static int latency_show(struct seq_file *m, void *v)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		latency_show_one(m, iommu, drhd);
	rcu_read_unlock();

	return 0;
}

static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, latency_show, NULL);
}

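/*
 * Select which latency samples are collected. Writing '0' disables all
 * sampling; '1'-'4' enable sampling of IOTLB invalidation, device-TLB
 * invalidation, IEC invalidation and page request latency,
 * respectively, on all active IOMMUs.
 */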
static ssize_t dmar_perf_latency_write(struct file *filp,
				       const char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int counting;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (kstrtoint(buf, 0, &counting))
		return -EINVAL;

	switch (counting) {
	case 0:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd) {
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
			dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
		}
		rcu_read_unlock();
		break;
	case 1:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
		rcu_read_unlock();
		break;
	case 2:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
		rcu_read_unlock();
		break;
	case 3:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
		rcu_read_unlock();
		break;
	case 4:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
		rcu_read_unlock();
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;
	return cnt;
}

static const struct file_operations dmar_perf_latency_fops = {
	.open		= dmar_perf_latency_open,
	.write		= dmar_perf_latency_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

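/*
 * Create the Intel IOMMU debugfs files under
 * /sys/kernel/debug/iommu/intel/ (iommu_regset,
 * dmar_translation_struct, invalidation_queue, ir_translation_struct
 * and dmar_perf_latency).
 */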
void __init intel_iommu_debugfs_init(void)
{
	intel_iommu_debug = debugfs_create_dir("intel", iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
	debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
			    NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
	debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
			    NULL, &dmar_perf_latency_fops);
}

/*
 * Create a debugfs directory for each device, and then create a
 * debugfs file in this directory for users to dump the page table
 * of the default domain, e.g.
 * /sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct
 */
void intel_iommu_debugfs_create_dev(struct device_domain_info *info)
{
	info->debugfs_dentry = debugfs_create_dir(dev_name(info->dev), intel_iommu_debug);

	debugfs_create_file("domain_translation_struct", 0444, info->debugfs_dentry,
			    info, &dev_domain_translation_struct_fops);
}

/* Remove the device debugfs directory. */
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info)
{
	debugfs_remove_recursive(info->debugfs_dentry);
}

/*
 * Create a debugfs directory per pair of {device, pasid}, then create the
 * corresponding debugfs file in this directory for users to dump its page
 * table, e.g.
 * /sys/kernel/debug/iommu/intel/0000:00:01.0/1/domain_translation_struct
 *
 * The debugfs only dumps the page tables whose mappings are created and
 * destroyed by the iommu_map/unmap() interfaces. Check the mapping type
 * of the domain before creating the debugfs directory.
 */
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
	char dir_name[10];

	sprintf(dir_name, "%x", dev_pasid->pasid);
	dev_pasid->debugfs_dentry = debugfs_create_dir(dir_name, info->debugfs_dentry);

	debugfs_create_file("domain_translation_struct", 0444, dev_pasid->debugfs_dentry,
			    dev_pasid, &pasid_domain_translation_struct_fops);
}

/* Remove the device pasid debugfs directory. */
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid)
{
	debugfs_remove_recursive(dev_pasid->debugfs_dentry);
}