// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
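
/*
 * From user space this driver is exercised through ioctls on the
 * /dev/pci-endpoint-test.N character device created at probe time
 * (e.g. by the pcitest utility shipped under tools/pci/ in the kernel
 * tree), with the endpoint side running the pci-epf-test function
 * driver.
 */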

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_INTX				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

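/*
 * Register offsets below address the test register space that the
 * endpoint-side pci-epf-test function driver exposes through the test
 * BAR; the layout must stay in sync with that driver.
 */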
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

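/*
 * Completion handler shared by all test commands: the endpoint sets
 * STATUS_IRQ_RAISED and raises the configured interrupt; record which
 * vector fired and wake up the waiter.
 */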
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

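/*
 * Allocate interrupt vectors for the requested type: a single INTX
 * vector, up to 32 MSI vectors, or up to 2048 MSI-X vectors.
 */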
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (test->irq_type) {
	case IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

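/*
 * BAR test: fill the BAR with a per-BAR pattern and read it back.
 * Only the first 4 bytes are exercised for the test register BAR, since
 * the rest of that BAR holds the live test registers.
 */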
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j,
					     bar_test_pattern[barno]);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != bar_test_pattern[barno])
			return false;
	}

	return true;
}

static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

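/*
 * MSI/MSI-X test: ask the endpoint to raise interrupt number msi_num
 * (1-based) and check that the vector that fired matches
 * pci_irq_vector(pdev, msi_num - 1).
 */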
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

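/*
 * COPY test: allocate a source buffer filled with random data and a
 * destination buffer, program both bus addresses and the size into the
 * test registers, and ask the endpoint to copy source to destination
 * (optionally using its DMA engine).  Success is a matching CRC32 over
 * both buffers.
 */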
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

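/*
 * WRITE test, named from the host's point of view: the host prepares a
 * random buffer and its CRC32, then issues COMMAND_READ so that the
 * endpoint reads the buffer from host memory, verifies the checksum,
 * and reports the result through STATUS_READ_SUCCESS.
 */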
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

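/*
 * READ test, the mirror image of the WRITE test: COMMAND_WRITE makes
 * the endpoint write a buffer into host memory and publish its CRC32 in
 * the checksum register; the host recomputes the CRC32 and compares.
 */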
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

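/*
 * Switch the driver to a new IRQ type: release the current vectors and
 * handlers, then allocate and request vectors of the requested type.
 */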
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

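/*
 * ioctl dispatcher; all commands are serialized by test->mutex.  The
 * BAR test refuses BAR_0 on AM654 devices (see is_am654_pci_dev()).
 */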
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = test->irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

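/*
 * Probe: enable the device, set a 48-bit (falling back to 32-bit) DMA
 * mask, map all memory BARs, allocate IRQ vectors of the configured
 * type, and register a /dev/pci-endpoint-test.N misc device for the
 * ioctl interface.
 */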
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

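/*
 * Teardown mirrors probe; the instance id is recovered from the misc
 * device name so that it can be returned to the IDA.
 */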
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

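/*
 * Per-device configuration.  Devices matched without driver_data fall
 * back to BAR_0 as the test register BAR, no extra buffer alignment,
 * and the module-level irq_type parameter.
 */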
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");