1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3
4/*
5 * nfp6000_pcie.c
6 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
7 *          Jason McMullan <jason.mcmullan@netronome.com>
8 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
9 *
10 * Multiplexes the NFP BARs between NFP internal resources and
11 * implements the PCIe specific interface for generic CPP bus access.
12 *
13 * The BARs are managed with refcounts and are allocated/acquired
14 * using target, token and offset/size matching.  The generic CPP bus
15 * abstraction builds upon this BAR interface.
16 */
17
18#include <asm/unaligned.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/kref.h>
22#include <linux/io.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/sort.h>
26#include <linux/sched.h>
27#include <linux/types.h>
28#include <linux/pci.h>
29
30#include "nfp_cpp.h"
31#include "nfp_dev.h"
32
33#include "nfp6000/nfp6000.h"
34
35#include "nfp6000_pcie.h"
36
37#define NFP_PCIE_BAR(_pf)	(0x30000 + ((_pf) & 7) * 0xc0)
38#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
39	(0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
40#define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x)     (((_x) & 0x3) << 30)
41#define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x)  (((_x) >> 30) & 0x3)
42#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x)          (((_x) & 0x3) << 28)
43#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x)       (((_x) >> 28) & 0x3)
44#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x)        (((_x) & 0xffffff) << 0)
45#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x)     (((_x) >> 0) & 0xffffff)
46#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
47	(0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
48#define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x)      (((_x) & 0x7f) << 24)
49#define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x)   (((_x) >> 24) & 0x7f)
50#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x)     (((_x) & 0x3ff) << 14)
51#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x)  (((_x) >> 14) & 0x3ff)
52#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x)        (((_x) & 0x3fff) << 0)
53#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x)     (((_x) >> 0) & 0x3fff)
54#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
55	(0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
56#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x)         (((_x) & 0xf) << 28)
57#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x)      (((_x) >> 28) & 0xf)
58#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x)         (((_x) & 0x1f) << 23)
59#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x)      (((_x) >> 23) & 0x1f)
60#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x)         (((_x) & 0x1f) << 18)
61#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x)      (((_x) >> 18) & 0x1f)
62#define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x)       (((_x) & 0xff) << 10)
63#define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x)    (((_x) >> 10) & 0xff)
64#define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x)   (((_x) & 0x3ff) << 0)
65#define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
66
67#define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x)  (((_x) & 0x1f) << 16)
68#define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
69#define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x)         (((_x) & 0xffff) << 0)
70#define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x)      (((_x) >> 0) & 0xffff)
71#define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x)        (((_x) & 0x3) << 27)
72#define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x)     (((_x) >> 27) & 0x3)
73#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT    0
74#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT    1
75#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE    3
76#define   NFP_PCIE_BAR_PCIE2CPP_MapType(_x)             (((_x) & 0x7) << 29)
77#define   NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x)          (((_x) >> 29) & 0x7)
78#define     NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED         0
79#define     NFP_PCIE_BAR_PCIE2CPP_MapType_BULK          1
80#define     NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET        2
81#define     NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL       3
82#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0     4
83#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1     5
84#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2     6
85#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3     7
86#define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x)  (((_x) & 0xf) << 23)
87#define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
88#define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x)   (((_x) & 0x3) << 21)
89#define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
90#define NFP_PCIE_EM                                     0x020000
91#define NFP_PCIE_SRAM                                   0x000000
92
93/* Minimal size of the PCIe cfg memory we depend on being mapped,
94 * queue controller and DMA controller don't have to be covered.
95 */
96#define NFP_PCI_MIN_MAP_SIZE				0x080000
97
98#define NFP_PCIE_P2C_FIXED_SIZE(bar)               (1 << (bar)->bitsize)
99#define NFP_PCIE_P2C_BULK_SIZE(bar)                (1 << (bar)->bitsize)
100#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
101#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
102#define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
103
104#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index)		((bar_index) * 4)
105
106/* The number of explicit BARs to reserve.
107 * Minimum is 0, maximum is 4 on the NFP6000.
108 * The NFP3800 can have only one per PF.
109 */
110#define NFP_PCIE_EXPLICIT_BARS		2
111
112struct nfp6000_pcie;
113struct nfp6000_area_priv;
114
115/**
116 * struct nfp_bar - describes BAR configuration and usage
117 * @nfp:	backlink to owner
118 * @barcfg:	cached contents of BAR config CSR
119 * @base:	the BAR's base CPP offset
120 * @mask:       mask for the BAR aperture (read only)
121 * @bitsize:	bitsize of BAR aperture (read only)
122 * @index:	index of the BAR
123 * @refcnt:	number of current users
124 * @iomem:	mapped IO memory
125 * @resource:	iomem resource window
126 */
127struct nfp_bar {
128	struct nfp6000_pcie *nfp;
129	u32 barcfg;
130	u64 base;          /* CPP address base */
131	u64 mask;          /* Bit mask of the bar */
132	u32 bitsize;       /* Bit size of the bar */
133	int index;
134	atomic_t refcnt;
135
136	void __iomem *iomem;
137	struct resource *resource;
138};
139
140#define NFP_PCI_BAR_MAX    (PCI_64BIT_BAR_COUNT * 8)
141
/* Per-device state: the PCI device, its BAR bookkeeping, the reserved
 * iomem windows carved out by enable_bars(), and the explicit access
 * groups used for unaligned/explicit CPP transactions.
 */
struct nfp6000_pcie {
	struct pci_dev *pdev;
	struct device *dev;
	const struct nfp_dev_info *dev_info;

	/* PCI BAR management */
	spinlock_t bar_lock;		/* Protect the PCI2CPP BAR cache */
	int bars;			/* Number of usable entries in bar[] */
	struct nfp_bar bar[NFP_PCI_BAR_MAX];
	wait_queue_head_t bar_waiters;	/* Woken when a BAR refcount drops to 0 */

	/* Reserved BAR access */
	struct {
		void __iomem *csr;	/* BAR config CSRs (BAR0.0 mapping) */
		void __iomem *em;	/* Event manager window */
		void __iomem *expl[4];	/* Explicit group windows (BAR0.4-0.7) */
	} iomem;

	/* Explicit IO access */
	struct {
		struct mutex mutex; /* Lock access to this explicit group */
		u8 master_id;
		u8 signal_ref;
		void __iomem *data;
		struct {
			void __iomem *addr;
			int bitsize;
			int free[4];	/* Per-area availability flags */
		} group[4];
	} expl;
};
173
/* Return the MapType field decoded from the BAR's cached config word. */
static u32 nfp_bar_maptype(struct nfp_bar *bar)
{
	return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
}
178
179static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
180{
181	return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
182}
183
184static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
185{
186	return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
187		+ nfp_bar_resource_len(bar) * (bar->index & 7);
188}
189
190#define TARGET_WIDTH_32    4
191#define TARGET_WIDTH_64    8
192
/* Compute the PCIe2CPP BAR config word and CPP base address needed to
 * cover an access of @size bytes at CPP @offset on (@tgt, @act, @tok)
 * with the given transfer @width (8/4/0 bytes).
 *
 * On success, fills *@bar_config and *@bar_base when non-NULL; passing
 * NULLs makes this a pure "would it fit?" check.  Returns -EINVAL if
 * the request cannot be represented in @bar.
 */
static int
compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
	    u32 *bar_config, u64 *bar_base,
	    int tgt, int act, int tok, u64 offset, size_t size, int width)
{
	int bitsize;
	u32 newcfg;

	if (tgt >= NFP_CPP_NUM_TARGETS)
		return -EINVAL;

	/* Encode the transfer width into the LengthSelect field */
	switch (width) {
	case 8:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
		break;
	case 4:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
		break;
	case 0:
		newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
		break;
	default:
		return -EINVAL;
	}

	if (act != NFP_CPP_ACTION_RW && act != 0) {
		/* Fixed CPP mapping with specific action */
		u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);

		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			  NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* Request must not straddle the BAR aperture */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;
		offset &= mask;

		/* FIXED mapping: 40-bit CPP address, 16 bits from the BAR base */
		bitsize = 40 - 16;
	} else {
		u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);

		/* Bulk mapping */
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);

		/* Request must not straddle the BAR aperture */
		if ((offset & mask) != ((offset + size - 1) & mask))
			return -EINVAL;

		offset &= mask;

		/* BULK mapping: 40-bit CPP address, 21 bits from the BAR base */
		bitsize = 40 - 21;
	}

	if (bar->bitsize < bitsize)
		return -EINVAL;

	/* High-order address bits go into the BaseAddress field */
	newcfg |= offset >> bitsize;

	if (bar_base)
		*bar_base = offset;

	if (bar_config)
		*bar_config = newcfg;

	return 0;
}
266
267static int
268nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
269{
270	unsigned int xbar;
271
272	xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
273
274	if (nfp->iomem.csr) {
275		writel(newcfg, nfp->iomem.csr + xbar);
276		/* Readback to ensure BAR is flushed */
277		readl(nfp->iomem.csr + xbar);
278	} else {
279		xbar += nfp->dev_info->pcie_cfg_expbar_offset;
280		pci_write_config_dword(nfp->pdev, xbar, newcfg);
281	}
282
283	bar->barcfg = newcfg;
284
285	return 0;
286}
287
288static int
289reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
290		int tgt, int act, int tok, u64 offset, size_t size, int width)
291{
292	u64 newbase;
293	u32 newcfg;
294	int err;
295
296	err = compute_bar(nfp, bar, &newcfg, &newbase,
297			  tgt, act, tok, offset, size, width);
298	if (err)
299		return err;
300
301	bar->base = newbase;
302
303	return nfp6000_bar_write(nfp, bar, newcfg);
304}
305
/* Check if BAR can be used with the given parameters.
 *
 * Decodes the BAR's cached config word and returns 1 when the BAR's
 * current mapping already covers the requested (tgt, act, tok,
 * offset..offset+size) window at the requested width, 0 otherwise.
 */
static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
			u64 offset, size_t size, int width)
{
	int bartgt, baract, bartok;
	int barwidth;
	u32 maptype;

	maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
	bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
	bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
	baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);

	/* Translate the encoded LengthSelect back to a byte width */
	barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
	switch (barwidth) {
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
		barwidth = 4;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
		barwidth = 8;
		break;
	case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
		barwidth = 0;
		break;
	default:
		barwidth = -1;	/* Never matches a valid request width */
		break;
	}

	/* Per map type, relax the fields the hardware does not match on:
	 * TARGET maps ignore the token, TARGET and BULK maps imply the
	 * read/write action.
	 */
	switch (maptype) {
	case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
		bartok = -1;
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
		baract = NFP_CPP_ACTION_RW;
		if (act == 0)
			act = NFP_CPP_ACTION_RW;
		fallthrough;
	case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
		break;
	default:
		/* We don't match explicit bars through the area interface */
		return 0;
	}

	/* Make sure to match up the width */
	if (barwidth != width)
		return 0;

	/* Wildcarded fields (-1) match anything; the BAR aperture must
	 * fully contain the requested window.
	 */
	if ((bartgt < 0 || bartgt == tgt) &&
	    (bartok < 0 || bartok == tok) &&
	    (baract == act) &&
	    bar->base <= offset &&
	    (bar->base + (1 << bar->bitsize)) >= (offset + size))
		return 1;

	/* No match */
	return 0;
}
365
366static int
367find_matching_bar(struct nfp6000_pcie *nfp,
368		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
369{
370	int n;
371
372	for (n = 0; n < nfp->bars; n++) {
373		struct nfp_bar *bar = &nfp->bar[n];
374
375		if (matching_bar(bar, tgt, act, tok, offset, size, width))
376			return n;
377	}
378
379	return -1;
380}
381
/* Return EAGAIN if no resource is available
 *
 * Find an unreferenced BAR that could be reprogrammed to cover the
 * request.  Returns the BAR index, -EAGAIN when all suitable BARs are
 * currently busy, or -EINVAL (with a WARN) when no BAR could ever
 * satisfy the request.  Caller must hold nfp->bar_lock.
 */
static int
find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
			int tgt, int act, int tok,
			u64 offset, size_t size, int width)
{
	int n, busy = 0;

	for (n = 0; n < nfp->bars; n++) {
		const struct nfp_bar *bar = &nfp->bar[n];
		int err;

		/* Skip BARs that were never initialized/mapped */
		if (!bar->bitsize)
			continue;

		/* Just check to see if we can make it fit... */
		err = compute_bar(nfp, bar, NULL, NULL,
				  tgt, act, tok, offset, size, width);
		if (err)
			continue;

		if (!atomic_read(&bar->refcnt))
			return n;

		busy++;
	}

	/* If nothing was even busy, the request itself is unsatisfiable */
	if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
		 tgt, act, tok, offset, size, width))
		return -EINVAL;

	return -EAGAIN;
}
415
/* Like find_unused_bar_noblock(), but takes bar_lock itself.
 *
 * On success (n >= 0) the function returns with bar_lock still HELD so
 * the caller can claim the BAR atomically; the __release() is only a
 * sparse annotation to balance the lock context, not a real unlock.
 * On failure the lock is dropped before returning.
 */
static int
find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
			 int tgt, int act, int tok,
			 u64 offset, size_t size, int width)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&nfp->bar_lock, flags);

	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
	if (n < 0)
		spin_unlock_irqrestore(&nfp->bar_lock, flags);
	else
		__release(&nfp->bar_lock);

	return n;
}
434
/* Take a reference on @bar, pinning its current configuration. */
static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	atomic_inc(&bar->refcnt);
}
439
/* Drop a reference on @bar; wake any BAR waiters once it becomes free. */
static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
{
	if (atomic_dec_and_test(&bar->refcnt))
		wake_up_interruptible(&nfp->bar_waiters);
}
445
/* Sleep until a BAR suitable for the request frees up (or a signal
 * arrives).  On a 0 return, *barnum holds find_unused_bar_and_lock()'s
 * result and, when *barnum >= 0, bar_lock is still held (see
 * find_unused_bar_and_lock()).  Returns -ERESTARTSYS if interrupted.
 */
static int
nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
{
	return wait_event_interruptible(nfp->bar_waiters,
		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
						    offset, size, width))
					!= -EAGAIN);
}
455
456static int
457nfp_alloc_bar(struct nfp6000_pcie *nfp,
458	      u32 tgt, u32 act, u32 tok,
459	      u64 offset, size_t size, int width, int nonblocking)
460{
461	unsigned long irqflags;
462	int barnum, retval;
463
464	if (size > (1 << 24))
465		return -EINVAL;
466
467	spin_lock_irqsave(&nfp->bar_lock, irqflags);
468	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
469	if (barnum >= 0) {
470		/* Found a perfect match. */
471		nfp_bar_get(nfp, &nfp->bar[barnum]);
472		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
473		return barnum;
474	}
475
476	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
477					 offset, size, width);
478	if (barnum < 0) {
479		if (nonblocking)
480			goto err_nobar;
481
482		/* Wait until a BAR becomes available.  The
483		 * find_unused_bar function will reclaim the bar_lock
484		 * if a free BAR is found.
485		 */
486		spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
487		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
488					  offset, size, width);
489		if (retval)
490			return retval;
491		__acquire(&nfp->bar_lock);
492	}
493
494	nfp_bar_get(nfp, &nfp->bar[barnum]);
495	retval = reconfigure_bar(nfp, &nfp->bar[barnum],
496				 tgt, act, tok, offset, size, width);
497	if (retval < 0) {
498		nfp_bar_put(nfp, &nfp->bar[barnum]);
499		barnum = retval;
500	}
501
502err_nobar:
503	spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
504	return barnum;
505}
506
507static void disable_bars(struct nfp6000_pcie *nfp);
508
509static int bar_cmp(const void *aptr, const void *bptr)
510{
511	const struct nfp_bar *a = aptr, *b = bptr;
512
513	if (a->bitsize == b->bitsize)
514		return a->index - b->index;
515	else
516		return a->bitsize - b->bitsize;
517}
518
519/* Map all PCI bars and fetch the actual BAR configurations from the
520 * board.  We assume that the BAR with the PCIe config block is
521 * already mapped.
522 *
523 * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
524 * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
525 * BAR0.2: --
526 * BAR0.3: --
527 * BAR0.4: Reserved for Explicit 0.0-0.3 access
528 * BAR0.5: Reserved for Explicit 1.0-1.3 access
529 * BAR0.6: Reserved for Explicit 2.0-2.3 access
530 * BAR0.7: Reserved for Explicit 3.0-3.3 access
531 *
532 * BAR1.0-BAR1.7: --
533 * BAR2.0-BAR2.7: --
534 */
/* Initialize all sub-BAR descriptors, map and lock down the reserved
 * windows (general/MSI-X SRAM, XPB, explicit groups), and sort the
 * remaining BARs by size for allocation.  @interface identifies this
 * PCIe unit for explicit-access master/signal IDs.  Returns 0 or a
 * negative errno.
 */
static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
{
	const u32 barcfg_msix_general =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
	const u32 barcfg_msix_xpb =
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
		NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
			NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
		NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
			NFP_CPP_TARGET_ISLAND_XPB);
	const u32 barcfg_explicit[4] = {
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
		NFP_PCIE_BAR_PCIE2CPP_MapType(
			NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
	};
	char status_msg[196] = {};
	int i, err, bars_free;
	struct nfp_bar *bar;
	int expl_groups;
	char *msg, *end;

	/* Build up a one-line summary of the reserved BARs as we go */
	msg = status_msg +
		snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: ");
	end = status_msg + sizeof(status_msg) - 1;

	/* Describe every sub-BAR; i>>3 selects the physical (64-bit,
	 * even-numbered) PCI BAR each group of 8 sub-BARs lives in.
	 */
	bar = &nfp->bar[0];
	for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
		struct resource *res;

		res = &nfp->pdev->resource[(i >> 3) * 2];

		/* Skip over BARs that are not IORESOURCE_MEM */
		if (!(resource_type(res) & IORESOURCE_MEM)) {
			bar--;
			continue;
		}

		bar->resource = res;
		bar->barcfg = 0;

		bar->nfp = nfp;
		bar->index = i;
		bar->mask = nfp_bar_resource_len(bar) - 1;
		bar->bitsize = fls(bar->mask);
		bar->base = 0;
		bar->iomem = NULL;
	}

	nfp->bars = bar - &nfp->bar[0];
	if (nfp->bars < 8) {
		dev_err(nfp->dev, "No usable BARs found!\n");
		return -EINVAL;
	}

	bars_free = nfp->bars;

	/* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
	 */
	mutex_init(&nfp->expl.mutex);

	nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
		<< 4;
	nfp->expl.signal_ref = 0x10;

	/* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
	bar = &nfp->bar[0];
	if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE)
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
	if (bar->iomem) {
		int pf;

		msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
		atomic_inc(&bar->refcnt);
		bars_free--;

		nfp6000_bar_write(nfp, bar, barcfg_msix_general);

		nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
			nfp->dev_info->pcie_expl_offset;

		/* CSR window location is per-PF on NFP3800, shared on
		 * the NFP4000/5000/6000 family.
		 */
		switch (nfp->pdev->device) {
		case PCI_DEVICE_ID_NFP3800:
			pf = nfp->pdev->devfn & 7;
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
			break;
		case PCI_DEVICE_ID_NFP4000:
		case PCI_DEVICE_ID_NFP5000:
		case PCI_DEVICE_ID_NFP6000:
			nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
			break;
		default:
			dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
				nfp->pdev->device);
			err = -EINVAL;
			goto err_unmap_bar0;
		}
		nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
	}

	/* See the comment above NFP_PCIE_EXPLICIT_BARS: the NFP3800
	 * supports only one explicit group per PF.
	 */
	switch (nfp->pdev->device) {
	case PCI_DEVICE_ID_NFP3800:
		expl_groups = 1;
		break;
	case PCI_DEVICE_ID_NFP4000:
	case PCI_DEVICE_ID_NFP5000:
	case PCI_DEVICE_ID_NFP6000:
		expl_groups = 4;
		break;
	default:
		dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
			nfp->pdev->device);
		err = -EINVAL;
		goto err_unmap_bar0;
	}

	/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
	bar = &nfp->bar[1];
	msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
	atomic_inc(&bar->refcnt);
	bars_free--;

	nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);

	/* Use BAR0.4..BAR0.7 for EXPL IO */
	for (i = 0; i < 4; i++) {
		int j;

		if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
			nfp->expl.group[i].bitsize = 0;
			continue;
		}

		bar = &nfp->bar[4 + i];
		bar->iomem = ioremap(nfp_bar_resource_start(bar),
					     nfp_bar_resource_len(bar));
		if (bar->iomem) {
			msg += scnprintf(msg, end - msg,
					 "0.%d: Explicit%d, ", 4 + i, i);
			atomic_inc(&bar->refcnt);
			bars_free--;

			nfp->expl.group[i].bitsize = bar->bitsize;
			nfp->expl.group[i].addr = bar->iomem;
			nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);

			for (j = 0; j < 4; j++)
				nfp->expl.group[i].free[j] = true;
		}
		nfp->iomem.expl[i] = bar->iomem;
	}

	/* Sort bars by bit size - use the smallest possible first. */
	sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
	     bar_cmp, NULL);

	dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);

	return 0;

err_unmap_bar0:
	if (nfp->bar[0].iomem)
		iounmap(nfp->bar[0].iomem);
	return err;
}
709
710static void disable_bars(struct nfp6000_pcie *nfp)
711{
712	struct nfp_bar *bar = &nfp->bar[0];
713	int n;
714
715	for (n = 0; n < nfp->bars; n++, bar++) {
716		if (bar->iomem) {
717			iounmap(bar->iomem);
718			bar->iomem = NULL;
719		}
720	}
721}
722
723/*
724 * Generic CPP bus access interface.
725 */
726
/* Per-CPP-area private state: which BAR backs the area, where within
 * the BAR it lives, and the decoded target/action/token/width of the
 * CPP destination it was created for.
 */
struct nfp6000_area_priv {
	atomic_t refcnt;	/* Number of acquire() holders */

	struct nfp_bar *bar;	/* Backing BAR; NULL until acquired */
	u32 bar_offset;		/* Offset of this area within the BAR */

	u32 target;
	u32 action;
	u32 token;
	u64 offset;		/* CPP address of the area's start */
	struct {
		int read;	/* Push width (bytes) for reads */
		int write;	/* Pull width (bytes) for writes */
		int bar;	/* Width the BAR is configured for */
	} width;
	size_t size;

	void __iomem *iomem;	/* CPU mapping of the area */
	phys_addr_t phys;
	struct resource resource;
};
748
/* Initialize a CPP area's private state for destination @dest at
 * @address/@size.  Derives the push/pull widths from the target's
 * push-pull encoding; no BAR is allocated here (see
 * nfp6000_area_acquire()).  Returns 0 or a negative errno.
 */
static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
			     unsigned long long address, unsigned long size)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	u32 target = NFP_CPP_ID_TARGET_of(dest);
	u32 action = NFP_CPP_ID_ACTION_of(dest);
	u32 token = NFP_CPP_ID_TOKEN_of(dest);
	int pp;

	pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
	if (pp < 0)
		return pp;

	priv->width.read = PUSH_WIDTH(pp);
	priv->width.write = PULL_WIDTH(pp);
	/* A single BAR width cannot serve differing read/write widths */
	if (priv->width.read > 0 &&
	    priv->width.write > 0 &&
	    priv->width.read != priv->width.write) {
		return -EINVAL;
	}

	if (priv->width.read > 0)
		priv->width.bar = priv->width.read;
	else
		priv->width.bar = priv->width.write;

	atomic_set(&priv->refcnt, 0);
	priv->bar = NULL;

	priv->target = target;
	priv->action = action;
	priv->token = token;
	priv->offset = address;
	priv->size = size;
	memset(&priv->resource, 0, sizeof(priv->resource));

	return 0;
}
787
/* Area destructor hook - nothing to release beyond what
 * nfp6000_area_release() already handles.
 */
static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
{
}
791
/* Take a reference on the area's acquire count. */
static void priv_area_get(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	atomic_inc(&priv->refcnt);
}
798
/* Drop a reference on the area's acquire count.  Returns non-zero when
 * this was the last reference (caller should then release resources).
 */
static int priv_area_put(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	/* Underflow would indicate an unbalanced acquire/release */
	if (WARN_ON(!atomic_read(&priv->refcnt)))
		return 0;

	return atomic_dec_and_test(&priv->refcnt);
}
808
/* Acquire a BAR mapping for the area so reads/writes can proceed.
 * The first acquire allocates (possibly sharing) a BAR and resolves
 * the area's offset within it; subsequent acquires only bump the
 * refcount.  Non-blocking: fails with the nfp_alloc_bar() error when
 * no BAR is available.  Returns 0 or a negative errno.
 */
static int nfp6000_area_acquire(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
	int barnum, err;

	if (priv->bar) {
		/* Already allocated. */
		priv_area_get(area);
		return 0;
	}

	barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
			       priv->offset, priv->size, priv->width.bar, 1);

	if (barnum < 0) {
		err = barnum;
		goto err_alloc_bar;
	}
	priv->bar = &nfp->bar[barnum];

	/* Calculate offset into BAR. */
	if (nfp_bar_maptype(priv->bar) ==
	    NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
		/* GENERAL maps subdivide the aperture by target and
		 * token; fold those selector offsets into bar_offset.
		 */
		priv->bar_offset = priv->offset &
			(NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
			priv->bar, priv->target);
		priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
			priv->bar, priv->token);
	} else {
		priv->bar_offset = priv->offset & priv->bar->mask;
	}

	/* We don't actually try to acquire the resource area using
	 * request_resource.  This would prevent sharing the mapped
	 * BAR between multiple CPP areas and prevent us from
	 * effectively utilizing the limited amount of BAR resources.
	 */
	priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
	priv->resource.name = nfp_cpp_area_name(area);
	priv->resource.start = priv->phys;
	priv->resource.end = priv->resource.start + priv->size - 1;
	priv->resource.flags = IORESOURCE_MEM;

	/* If the bar is already mapped in, use its mapping */
	if (priv->bar->iomem)
		priv->iomem = priv->bar->iomem + priv->bar_offset;
	else
		/* Must have been too big. Sub-allocate. */
		priv->iomem = ioremap(priv->phys, priv->size);

	if (IS_ERR_OR_NULL(priv->iomem)) {
		dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
			(int)priv->size, priv->bar->index);
		err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
		priv->iomem = NULL;
		goto err_iomem_remap;
	}

	priv_area_get(area);
	return 0;

err_iomem_remap:
	nfp_bar_put(nfp, priv->bar);
	priv->bar = NULL;
err_alloc_bar:
	return err;
}
878
/* Release one acquire reference; on the last one, undo what
 * nfp6000_area_acquire() set up (sub-allocated mapping and BAR ref).
 */
static void nfp6000_area_release(struct nfp_cpp_area *area)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	if (!priv_area_put(area))
		return;

	/* Only unmap if acquire() created a private sub-allocation;
	 * a shared BAR mapping stays up for other users.
	 */
	if (!priv->bar->iomem)
		iounmap(priv->iomem);

	nfp_bar_put(nfp, priv->bar);

	priv->bar = NULL;
	priv->iomem = NULL;
}
895
/* Physical address of the area's start (valid after acquire). */
static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->phys;
}
902
/* CPU mapping of the area's start (valid after acquire). */
static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
{
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->iomem;
}
909
static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
{
	/* Use the BAR resource as the resource for the CPP area.
	 * This enables us to share the BAR among multiple CPP areas
	 * without resource conflicts.
	 */
	struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);

	return priv->bar->resource;
}
920
/* Read @length bytes at @offset within the area into @kernel_vaddr.
 *
 * Uses raw 32- or 64-bit MMIO reads through the BAR at the area's
 * push width; unaligned requests are rerouted through the explicit
 * access interface.  Returns the number of bytes read or a negative
 * errno.
 */
static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
			     unsigned long offset, unsigned int length)
{
	u64 __maybe_unused *wrptr64 = kernel_vaddr;
	const u64 __iomem __maybe_unused *rdptr64;
	struct nfp6000_area_priv *priv;
	u32 *wrptr32 = kernel_vaddr;
	const u32 __iomem *rdptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	rdptr64 = priv->iomem + offset;
	rdptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.read;
	if (width <= 0)
		return -EINVAL;

	/* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
					     NFP_CPP_ID(priv->target,
							priv->action,
							priv->token),
					     priv->offset + offset,
					     kernel_vaddr, length, width);

	/* Aligned path requires the BAR from acquire() */
	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32))
			*wrptr32++ = __raw_readl(rdptr32++);
		return n;
#ifdef __raw_readq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64))
			*wrptr64++ = __raw_readq(rdptr64++);
		return n;
#endif
	default:
		return -EINVAL;
	}
}
981
/* Write @length bytes from @kernel_vaddr to @offset within the area.
 *
 * Mirror of nfp6000_area_read(): raw 32- or 64-bit MMIO writes at the
 * area's pull width, with unaligned requests rerouted through the
 * explicit interface.  Returns the number of bytes written or a
 * negative errno.
 */
static int
nfp6000_area_write(struct nfp_cpp_area *area,
		   const void *kernel_vaddr,
		   unsigned long offset, unsigned int length)
{
	const u64 __maybe_unused *rdptr64 = kernel_vaddr;
	u64 __iomem __maybe_unused *wrptr64;
	const u32 *rdptr32 = kernel_vaddr;
	struct nfp6000_area_priv *priv;
	u32 __iomem *wrptr32;
	int n, width;

	priv = nfp_cpp_area_priv(area);
	wrptr64 = priv->iomem + offset;
	wrptr32 = priv->iomem + offset;

	if (offset + length > priv->size)
		return -EFAULT;

	width = priv->width.write;
	if (width <= 0)
		return -EINVAL;

	/* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
	    priv->action == NFP_CPP_ACTION_RW &&
	    (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4))
		width = TARGET_WIDTH_32;

	/* Unaligned? Translate to an explicit access */
	if ((priv->offset + offset) & (width - 1))
		return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
					      NFP_CPP_ID(priv->target,
							 priv->action,
							 priv->token),
					      priv->offset + offset,
					      kernel_vaddr, length, width);

	/* Aligned path requires the BAR from acquire() */
	if (WARN_ON(!priv->bar))
		return -EFAULT;

	switch (width) {
	case TARGET_WIDTH_32:
		if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u32)) {
			__raw_writel(*rdptr32++, wrptr32++);
			/* Flush each word before the next is posted */
			wmb();
		}
		return n;
#ifdef __raw_writeq
	case TARGET_WIDTH_64:
		if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
			return -EINVAL;

		for (n = 0; n < length; n += sizeof(u64)) {
			__raw_writeq(*rdptr64++, wrptr64++);
			/* Flush each word before the next is posted */
			wmb();
		}
		return n;
#endif
	default:
		return -EINVAL;
	}
}
1048
/* State of one acquired explicit-access slot: which group/area pair it
 * occupies and the precomputed data/command windows for it.
 */
struct nfp6000_explicit_priv {
	struct nfp6000_pcie *nfp;
	struct {
		int group;	/* Explicit group index (0..3) */
		int area;	/* Area index within the group (0..3) */
	} bar;
	int bitsize;		/* log2 size of one area within the group */
	void __iomem *data;	/* Push/pull data window for this slot */
	void __iomem *addr;	/* Command window for this slot */
};
1059
/* Claim a free explicit group/area slot and fill in the handle's
 * private state.  Returns 0 on success, -EAGAIN when all slots are in
 * use; slots are returned via nfp6000_explicit_release().
 */
static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
{
	struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	int i, j;

	mutex_lock(&nfp->expl.mutex);
	for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
		/* Groups with bitsize 0 were never mapped in enable_bars() */
		if (!nfp->expl.group[i].bitsize)
			continue;

		for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
			u16 data_offset;

			if (!nfp->expl.group[i].free[j])
				continue;

			priv->nfp = nfp;
			priv->bar.group = i;
			priv->bar.area = j;
			priv->bitsize = nfp->expl.group[i].bitsize - 2;

			/* Data window: 512 bytes per group, 128 per area */
			data_offset = (priv->bar.group << 9) +
				(priv->bar.area << 7);
			priv->data = nfp->expl.data + data_offset;
			priv->addr = nfp->expl.group[i].addr +
				(priv->bar.area << priv->bitsize);
			nfp->expl.group[i].free[j] = false;

			mutex_unlock(&nfp->expl.mutex);
			return 0;
		}
	}
	mutex_unlock(&nfp->expl.mutex);

	return -EAGAIN;
}
1097
1098static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
1099{
1100	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1101	struct nfp6000_pcie *nfp = priv->nfp;
1102
1103	mutex_lock(&nfp->expl.mutex);
1104	nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
1105	mutex_unlock(&nfp->expl.mutex);
1106}
1107
1108static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
1109				const void *buff, size_t len)
1110{
1111	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1112	const u32 *src = buff;
1113	size_t i;
1114
1115	for (i = 0; i < len; i += sizeof(u32))
1116		writel(*(src++), priv->data + i);
1117
1118	return i;
1119}
1120
/* Program and kick off one explicit CPP transaction on the slot held
 * by @expl.
 *
 * Builds the three per-slot EXPLICIT_BAR0..2 configuration words from
 * @cmd and @address and writes them either through the mapped CSR
 * window or, when none is mapped, via PCI config space at offset
 * 0x400.  The transaction is then triggered by a read from the slot's
 * kickoff window.
 *
 * Return: bitmask of the signals (siga/sigb) the command will raise;
 *	   does not fail.
 */
static int
nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
		    const struct nfp_cpp_explicit_command *cmd, u64 address)
{
	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
	u8 signal_master, signal_ref, data_master;
	struct nfp6000_pcie *nfp = priv->nfp;
	int sigmask = 0;
	u16 data_ref;
	u32 csr[3];

	/* Collect the signals the caller asked this command to raise */
	if (cmd->siga_mode)
		sigmask |= 1 << cmd->siga;
	if (cmd->sigb_mode)
		sigmask |= 1 << cmd->sigb;

	/* Default the signal master to this PCIe island's master id */
	signal_master = cmd->signal_master;
	if (!signal_master)
		signal_master = nfp->expl.master_id;

	/* When signalling ourselves, derive the signal ref from the
	 * slot's (group, area) so each slot gets a distinct signal pair
	 */
	signal_ref = cmd->signal_ref;
	if (signal_master == nfp->expl.master_id)
		signal_ref = nfp->expl.signal_ref +
			((priv->bar.group * 4 + priv->bar.area) << 1);

	/* Default the data master to this PCIe island's master id */
	data_master = cmd->data_master;
	if (!data_master)
		data_master = nfp->expl.master_id;

	/* When pushing/pulling to ourselves, point data_ref at this
	 * slot's data buffer (same layout as priv->data's offset)
	 */
	data_ref = cmd->data_ref;
	if (data_master == nfp->expl.master_id)
		data_ref = 0x1000 +
			(priv->bar.group << 9) + (priv->bar.area << 7);

	/* BAR0: signal type, CPP token, high bits of the CPP address */
	csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
			NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);

	/* BAR1: signal ref, data master and data ref */
	csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
		NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);

	/* BAR2: CPP target/action, length, byte mask, signal master */
	csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
			NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
			NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
		NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);

	if (nfp->iomem.csr) {
		writel(csr[0], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						  priv->bar.area));
		writel(csr[1], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						  priv->bar.area));
		writel(csr[2], nfp->iomem.csr +
		       NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						  priv->bar.area));
		/* Readback to ensure BAR is flushed */
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
						 priv->bar.area));
		readl(nfp->iomem.csr +
		      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
						 priv->bar.area));
	} else {
		/* No CSR mapping - fall back to PCI config space writes,
		 * which are non-posted and need no readback
		 */
		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR0(
					       priv->bar.group, priv->bar.area),
				       csr[0]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR1(
					       priv->bar.group, priv->bar.area),
				       csr[1]);

		pci_write_config_dword(nfp->pdev, 0x400 +
				       NFP_PCIE_BAR_EXPLICIT_BAR2(
					       priv->bar.group, priv->bar.area),
				       csr[2]);
	}

	/* Issue the 'kickoff' transaction */
	readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));

	return sigmask;
}
1214
1215static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
1216				void *buff, size_t len)
1217{
1218	struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
1219	u32 *dst = buff;
1220	size_t i;
1221
1222	for (i = 0; i < len; i += sizeof(u32))
1223		*(dst++) = readl(priv->data + i);
1224
1225	return i;
1226}
1227
1228static int nfp6000_init(struct nfp_cpp *cpp)
1229{
1230	nfp_cpp_area_cache_add(cpp, SZ_64K);
1231	nfp_cpp_area_cache_add(cpp, SZ_64K);
1232	nfp_cpp_area_cache_add(cpp, SZ_256K);
1233
1234	return 0;
1235}
1236
/* CPP bus teardown hook: release the BARs and free the transport
 * state allocated in nfp_cpp_from_nfp6000_pcie().
 */
static void nfp6000_free(struct nfp_cpp *cpp)
{
	struct nfp6000_pcie *priv = nfp_cpp_priv(cpp);

	disable_bars(priv);
	kfree(priv);
}
1244
1245static int nfp6000_read_serial(struct device *dev, u8 *serial)
1246{
1247	struct pci_dev *pdev = to_pci_dev(dev);
1248	u64 dsn;
1249
1250	dsn = pci_get_dsn(pdev);
1251	if (!dsn) {
1252		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1253		return -EINVAL;
1254	}
1255
1256	put_unaligned_be32((u32)(dsn >> 32), serial);
1257	put_unaligned_be16((u16)(dsn >> 16), serial + 4);
1258
1259	return 0;
1260}
1261
1262static int nfp6000_get_interface(struct device *dev)
1263{
1264	struct pci_dev *pdev = to_pci_dev(dev);
1265	u64 dsn;
1266
1267	dsn = pci_get_dsn(pdev);
1268	if (!dsn) {
1269		dev_err(dev, "can't find PCIe Serial Number Capability\n");
1270		return -EINVAL;
1271	}
1272
1273	return dsn & 0xffff;
1274}
1275
/* CPP bus operations implemented by the NFP6000 PCIe transport;
 * handed to the generic CPP core via nfp_cpp_from_operations().
 */
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
	.owner			= THIS_MODULE,

	.init			= nfp6000_init,
	.free			= nfp6000_free,

	.read_serial		= nfp6000_read_serial,
	.get_interface		= nfp6000_get_interface,

	/* BAR-mapped CPP area accessors */
	.area_priv_size		= sizeof(struct nfp6000_area_priv),
	.area_init		= nfp6000_area_init,
	.area_cleanup		= nfp6000_area_cleanup,
	.area_acquire		= nfp6000_area_acquire,
	.area_release		= nfp6000_area_release,
	.area_phys		= nfp6000_area_phys,
	.area_iomem		= nfp6000_area_iomem,
	.area_resource		= nfp6000_area_resource,
	.area_read		= nfp6000_area_read,
	.area_write		= nfp6000_area_write,

	/* Explicit (slot-based) transaction accessors */
	.explicit_priv_size	= sizeof(struct nfp6000_explicit_priv),
	.explicit_acquire	= nfp6000_explicit_acquire,
	.explicit_release	= nfp6000_explicit_release,
	.explicit_put		= nfp6000_explicit_put,
	.explicit_do		= nfp6000_explicit_do,
	.explicit_get		= nfp6000_explicit_get,
};
1303
1304/**
1305 * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
1306 * @pdev:	NFP6000 PCI device
1307 * @dev_info:	NFP ASIC params
1308 *
1309 * Return: NFP CPP handle
1310 */
1311struct nfp_cpp *
1312nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
1313{
1314	struct nfp6000_pcie *nfp;
1315	u16 interface;
1316	int err;
1317
1318	/*  Finished with card initialization. */
1319	dev_info(&pdev->dev, "Network Flow Processor %s PCIe Card Probe\n",
1320		 dev_info->chip_names);
1321	pcie_print_link_status(pdev);
1322
1323	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
1324	if (!nfp) {
1325		err = -ENOMEM;
1326		goto err_ret;
1327	}
1328
1329	nfp->dev = &pdev->dev;
1330	nfp->pdev = pdev;
1331	nfp->dev_info = dev_info;
1332	init_waitqueue_head(&nfp->bar_waiters);
1333	spin_lock_init(&nfp->bar_lock);
1334
1335	interface = nfp6000_get_interface(&pdev->dev);
1336
1337	if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
1338	    NFP_CPP_INTERFACE_TYPE_PCI) {
1339		dev_err(&pdev->dev,
1340			"Interface type %d is not the expected %d\n",
1341			NFP_CPP_INTERFACE_TYPE_of(interface),
1342			NFP_CPP_INTERFACE_TYPE_PCI);
1343		err = -ENODEV;
1344		goto err_free_nfp;
1345	}
1346
1347	if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
1348	    NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
1349		dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
1350			NFP_CPP_INTERFACE_CHANNEL_of(interface),
1351			NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
1352		err = -ENODEV;
1353		goto err_free_nfp;
1354	}
1355
1356	err = enable_bars(nfp, interface);
1357	if (err)
1358		goto err_free_nfp;
1359
1360	/* Probe for all the common NFP devices */
1361	return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);
1362
1363err_free_nfp:
1364	kfree(nfp);
1365err_ret:
1366	dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
1367	return ERR_PTR(err);
1368}
1369