1// SPDX-License-Identifier: GPL-2.0
2/* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell.
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/interrupt.h>
10#include <linux/delay.h>
11#include <linux/irq.h>
12#include <linux/pci.h>
13#include <linux/sysfs.h>
14
15#include "cgx.h"
16#include "rvu.h"
17#include "rvu_reg.h"
18#include "ptp.h"
19#include "mcs.h"
20
21#include "rvu_trace.h"
22#include "rvu_npc_hash.h"
23
24#define DRV_NAME	"rvu_af"
25#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
26
27static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
28				struct rvu_block *block, int lf);
29static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
30				  struct rvu_block *block, int lf);
31static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
32
33static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
34			 int type, int num,
35			 void (mbox_handler)(struct work_struct *),
36			 void (mbox_up_handler)(struct work_struct *));
37enum {
38	TYPE_AFVF,
39	TYPE_AFPF,
40};
41
42/* Supported devices */
43static const struct pci_device_id rvu_id_table[] = {
44	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
45	{ 0, }  /* end of table */
46};
47
48MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
49MODULE_DESCRIPTION(DRV_STRING);
50MODULE_LICENSE("GPL v2");
51MODULE_DEVICE_TABLE(pci, rvu_id_table);
52
53static char *mkex_profile; /* MKEX profile name */
54module_param(mkex_profile, charp, 0000);
55MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
56
57static char *kpu_profile; /* KPU profile name */
58module_param(kpu_profile, charp, 0000);
59MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
60
61static void rvu_setup_hw_capabilities(struct rvu *rvu)
62{
63	struct rvu_hwinfo *hw = rvu->hw;
64
65	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
66	hw->cap.nix_fixed_txschq_mapping = false;
67	hw->cap.nix_shaping = true;
68	hw->cap.nix_tx_link_bp = true;
69	hw->cap.nix_rx_multicast = true;
70	hw->cap.nix_shaper_toggle_wait = false;
71	hw->cap.npc_hash_extract = false;
72	hw->cap.npc_exact_match_enabled = false;
73	hw->rvu = rvu;
74
75	if (is_rvu_pre_96xx_C0(rvu)) {
76		hw->cap.nix_fixed_txschq_mapping = true;
77		hw->cap.nix_txsch_per_cgx_lmac = 4;
78		hw->cap.nix_txsch_per_lbk_lmac = 132;
79		hw->cap.nix_txsch_per_sdp_lmac = 76;
80		hw->cap.nix_shaping = false;
81		hw->cap.nix_tx_link_bp = false;
82		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
83			hw->cap.nix_rx_multicast = false;
84	}
85	if (!is_rvu_pre_96xx_C0(rvu))
86		hw->cap.nix_shaper_toggle_wait = true;
87
88	if (!is_rvu_otx2(rvu))
89		hw->cap.per_pf_mbox_regs = true;
90
91	if (is_rvu_npc_hash_extract_en(rvu))
92		hw->cap.npc_hash_extract = true;
93}
94
95/* Poll a RVU block's register 'offset' for a 'zero'
96 * or 'nonzero' at the bits specified by 'mask'.
97 */
98int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
99{
100	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
101	bool twice = false;
102	void __iomem *reg;
103	u64 reg_val;
104
105	reg = rvu->afreg_base + ((block << 28) | offset);
106again:
107	reg_val = readq(reg);
108	if (zero && !(reg_val & mask))
109		return 0;
110	if (!zero && (reg_val & mask))
111		return 0;
112	if (time_before(jiffies, timeout)) {
113		usleep_range(1, 5);
114		goto again;
115	}
116	/* If the CPU was scheduled out after reading the register but
117	 * before the 'time_before' check above, jiffies may already be
118	 * past the timeout even though HW completed the operation in
119	 * the meantime, so check the register one more time.
120	 */
121	if (!twice) {
122		twice = true;
123		goto again;
124	}
125	return -EBUSY;
126}
127
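/* Allocate a single free entry from the resource bitmap; returns the
 * allocated index, -EINVAL if the bitmap is absent or -ENOSPC if full.
 */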
128int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
129{
130	int id;
131
132	if (!rsrc->bmap)
133		return -EINVAL;
134
135	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
136	if (id >= rsrc->max)
137		return -ENOSPC;
138
139	__set_bit(id, rsrc->bmap);
140
141	return id;
142}
143
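/* Allocate 'nrsrc' contiguous entries from the resource bitmap; returns
 * the starting index, or -ENOSPC if no large enough free run exists.
 */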
144int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
145{
146	int start;
147
148	if (!rsrc->bmap)
149		return -EINVAL;
150
151	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
152	if (start >= rsrc->max)
153		return -ENOSPC;
154
155	bitmap_set(rsrc->bmap, start, nrsrc);
156	return start;
157}
158
159void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
160{
161	if (!rsrc->bmap)
162		return;
163	if (start >= rsrc->max)
164		return;
165
166	bitmap_clear(rsrc->bmap, start, nrsrc);
167}
168
169bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
170{
171	int start;
172
173	if (!rsrc->bmap)
174		return false;
175
176	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
177	if (start >= rsrc->max)
178		return false;
179
180	return true;
181}
182
183void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
184{
185	if (!rsrc->bmap)
186		return;
187
188	__clear_bit(id, rsrc->bmap);
189}
190
191int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
192{
193	int used;
194
195	if (!rsrc->bmap)
196		return 0;
197
198	used = bitmap_weight(rsrc->bmap, rsrc->max);
199	return (rsrc->max - used);
200}
201
202bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
203{
204	if (!rsrc->bmap)
205		return false;
206
207	return !test_bit(id, rsrc->bmap);
208}
209
210int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
211{
212	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
213			     sizeof(long), GFP_KERNEL);
214	if (!rsrc->bmap)
215		return -ENOMEM;
216	return 0;
217}
218
219void rvu_free_bitmap(struct rsrc_bmap *rsrc)
220{
221	kfree(rsrc->bmap);
222}
223
224/* Get block LF's HW index from a PF_FUNC's block slot number */
225int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
226{
227	u16 match = 0;
228	int lf;
229
230	mutex_lock(&rvu->rsrc_lock);
231	for (lf = 0; lf < block->lf.max; lf++) {
232		if (block->fn_map[lf] == pcifunc) {
233			if (slot == match) {
234				mutex_unlock(&rvu->rsrc_lock);
235				return lf;
236			}
237			match++;
238		}
239	}
240	mutex_unlock(&rvu->rsrc_lock);
241	return -ENODEV;
242}
243
244/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
245 * Some silicon variants of OcteonTX2 support
246 * multiple blocks of the same type.
247 *
248 * @pcifunc has to be zero when no LF is yet attached.
249 *
250 * If a pcifunc has LFs attached from multiple blocks of the same type,
251 * then return the blkaddr of the first such block encountered.
252 */
253int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
254{
255	int devnum, blkaddr = -ENODEV;
256	u64 cfg, reg;
257	bool is_pf;
258
259	switch (blktype) {
260	case BLKTYPE_NPC:
261		blkaddr = BLKADDR_NPC;
262		goto exit;
263	case BLKTYPE_NPA:
264		blkaddr = BLKADDR_NPA;
265		goto exit;
266	case BLKTYPE_NIX:
267		/* For now assume NIX0 */
268		if (!pcifunc) {
269			blkaddr = BLKADDR_NIX0;
270			goto exit;
271		}
272		break;
273	case BLKTYPE_SSO:
274		blkaddr = BLKADDR_SSO;
275		goto exit;
276	case BLKTYPE_SSOW:
277		blkaddr = BLKADDR_SSOW;
278		goto exit;
279	case BLKTYPE_TIM:
280		blkaddr = BLKADDR_TIM;
281		goto exit;
282	case BLKTYPE_CPT:
283		/* For now assume CPT0 */
284		if (!pcifunc) {
285			blkaddr = BLKADDR_CPT0;
286			goto exit;
287		}
288		break;
289	}
290
291	/* Check if this is a RVU PF or VF */
292	if (pcifunc & RVU_PFVF_FUNC_MASK) {
293		is_pf = false;
294		devnum = rvu_get_hwvf(rvu, pcifunc);
295	} else {
296		is_pf = true;
297		devnum = rvu_get_pf(pcifunc);
298	}
299
300	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
301	 * 'BLKADDR_NIX1'.
302	 */
303	if (blktype == BLKTYPE_NIX) {
304		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
305			RVU_PRIV_HWVFX_NIXX_CFG(0);
306		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
307		if (cfg) {
308			blkaddr = BLKADDR_NIX0;
309			goto exit;
310		}
311
312		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
313			RVU_PRIV_HWVFX_NIXX_CFG(1);
314		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
315		if (cfg)
316			blkaddr = BLKADDR_NIX1;
317	}
318
319	if (blktype == BLKTYPE_CPT) {
320		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
321			RVU_PRIV_HWVFX_CPTX_CFG(0);
322		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
323		if (cfg) {
324			blkaddr = BLKADDR_CPT0;
325			goto exit;
326		}
327
328		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
329			RVU_PRIV_HWVFX_CPTX_CFG(1);
330		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
331		if (cfg)
332			blkaddr = BLKADDR_CPT1;
333	}
334
335exit:
336	if (is_block_implemented(rvu->hw, blkaddr))
337		return blkaddr;
338	return -ENODEV;
339}
340
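/* Update SW state when an LF is attached to or detached from a PF/VF:
 * record the owner in the block's fn_map, adjust the PF/VF's LF count
 * and mirror that count into the RVU privileged config register.
 */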
341static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
342				struct rvu_block *block, u16 pcifunc,
343				u16 lf, bool attach)
344{
345	int devnum, num_lfs = 0;
346	bool is_pf;
347	u64 reg;
348
349	if (lf >= block->lf.max) {
350		dev_err(&rvu->pdev->dev,
351			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
352			__func__, lf, block->name, block->lf.max);
353		return;
354	}
355
356	/* Check if this is for a RVU PF or VF */
357	if (pcifunc & RVU_PFVF_FUNC_MASK) {
358		is_pf = false;
359		devnum = rvu_get_hwvf(rvu, pcifunc);
360	} else {
361		is_pf = true;
362		devnum = rvu_get_pf(pcifunc);
363	}
364
365	block->fn_map[lf] = attach ? pcifunc : 0;
366
367	switch (block->addr) {
368	case BLKADDR_NPA:
369		pfvf->npalf = attach ? true : false;
370		num_lfs = pfvf->npalf;
371		break;
372	case BLKADDR_NIX0:
373	case BLKADDR_NIX1:
374		pfvf->nixlf = attach ? true : false;
375		num_lfs = pfvf->nixlf;
376		break;
377	case BLKADDR_SSO:
378		attach ? pfvf->sso++ : pfvf->sso--;
379		num_lfs = pfvf->sso;
380		break;
381	case BLKADDR_SSOW:
382		attach ? pfvf->ssow++ : pfvf->ssow--;
383		num_lfs = pfvf->ssow;
384		break;
385	case BLKADDR_TIM:
386		attach ? pfvf->timlfs++ : pfvf->timlfs--;
387		num_lfs = pfvf->timlfs;
388		break;
389	case BLKADDR_CPT0:
390		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
391		num_lfs = pfvf->cptlfs;
392		break;
393	case BLKADDR_CPT1:
394		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
395		num_lfs = pfvf->cpt1_lfs;
396		break;
397	}
398
399	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
400	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
401}
402
403inline int rvu_get_pf(u16 pcifunc)
404{
405	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
406}
407
408void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
409{
410	u64 cfg;
411
412	/* Get numVFs attached to this PF and first HWVF */
413	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
414	if (numvfs)
415		*numvfs = (cfg >> 12) & 0xFF;
416	if (hwvf)
417		*hwvf = cfg & 0xFFF;
418}
419
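/* Translate a pcifunc into its HW VF index using the first-HWVF value
 * the PF carries in RVU_PRIV_PFX_CFG.
 */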
420int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
421{
422	int pf, func;
423	u64 cfg;
424
425	pf = rvu_get_pf(pcifunc);
426	func = pcifunc & RVU_PFVF_FUNC_MASK;
427
428	/* Get first HWVF attached to this PF */
429	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
430
431	return ((cfg & 0xFFF) + func - 1);
432}
433
434struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
435{
436	/* Check if it is a PF or VF */
437	if (pcifunc & RVU_PFVF_FUNC_MASK)
438		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
439	else
440		return &rvu->pf[rvu_get_pf(pcifunc)];
441}
442
443static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
444{
445	int pf, vf, nvfs;
446	u64 cfg;
447
448	pf = rvu_get_pf(pcifunc);
449	if (pf >= rvu->hw->total_pfs)
450		return false;
451
452	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
453		return true;
454
455	/* Check if VF is within number of VFs attached to this PF */
456	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
457	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
458	nvfs = (cfg >> 12) & 0xFF;
459	if (vf >= nvfs)
460		return false;
461
462	return true;
463}
464
465bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
466{
467	struct rvu_block *block;
468
469	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
470		return false;
471
472	block = &hw->block[blkaddr];
473	return block->implemented;
474}
475
476static void rvu_check_block_implemented(struct rvu *rvu)
477{
478	struct rvu_hwinfo *hw = rvu->hw;
479	struct rvu_block *block;
480	int blkid;
481	u64 cfg;
482
483	/* For each block check if 'implemented' bit is set */
484	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
485		block = &hw->block[blkid];
486		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
487		if (cfg & BIT_ULL(11))
488			block->implemented = true;
489	}
490}
491
492static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
493{
494	rvu_write64(rvu, BLKADDR_RVUM,
495		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
496		    RVU_BLK_RVUM_REVID);
497}
498
499static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
500{
501	rvu_write64(rvu, BLKADDR_RVUM,
502		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
503}
504
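/* Reset a single LF of a block and wait for HW to clear the reset
 * bit (bit 12) in the block's LF reset register.
 */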
505int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
506{
507	int err;
508
509	if (!block->implemented)
510		return 0;
511
512	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
513	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
514			   true);
515	return err;
516}
517
518static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
519{
520	struct rvu_block *block = &rvu->hw->block[blkaddr];
521	int err;
522
523	if (!block->implemented)
524		return;
525
526	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
527	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
528	if (err) {
529		dev_err(rvu->dev, "HW block:%d reset timed out, retrying again\n", blkaddr);
530		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
531			;
532	}
533}
534
535static void rvu_reset_all_blocks(struct rvu *rvu)
536{
537	/* Do a HW reset of all RVU blocks */
538	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
539	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
540	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
541	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
542	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
543	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
544	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
545	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
546	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
547	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
548	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
549	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
550	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
551}
552
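/* Scan a block's LF config registers for LFs that firmware has already
 * provisioned, mark them as used and record which PF/VF owns them.
 */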
553static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
554{
555	struct rvu_pfvf *pfvf;
556	u64 cfg;
557	int lf;
558
559	for (lf = 0; lf < block->lf.max; lf++) {
560		cfg = rvu_read64(rvu, block->addr,
561				 block->lfcfg_reg | (lf << block->lfshift));
562		if (!(cfg & BIT_ULL(63)))
563			continue;
564
565		/* Set this resource as being used */
566		__set_bit(lf, block->lf.bmap);
567
568		/* Get to whom this LF is attached */
569		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
570		rvu_update_rsrc_map(rvu, pfvf, block,
571				    (cfg >> 8) & 0xFFFF, lf, true);
572
573		/* Set start MSIX vector for this LF within this PF/VF */
574		rvu_set_msix_offset(rvu, pfvf, block, lf);
575	}
576}
577
578static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
579{
580	int min_vecs;
581
582	if (!vf)
583		goto check_pf;
584
585	if (!nvecs) {
586		dev_warn(rvu->dev,
587			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
588			 pf, vf - 1, nvecs);
589	}
590	return;
591
592check_pf:
593	if (pf == 0)
594		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
595	else
596		min_vecs = RVU_PF_INT_VEC_CNT;
597
598	if (!(nvecs < min_vecs))
599		return;
600	dev_warn(rvu->dev,
601		 "PF%d is configured with too few vectors, %d, min is %d\n",
602		 pf, nvecs, min_vecs);
603}
604
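/* Read per PF/VF MSIX vector counts, reserve the vectors used by the
 * PF/VF interrupt blocks and map the MSIX table base through the IOMMU
 * so HW can access it via an IOVA.
 */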
605static int rvu_setup_msix_resources(struct rvu *rvu)
606{
607	struct rvu_hwinfo *hw = rvu->hw;
608	int pf, vf, numvfs, hwvf, err;
609	int nvecs, offset, max_msix;
610	struct rvu_pfvf *pfvf;
611	u64 cfg, phy_addr;
612	dma_addr_t iova;
613
614	for (pf = 0; pf < hw->total_pfs; pf++) {
615		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
616		/* If PF is not enabled, nothing to do */
617		if (!((cfg >> 20) & 0x01))
618			continue;
619
620		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
621
622		pfvf = &rvu->pf[pf];
623		/* Get num of MSIX vectors attached to this PF */
624		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
625		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
626		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
627
628		/* Alloc msix bitmap for this PF */
629		err = rvu_alloc_bitmap(&pfvf->msix);
630		if (err)
631			return err;
632
633		/* Allocate memory for MSIX vector to RVU block LF mapping */
634		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
635						sizeof(u16), GFP_KERNEL);
636		if (!pfvf->msix_lfmap)
637			return -ENOMEM;
638
639		/* For PF0 (AF), firmware will set the MSIX vector offsets for
640		 * AF, block AF and PF0_INT vectors, so jump straight to VFs.
641		 */
642		if (!pf)
643			goto setup_vfmsix;
644
645		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
646		 * These are allocated on driver init and never freed,
647		 * so no need to set 'msix_lfmap' for these.
648		 */
649		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
650		nvecs = (cfg >> 12) & 0xFF;
651		cfg &= ~0x7FFULL;
652		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
653		rvu_write64(rvu, BLKADDR_RVUM,
654			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
655setup_vfmsix:
656		/* Alloc msix bitmap for VFs */
657		for (vf = 0; vf < numvfs; vf++) {
658			pfvf =  &rvu->hwvf[hwvf + vf];
659			/* Get num of MSIX vectors attached to this VF */
660			cfg = rvu_read64(rvu, BLKADDR_RVUM,
661					 RVU_PRIV_PFX_MSIX_CFG(pf));
662			pfvf->msix.max = (cfg & 0xFFF) + 1;
663			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
664
665			/* Alloc msix bitmap for this VF */
666			err = rvu_alloc_bitmap(&pfvf->msix);
667			if (err)
668				return err;
669
670			pfvf->msix_lfmap =
671				devm_kcalloc(rvu->dev, pfvf->msix.max,
672					     sizeof(u16), GFP_KERNEL);
673			if (!pfvf->msix_lfmap)
674				return -ENOMEM;
675
676			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
677			 * These are allocated on driver init and never freed,
678			 * so no need to set 'msix_lfmap' for these.
679			 */
680			cfg = rvu_read64(rvu, BLKADDR_RVUM,
681					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
682			nvecs = (cfg >> 12) & 0xFF;
683			cfg &= ~0x7FFULL;
684			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
685			rvu_write64(rvu, BLKADDR_RVUM,
686				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
687				    cfg | offset);
688		}
689	}
690
691	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
692	 * create an IOMMU mapping for the physical address configured by
693	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
694	 */
695	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
696	max_msix = cfg & 0xFFFFF;
697	if (rvu->fwdata && rvu->fwdata->msixtr_base)
698		phy_addr = rvu->fwdata->msixtr_base;
699	else
700		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
701
702	iova = dma_map_resource(rvu->dev, phy_addr,
703				max_msix * PCI_MSIX_ENTRY_SIZE,
704				DMA_BIDIRECTIONAL, 0);
705
706	if (dma_mapping_error(rvu->dev, iova))
707		return -ENOMEM;
708
709	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
710	rvu->msix_base_iova = iova;
711	rvu->msixtr_base_phy = phy_addr;
712
713	return 0;
714}
715
716static void rvu_reset_msix(struct rvu *rvu)
717{
718	/* Restore msixtr base register */
719	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
720		    rvu->msixtr_base_phy);
721}
722
723static void rvu_free_hw_resources(struct rvu *rvu)
724{
725	struct rvu_hwinfo *hw = rvu->hw;
726	struct rvu_block *block;
727	struct rvu_pfvf  *pfvf;
728	int id, max_msix;
729	u64 cfg;
730
731	rvu_npa_freemem(rvu);
732	rvu_npc_freemem(rvu);
733	rvu_nix_freemem(rvu);
734
735	/* Free block LF bitmaps */
736	for (id = 0; id < BLK_COUNT; id++) {
737		block = &hw->block[id];
738		kfree(block->lf.bmap);
739	}
740
741	/* Free MSIX bitmaps */
742	for (id = 0; id < hw->total_pfs; id++) {
743		pfvf = &rvu->pf[id];
744		kfree(pfvf->msix.bmap);
745	}
746
747	for (id = 0; id < hw->total_vfs; id++) {
748		pfvf = &rvu->hwvf[id];
749		kfree(pfvf->msix.bmap);
750	}
751
752	/* Unmap MSIX vector base IOVA mapping */
753	if (!rvu->msix_base_iova)
754		return;
755	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
756	max_msix = cfg & 0xFFFFF;
757	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
758			   max_msix * PCI_MSIX_ENTRY_SIZE,
759			   DMA_BIDIRECTIONAL, 0);
760
761	rvu_reset_msix(rvu);
762	mutex_destroy(&rvu->rsrc_lock);
763}
764
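/* Assign MAC addresses to CGX mapped PFs and to all VFs, using firmware
 * provided addresses when available and random ones otherwise.
 */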
765static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
766{
767	struct rvu_hwinfo *hw = rvu->hw;
768	int pf, vf, numvfs, hwvf;
769	struct rvu_pfvf *pfvf;
770	u64 *mac;
771
772	for (pf = 0; pf < hw->total_pfs; pf++) {
773		/* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
774		if (!pf)
775			goto lbkvf;
776
777		if (!is_pf_cgxmapped(rvu, pf))
778			continue;
779		/* Assign MAC address to PF */
780		pfvf = &rvu->pf[pf];
781		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
782			mac = &rvu->fwdata->pf_macs[pf];
783			if (*mac)
784				u64_to_ether_addr(*mac, pfvf->mac_addr);
785			else
786				eth_random_addr(pfvf->mac_addr);
787		} else {
788			eth_random_addr(pfvf->mac_addr);
789		}
790		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
791
792lbkvf:
793		/* Assign MAC addresses to VFs */
794		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
795		for (vf = 0; vf < numvfs; vf++, hwvf++) {
796			pfvf = &rvu->hwvf[hwvf];
797			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
798				mac = &rvu->fwdata->vf_macs[hwvf];
799				if (*mac)
800					u64_to_ether_addr(*mac, pfvf->mac_addr);
801				else
802					eth_random_addr(pfvf->mac_addr);
803			} else {
804				eth_random_addr(pfvf->mac_addr);
805			}
806			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
807		}
808	}
809}
810
811static int rvu_fwdata_init(struct rvu *rvu)
812{
813	u64 fwdbase;
814	int err;
815
816	/* Get firmware data base address */
817	err = cgx_get_fwdata_base(&fwdbase);
818	if (err)
819		goto fail;
820
821	BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
822	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
823	if (!rvu->fwdata)
824		goto fail;
825	if (!is_rvu_fwdata_valid(rvu)) {
826		dev_err(rvu->dev,
827			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
828		iounmap(rvu->fwdata);
829		rvu->fwdata = NULL;
830		return -EINVAL;
831	}
832	return 0;
833fail:
834	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
835	return -EIO;
836}
837
838static void rvu_fwdata_exit(struct rvu *rvu)
839{
840	if (rvu->fwdata)
841		iounmap(rvu->fwdata);
842}
843
844static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
845{
846	struct rvu_hwinfo *hw = rvu->hw;
847	struct rvu_block *block;
848	int blkid;
849	u64 cfg;
850
851	/* Init NIX LF's bitmap */
852	block = &hw->block[blkaddr];
853	if (!block->implemented)
854		return 0;
855	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
856	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
857	block->lf.max = cfg & 0xFFF;
858	block->addr = blkaddr;
859	block->type = BLKTYPE_NIX;
860	block->lfshift = 8;
861	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
862	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
863	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
864	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
865	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
866	block->lfreset_reg = NIX_AF_LF_RST;
867	block->rvu = rvu;
868	sprintf(block->name, "NIX%d", blkid);
869	rvu->nix_blkaddr[blkid] = blkaddr;
870	return rvu_alloc_bitmap(&block->lf);
871}
872
873static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
874{
875	struct rvu_hwinfo *hw = rvu->hw;
876	struct rvu_block *block;
877	int blkid;
878	u64 cfg;
879
880	/* Init CPT LF's bitmap */
881	block = &hw->block[blkaddr];
882	if (!block->implemented)
883		return 0;
884	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
885	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
886	block->lf.max = cfg & 0xFF;
887	block->addr = blkaddr;
888	block->type = BLKTYPE_CPT;
889	block->multislot = true;
890	block->lfshift = 3;
891	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
892	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
893	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
894	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
895	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
896	block->lfreset_reg = CPT_AF_LF_RST;
897	block->rvu = rvu;
898	sprintf(block->name, "CPT%d", blkid);
899	return rvu_alloc_bitmap(&block->lf);
900}
901
902static void rvu_get_lbk_bufsize(struct rvu *rvu)
903{
904	struct pci_dev *pdev = NULL;
905	void __iomem *base;
906	u64 lbk_const;
907
908	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
909			      PCI_DEVID_OCTEONTX2_LBK, pdev);
910	if (!pdev)
911		return;
912
913	base = pci_ioremap_bar(pdev, 0);
914	if (!base)
915		goto err_put;
916
917	lbk_const = readq(base + LBK_CONST);
918
919	/* Cache the LBK FIFO size */
920	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
921
922	iounmap(base);
923err_put:
924	pci_dev_put(pdev);
925}
926
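/* Discover HW limits and implemented blocks, set up per-block LF bitmaps
 * and MSIX resources, then bring up NPC, CGX, NPA, NIX, SDP, MCS and CPT.
 */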
927static int rvu_setup_hw_resources(struct rvu *rvu)
928{
929	struct rvu_hwinfo *hw = rvu->hw;
930	struct rvu_block *block;
931	int blkid, err;
932	u64 cfg;
933
934	/* Get HW supported max RVU PF & VF count */
935	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
936	hw->total_pfs = (cfg >> 32) & 0xFF;
937	hw->total_vfs = (cfg >> 20) & 0xFFF;
938	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
939
940	if (!is_rvu_otx2(rvu))
941		rvu_apr_block_cn10k_init(rvu);
942
943	/* Init NPA LF's bitmap */
944	block = &hw->block[BLKADDR_NPA];
945	if (!block->implemented)
946		goto nix;
947	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
948	block->lf.max = (cfg >> 16) & 0xFFF;
949	block->addr = BLKADDR_NPA;
950	block->type = BLKTYPE_NPA;
951	block->lfshift = 8;
952	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
953	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
954	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
955	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
956	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
957	block->lfreset_reg = NPA_AF_LF_RST;
958	block->rvu = rvu;
959	sprintf(block->name, "NPA");
960	err = rvu_alloc_bitmap(&block->lf);
961	if (err) {
962		dev_err(rvu->dev,
963			"%s: Failed to allocate NPA LF bitmap\n", __func__);
964		return err;
965	}
966
967nix:
968	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
969	if (err) {
970		dev_err(rvu->dev,
971			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
972		return err;
973	}
974
975	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
976	if (err) {
977		dev_err(rvu->dev,
978			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
979		return err;
980	}
981
982	/* Init SSO group's bitmap */
983	block = &hw->block[BLKADDR_SSO];
984	if (!block->implemented)
985		goto ssow;
986	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
987	block->lf.max = cfg & 0xFFFF;
988	block->addr = BLKADDR_SSO;
989	block->type = BLKTYPE_SSO;
990	block->multislot = true;
991	block->lfshift = 3;
992	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
993	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
994	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
995	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
996	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
997	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
998	block->rvu = rvu;
999	sprintf(block->name, "SSO GROUP");
1000	err = rvu_alloc_bitmap(&block->lf);
1001	if (err) {
1002		dev_err(rvu->dev,
1003			"%s: Failed to allocate SSO LF bitmap\n", __func__);
1004		return err;
1005	}
1006
1007ssow:
1008	/* Init SSO workslot's bitmap */
1009	block = &hw->block[BLKADDR_SSOW];
1010	if (!block->implemented)
1011		goto tim;
1012	block->lf.max = (cfg >> 56) & 0xFF;
1013	block->addr = BLKADDR_SSOW;
1014	block->type = BLKTYPE_SSOW;
1015	block->multislot = true;
1016	block->lfshift = 3;
1017	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
1018	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
1019	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
1020	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
1021	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
1022	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
1023	block->rvu = rvu;
1024	sprintf(block->name, "SSOWS");
1025	err = rvu_alloc_bitmap(&block->lf);
1026	if (err) {
1027		dev_err(rvu->dev,
1028			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
1029		return err;
1030	}
1031
1032tim:
1033	/* Init TIM LF's bitmap */
1034	block = &hw->block[BLKADDR_TIM];
1035	if (!block->implemented)
1036		goto cpt;
1037	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
1038	block->lf.max = cfg & 0xFFFF;
1039	block->addr = BLKADDR_TIM;
1040	block->type = BLKTYPE_TIM;
1041	block->multislot = true;
1042	block->lfshift = 3;
1043	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
1044	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
1045	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
1046	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
1047	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
1048	block->lfreset_reg = TIM_AF_LF_RST;
1049	block->rvu = rvu;
1050	sprintf(block->name, "TIM");
1051	err = rvu_alloc_bitmap(&block->lf);
1052	if (err) {
1053		dev_err(rvu->dev,
1054			"%s: Failed to allocate TIM LF bitmap\n", __func__);
1055		return err;
1056	}
1057
1058cpt:
1059	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
1060	if (err) {
1061		dev_err(rvu->dev,
1062			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
1063		return err;
1064	}
1065	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
1066	if (err) {
1067		dev_err(rvu->dev,
1068			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
1069		return err;
1070	}
1071
1072	/* Allocate memory for PFVF data */
1073	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
1074			       sizeof(struct rvu_pfvf), GFP_KERNEL);
1075	if (!rvu->pf) {
1076		dev_err(rvu->dev,
1077			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
1078		return -ENOMEM;
1079	}
1080
1081	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
1082				 sizeof(struct rvu_pfvf), GFP_KERNEL);
1083	if (!rvu->hwvf) {
1084		dev_err(rvu->dev,
1085			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
1086		return -ENOMEM;
1087	}
1088
1089	mutex_init(&rvu->rsrc_lock);
1090
1091	rvu_fwdata_init(rvu);
1092
1093	err = rvu_setup_msix_resources(rvu);
1094	if (err) {
1095		dev_err(rvu->dev,
1096			"%s: Failed to setup MSIX resources\n", __func__);
1097		return err;
1098	}
1099
1100	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1101		block = &hw->block[blkid];
1102		if (!block->lf.bmap)
1103			continue;
1104
1105		/* Allocate memory for block LF/slot to pcifunc mapping info */
1106		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
1107					     sizeof(u16), GFP_KERNEL);
1108		if (!block->fn_map) {
1109			err = -ENOMEM;
1110			goto msix_err;
1111		}
1112
1113		/* Scan all blocks to check if low level firmware has
1114		 * already provisioned any of the resources to a PF/VF.
1115		 */
1116		rvu_scan_block(rvu, block);
1117	}
1118
1119	err = rvu_set_channels_base(rvu);
1120	if (err)
1121		goto msix_err;
1122
1123	err = rvu_npc_init(rvu);
1124	if (err) {
1125		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
1126		goto npc_err;
1127	}
1128
1129	err = rvu_cgx_init(rvu);
1130	if (err) {
1131		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
1132		goto cgx_err;
1133	}
1134
1135	err = rvu_npc_exact_init(rvu);
1136	if (err) {
1137		dev_err(rvu->dev, "failed to initialize exact match table\n");
1138		return err;
1139	}
1140
1141	/* Assign MACs for CGX mapped functions */
1142	rvu_setup_pfvf_macaddress(rvu);
1143
1144	err = rvu_npa_init(rvu);
1145	if (err) {
1146		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
1147		goto npa_err;
1148	}
1149
1150	rvu_get_lbk_bufsize(rvu);
1151
1152	err = rvu_nix_init(rvu);
1153	if (err) {
1154		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
1155		goto nix_err;
1156	}
1157
1158	err = rvu_sdp_init(rvu);
1159	if (err) {
1160		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
1161		goto nix_err;
1162	}
1163
1164	rvu_program_channels(rvu);
1165
1166	err = rvu_mcs_init(rvu);
1167	if (err) {
1168		dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
1169		goto nix_err;
1170	}
1171
1172	err = rvu_cpt_init(rvu);
1173	if (err) {
1174		dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
1175		goto mcs_err;
1176	}
1177
1178	return 0;
1179
1180mcs_err:
1181	rvu_mcs_exit(rvu);
1182nix_err:
1183	rvu_nix_freemem(rvu);
1184npa_err:
1185	rvu_npa_freemem(rvu);
1186cgx_err:
1187	rvu_cgx_exit(rvu);
1188npc_err:
1189	rvu_npc_freemem(rvu);
1190	rvu_fwdata_exit(rvu);
1191msix_err:
1192	rvu_reset_msix(rvu);
1193	return err;
1194}
1195
1196/* NPA and NIX admin queue APIs */
1197void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1198{
1199	if (!aq)
1200		return;
1201
1202	qmem_free(rvu->dev, aq->inst);
1203	qmem_free(rvu->dev, aq->res);
1204	devm_kfree(rvu->dev, aq);
1205}
1206
1207int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1208		 int qsize, int inst_size, int res_size)
1209{
1210	struct admin_queue *aq;
1211	int err;
1212
1213	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1214	if (!*ad_queue)
1215		return -ENOMEM;
1216	aq = *ad_queue;
1217
1218	/* Alloc memory for instructions, i.e. the AQ */
1219	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1220	if (err) {
1221		devm_kfree(rvu->dev, aq);
1222		return err;
1223	}
1224
1225	/* Alloc memory for results */
1226	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1227	if (err) {
1228		rvu_aq_free(rvu, aq);
1229		return err;
1230	}
1231
1232	spin_lock_init(&aq->lock);
1233	return 0;
1234}
1235
1236int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1237			   struct ready_msg_rsp *rsp)
1238{
1239	if (rvu->fwdata) {
1240		rsp->rclk_freq = rvu->fwdata->rclk;
1241		rsp->sclk_freq = rvu->fwdata->sclk;
1242	}
1243	return 0;
1244}
1245
1246/* Get current count of a RVU block's LF/slots
1247 * provisioned to a given RVU func.
1248 */
1249u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1250{
1251	switch (blkaddr) {
1252	case BLKADDR_NPA:
1253		return pfvf->npalf ? 1 : 0;
1254	case BLKADDR_NIX0:
1255	case BLKADDR_NIX1:
1256		return pfvf->nixlf ? 1 : 0;
1257	case BLKADDR_SSO:
1258		return pfvf->sso;
1259	case BLKADDR_SSOW:
1260		return pfvf->ssow;
1261	case BLKADDR_TIM:
1262		return pfvf->timlfs;
1263	case BLKADDR_CPT0:
1264		return pfvf->cptlfs;
1265	case BLKADDR_CPT1:
1266		return pfvf->cpt1_lfs;
1267	}
1268	return 0;
1269}
1270
1271/* Return true if LFs of block type are attached to pcifunc */
1272static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1273{
1274	switch (blktype) {
1275	case BLKTYPE_NPA:
1276		return pfvf->npalf ? 1 : 0;
1277	case BLKTYPE_NIX:
1278		return pfvf->nixlf ? 1 : 0;
1279	case BLKTYPE_SSO:
1280		return !!pfvf->sso;
1281	case BLKTYPE_SSOW:
1282		return !!pfvf->ssow;
1283	case BLKTYPE_TIM:
1284		return !!pfvf->timlfs;
1285	case BLKTYPE_CPT:
1286		return pfvf->cptlfs || pfvf->cpt1_lfs;
1287	}
1288
1289	return false;
1290}
1291
1292bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1293{
1294	struct rvu_pfvf *pfvf;
1295
1296	if (!is_pf_func_valid(rvu, pcifunc))
1297		return false;
1298
1299	pfvf = rvu_get_pfvf(rvu, pcifunc);
1300
1301	/* Check if this PFFUNC has a LF of type blktype attached */
1302	if (!is_blktype_attached(pfvf, blktype))
1303		return false;
1304
1305	return true;
1306}
1307
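/* Use a block's lookup register to translate a (pcifunc, slot) pair into
 * the LF's HW index; returns -1 if no valid LF is mapped at that slot.
 */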
1308static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1309			   int pcifunc, int slot)
1310{
1311	u64 val;
1312
1313	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1314	rvu_write64(rvu, block->addr, block->lookup_reg, val);
1315	/* Wait for the lookup to finish */
1316	/* TODO: put some timeout here */
1317	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1318		;
1319
1320	val = rvu_read64(rvu, block->addr, block->lookup_reg);
1321
1322	/* Check LF valid bit */
1323	if (!(val & (1ULL << 12)))
1324		return -1;
1325
1326	return (val & 0xFFF);
1327}
1328
1329int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
1330			      u16 global_slot, u16 *slot_in_block)
1331{
1332	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1333	int numlfs, total_lfs = 0, nr_blocks = 0;
1334	int i, num_blkaddr[BLK_COUNT] = { 0 };
1335	struct rvu_block *block;
1336	int blkaddr;
1337	u16 start_slot;
1338
1339	if (!is_blktype_attached(pfvf, blktype))
1340		return -ENODEV;
1341
1342	/* Get all the block addresses from which LFs are attached to
1343	 * the given pcifunc in num_blkaddr[].
1344	 */
1345	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
1346		block = &rvu->hw->block[blkaddr];
1347		if (block->type != blktype)
1348			continue;
1349		if (!is_block_implemented(rvu->hw, blkaddr))
1350			continue;
1351
1352		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
1353		if (numlfs) {
1354			total_lfs += numlfs;
1355			num_blkaddr[nr_blocks] = blkaddr;
1356			nr_blocks++;
1357		}
1358	}
1359
1360	if (global_slot >= total_lfs)
1361		return -ENODEV;
1362
1363	/* Based on the given global slot number retrieve the
1364	 * correct block address out of all attached block
1365	 * addresses and slot number in that block.
1366	 */
1367	total_lfs = 0;
1368	blkaddr = -ENODEV;
1369	for (i = 0; i < nr_blocks; i++) {
1370		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
1371		total_lfs += numlfs;
1372		if (global_slot < total_lfs) {
1373			blkaddr = num_blkaddr[i];
1374			start_slot = total_lfs - numlfs;
1375			*slot_in_block = global_slot - start_slot;
1376			break;
1377		}
1378	}
1379
1380	return blkaddr;
1381}
1382
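/* Detach all LFs of a given block type from a pcifunc: disable each LF,
 * update the SW mapping, free the LF and clear its MSIX vectors.
 */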
1383static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1384{
1385	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1386	struct rvu_hwinfo *hw = rvu->hw;
1387	struct rvu_block *block;
1388	int slot, lf, num_lfs;
1389	int blkaddr;
1390
1391	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1392	if (blkaddr < 0)
1393		return;
1394
1395	if (blktype == BLKTYPE_NIX)
1396		rvu_nix_reset_mac(pfvf, pcifunc);
1397
1398	block = &hw->block[blkaddr];
1399
1400	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1401	if (!num_lfs)
1402		return;
1403
1404	for (slot = 0; slot < num_lfs; slot++) {
1405		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1406		if (lf < 0) /* This should never happen */
1407			continue;
1408
1409		/* Disable the LF */
1410		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1411			    (lf << block->lfshift), 0x00ULL);
1412
1413		/* Update SW maintained mapping info as well */
1414		rvu_update_rsrc_map(rvu, pfvf, block,
1415				    pcifunc, lf, false);
1416
1417		/* Free the resource */
1418		rvu_free_rsrc(&block->lf, lf);
1419
1420		/* Clear MSIX vector offset for this LF */
1421		rvu_clear_msix_offset(rvu, pfvf, block, lf);
1422	}
1423}
1424
1425static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1426			    u16 pcifunc)
1427{
1428	struct rvu_hwinfo *hw = rvu->hw;
1429	bool detach_all = true;
1430	struct rvu_block *block;
1431	int blkid;
1432
1433	mutex_lock(&rvu->rsrc_lock);
1434
1435	/* Check for partial resource detach */
1436	if (detach && detach->partial)
1437		detach_all = false;
1438
1439	/* Check for RVU block's LFs attached to this func,
1440	 * if so, detach them.
1441	 */
1442	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1443		block = &hw->block[blkid];
1444		if (!block->lf.bmap)
1445			continue;
1446		if (!detach_all && detach) {
1447			if (blkid == BLKADDR_NPA && !detach->npalf)
1448				continue;
1449			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1450				continue;
1451			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1452				continue;
1453			else if ((blkid == BLKADDR_SSO) && !detach->sso)
1454				continue;
1455			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1456				continue;
1457			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1458				continue;
1459			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1460				continue;
1461			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1462				continue;
1463		}
1464		rvu_detach_block(rvu, pcifunc, block->type);
1465	}
1466
1467	mutex_unlock(&rvu->rsrc_lock);
1468	return 0;
1469}
1470
1471int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1472				      struct rsrc_detach *detach,
1473				      struct msg_rsp *rsp)
1474{
1475	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1476}
1477
1478int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1479{
1480	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1481	int blkaddr = BLKADDR_NIX0, vf;
1482	struct rvu_pfvf *pf;
1483
1484	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1485
1486	/* All CGX mapped PFs are set with assigned NIX block during init */
1487	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1488		blkaddr = pf->nix_blkaddr;
1489	} else if (is_lbk_vf(rvu, pcifunc)) {
1490		vf = pcifunc - 1;
1491		/* Assign NIX based on VF number. All even numbered VFs get
1492		 * NIX0 and odd numbered VFs get NIX1.
1493		 */
1494		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1495		/* NIX1 is not present on all silicons */
1496		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1497			blkaddr = BLKADDR_NIX0;
1498	}
1499
1500	/* if SDP1 then the blkaddr is NIX1 */
1501	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
1502		blkaddr = BLKADDR_NIX1;
1503
1504	switch (blkaddr) {
1505	case BLKADDR_NIX1:
1506		pfvf->nix_blkaddr = BLKADDR_NIX1;
1507		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1508		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1509		break;
1510	case BLKADDR_NIX0:
1511	default:
1512		pfvf->nix_blkaddr = BLKADDR_NIX0;
1513		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1514		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1515		break;
1516	}
1517
1518	return pfvf->nix_blkaddr;
1519}
1520
1521static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1522				  u16 pcifunc, struct rsrc_attach *attach)
1523{
1524	int blkaddr;
1525
1526	switch (blktype) {
1527	case BLKTYPE_NIX:
1528		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1529		break;
1530	case BLKTYPE_CPT:
1531		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1532			return rvu_get_blkaddr(rvu, blktype, 0);
1533		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1534			  BLKADDR_CPT0;
1535		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1536			return -ENODEV;
1537		break;
1538	default:
1539		return rvu_get_blkaddr(rvu, blktype, 0);
1540	}
1541
1542	if (is_block_implemented(rvu->hw, blkaddr))
1543		return blkaddr;
1544
1545	return -ENODEV;
1546}
1547
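/* Attach 'num_lfs' LFs of a given block type to a pcifunc: allocate each
 * LF, program its config register with the owner and slot, update the SW
 * mapping and assign MSIX vectors.
 */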
1548static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1549			     int num_lfs, struct rsrc_attach *attach)
1550{
1551	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1552	struct rvu_hwinfo *hw = rvu->hw;
1553	struct rvu_block *block;
1554	int slot, lf;
1555	int blkaddr;
1556	u64 cfg;
1557
1558	if (!num_lfs)
1559		return;
1560
1561	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1562	if (blkaddr < 0)
1563		return;
1564
1565	block = &hw->block[blkaddr];
1566	if (!block->lf.bmap)
1567		return;
1568
1569	for (slot = 0; slot < num_lfs; slot++) {
1570		/* Allocate the resource */
1571		lf = rvu_alloc_rsrc(&block->lf);
1572		if (lf < 0)
1573			return;
1574
1575		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1576		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1577			    (lf << block->lfshift), cfg);
1578		rvu_update_rsrc_map(rvu, pfvf, block,
1579				    pcifunc, lf, true);
1580
1581		/* Set start MSIX vector for this LF within this PF/VF */
1582		rvu_set_msix_offset(rvu, pfvf, block, lf);
1583	}
1584}
1585
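/* Validate an attach request against per-block maximums and the number
 * of LFs still free before anything is actually allocated.
 */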
1586static int rvu_check_rsrc_availability(struct rvu *rvu,
1587				       struct rsrc_attach *req, u16 pcifunc)
1588{
1589	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1590	int free_lfs, mappedlfs, blkaddr;
1591	struct rvu_hwinfo *hw = rvu->hw;
1592	struct rvu_block *block;
1593
1594	/* Only one NPA LF can be attached */
1595	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1596		block = &hw->block[BLKADDR_NPA];
1597		free_lfs = rvu_rsrc_free_count(&block->lf);
1598		if (!free_lfs)
1599			goto fail;
1600	} else if (req->npalf) {
1601		dev_err(&rvu->pdev->dev,
1602			"Func 0x%x: Invalid req, already has NPA\n",
1603			 pcifunc);
1604		return -EINVAL;
1605	}
1606
1607	/* Only one NIX LF can be attached */
1608	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1609		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1610						 pcifunc, req);
1611		if (blkaddr < 0)
1612			return blkaddr;
1613		block = &hw->block[blkaddr];
1614		free_lfs = rvu_rsrc_free_count(&block->lf);
1615		if (!free_lfs)
1616			goto fail;
1617	} else if (req->nixlf) {
1618		dev_err(&rvu->pdev->dev,
1619			"Func 0x%x: Invalid req, already has NIX\n",
1620			pcifunc);
1621		return -EINVAL;
1622	}
1623
1624	if (req->sso) {
1625		block = &hw->block[BLKADDR_SSO];
1626		/* Is request within limits ? */
1627		if (req->sso > block->lf.max) {
1628			dev_err(&rvu->pdev->dev,
1629				"Func 0x%x: Invalid SSO req, %d > max %d\n",
1630				 pcifunc, req->sso, block->lf.max);
1631			return -EINVAL;
1632		}
1633		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1634		free_lfs = rvu_rsrc_free_count(&block->lf);
1635		/* Check if additional resources are available */
1636		if (req->sso > mappedlfs &&
1637		    ((req->sso - mappedlfs) > free_lfs))
1638			goto fail;
1639	}
1640
1641	if (req->ssow) {
1642		block = &hw->block[BLKADDR_SSOW];
1643		if (req->ssow > block->lf.max) {
1644			dev_err(&rvu->pdev->dev,
1645				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
1646				 pcifunc, req->ssow, block->lf.max);
1647			return -EINVAL;
1648		}
1649		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1650		free_lfs = rvu_rsrc_free_count(&block->lf);
1651		if (req->ssow > mappedlfs &&
1652		    ((req->ssow - mappedlfs) > free_lfs))
1653			goto fail;
1654	}
1655
1656	if (req->timlfs) {
1657		block = &hw->block[BLKADDR_TIM];
1658		if (req->timlfs > block->lf.max) {
1659			dev_err(&rvu->pdev->dev,
1660				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1661				 pcifunc, req->timlfs, block->lf.max);
1662			return -EINVAL;
1663		}
1664		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1665		free_lfs = rvu_rsrc_free_count(&block->lf);
1666		if (req->timlfs > mappedlfs &&
1667		    ((req->timlfs - mappedlfs) > free_lfs))
1668			goto fail;
1669	}
1670
1671	if (req->cptlfs) {
1672		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1673						 pcifunc, req);
1674		if (blkaddr < 0)
1675			return blkaddr;
1676		block = &hw->block[blkaddr];
1677		if (req->cptlfs > block->lf.max) {
1678			dev_err(&rvu->pdev->dev,
1679				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1680				 pcifunc, req->cptlfs, block->lf.max);
1681			return -EINVAL;
1682		}
1683		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1684		free_lfs = rvu_rsrc_free_count(&block->lf);
1685		if (req->cptlfs > mappedlfs &&
1686		    ((req->cptlfs - mappedlfs) > free_lfs))
1687			goto fail;
1688	}
1689
1690	return 0;
1691
1692fail:
1693	dev_info(rvu->dev, "Request for %s failed\n", block->name);
1694	return -ENOSPC;
1695}
1696
1697static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1698				       struct rsrc_attach *attach)
1699{
1700	int blkaddr, num_lfs;
1701
1702	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1703					 attach->hdr.pcifunc, attach);
1704	if (blkaddr < 0)
1705		return false;
1706
1707	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1708					blkaddr);
1709	/* Requester already has LFs from given block ? */
1710	return !!num_lfs;
1711}
1712
1713int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1714				      struct rsrc_attach *attach,
1715				      struct msg_rsp *rsp)
1716{
1717	u16 pcifunc = attach->hdr.pcifunc;
1718	int err;
1719
1720	/* If first request, detach all existing attached resources */
1721	if (!attach->modify)
1722		rvu_detach_rsrcs(rvu, NULL, pcifunc);
1723
1724	mutex_lock(&rvu->rsrc_lock);
1725
1726	/* Check if the request can be accommodated */
1727	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1728	if (err)
1729		goto exit;
1730
1731	/* Now attach the requested resources */
1732	if (attach->npalf)
1733		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1734
1735	if (attach->nixlf)
1736		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1737
1738	if (attach->sso) {
1739		/* A RVU func doesn't know which exact LF or slot is attached
1740		 * to it; it always sees them as slots 0, 1, 2... So for a
1741		 * 'modify' request, simply detach all existing attached
1742		 * LFs/slots and attach afresh.
1743		 */
1744		if (attach->modify)
1745			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1746		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1747				 attach->sso, attach);
1748	}
1749
1750	if (attach->ssow) {
1751		if (attach->modify)
1752			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1753		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1754				 attach->ssow, attach);
1755	}
1756
1757	if (attach->timlfs) {
1758		if (attach->modify)
1759			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1760		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1761				 attach->timlfs, attach);
1762	}
1763
1764	if (attach->cptlfs) {
1765		if (attach->modify &&
1766		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1767			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1768		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1769				 attach->cptlfs, attach);
1770	}
1771
1772exit:
1773	mutex_unlock(&rvu->rsrc_lock);
1774	return err;
1775}
1776
1777static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1778			       int blkaddr, int lf)
1779{
1780	u16 vec;
1781
1782	if (lf < 0)
1783		return MSIX_VECTOR_INVALID;
1784
1785	for (vec = 0; vec < pfvf->msix.max; vec++) {
1786		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1787			return vec;
1788	}
1789	return MSIX_VECTOR_INVALID;
1790}
1791
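/* Reserve a contiguous range of MSIX vectors for an LF, program the
 * offset into the LF's MSIX config register and record the mapping.
 */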
1792static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1793				struct rvu_block *block, int lf)
1794{
1795	u16 nvecs, vec, offset;
1796	u64 cfg;
1797
1798	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1799			 (lf << block->lfshift));
1800	nvecs = (cfg >> 12) & 0xFF;
1801
1802	/* Check and alloc MSIX vectors, must be contiguous */
1803	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1804		return;
1805
1806	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1807
1808	/* Config MSIX offset in LF */
1809	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1810		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1811
1812	/* Update the bitmap as well */
1813	for (vec = 0; vec < nvecs; vec++)
1814		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1815}
1816
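/* Undo rvu_set_msix_offset(): clear the LF's MSIX offset and return its
 * vectors to the PF/VF's MSIX bitmap.
 */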
1817static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1818				  struct rvu_block *block, int lf)
1819{
1820	u16 nvecs, vec, offset;
1821	u64 cfg;
1822
1823	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1824			 (lf << block->lfshift));
1825	nvecs = (cfg >> 12) & 0xFF;
1826
1827	/* Clear MSIX offset in LF */
1828	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1829		    (lf << block->lfshift), cfg & ~0x7FFULL);
1830
1831	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1832
1833	/* Update the mapping */
1834	for (vec = 0; vec < nvecs; vec++)
1835		pfvf->msix_lfmap[offset + vec] = 0;
1836
1837	/* Free the same in MSIX bitmap */
1838	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1839}
1840
1841int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1842				 struct msix_offset_rsp *rsp)
1843{
1844	struct rvu_hwinfo *hw = rvu->hw;
1845	u16 pcifunc = req->hdr.pcifunc;
1846	struct rvu_pfvf *pfvf;
1847	int lf, slot, blkaddr;
1848
1849	pfvf = rvu_get_pfvf(rvu, pcifunc);
1850	if (!pfvf->msix.bmap)
1851		return 0;
1852
1853	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
1854	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1855	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1856
1857	/* Get BLKADDR from which LFs are attached to pcifunc */
1858	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1859	if (blkaddr < 0) {
1860		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1861	} else {
1862		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1863		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1864	}
1865
1866	rsp->sso = pfvf->sso;
1867	for (slot = 0; slot < rsp->sso; slot++) {
1868		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1869		rsp->sso_msixoff[slot] =
1870			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1871	}
1872
1873	rsp->ssow = pfvf->ssow;
1874	for (slot = 0; slot < rsp->ssow; slot++) {
1875		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1876		rsp->ssow_msixoff[slot] =
1877			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1878	}
1879
1880	rsp->timlfs = pfvf->timlfs;
1881	for (slot = 0; slot < rsp->timlfs; slot++) {
1882		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1883		rsp->timlf_msixoff[slot] =
1884			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1885	}
1886
1887	rsp->cptlfs = pfvf->cptlfs;
1888	for (slot = 0; slot < rsp->cptlfs; slot++) {
1889		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1890		rsp->cptlf_msixoff[slot] =
1891			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1892	}
1893
1894	rsp->cpt1_lfs = pfvf->cpt1_lfs;
1895	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1896		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1897		rsp->cpt1_lf_msixoff[slot] =
1898			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1899	}
1900
1901	return 0;
1902}
1903
1904int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1905				   struct free_rsrcs_rsp *rsp)
1906{
1907	struct rvu_hwinfo *hw = rvu->hw;
1908	struct rvu_block *block;
1909	struct nix_txsch *txsch;
1910	struct nix_hw *nix_hw;
1911
1912	mutex_lock(&rvu->rsrc_lock);
1913
1914	block = &hw->block[BLKADDR_NPA];
1915	rsp->npa = rvu_rsrc_free_count(&block->lf);
1916
1917	block = &hw->block[BLKADDR_NIX0];
1918	rsp->nix = rvu_rsrc_free_count(&block->lf);
1919
1920	block = &hw->block[BLKADDR_NIX1];
1921	rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1922
1923	block = &hw->block[BLKADDR_SSO];
1924	rsp->sso = rvu_rsrc_free_count(&block->lf);
1925
1926	block = &hw->block[BLKADDR_SSOW];
1927	rsp->ssow = rvu_rsrc_free_count(&block->lf);
1928
1929	block = &hw->block[BLKADDR_TIM];
1930	rsp->tim = rvu_rsrc_free_count(&block->lf);
1931
1932	block = &hw->block[BLKADDR_CPT0];
1933	rsp->cpt = rvu_rsrc_free_count(&block->lf);
1934
1935	block = &hw->block[BLKADDR_CPT1];
1936	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1937
1938	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1939		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1940		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1941		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1942		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1943		/* NIX1 */
1944		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1945			goto out;
1946		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1947		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1948		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1949		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1950	} else {
1951		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1952		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1953		rsp->schq[NIX_TXSCH_LVL_SMQ] =
1954				rvu_rsrc_free_count(&txsch->schq);
1955
1956		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1957		rsp->schq[NIX_TXSCH_LVL_TL4] =
1958				rvu_rsrc_free_count(&txsch->schq);
1959
1960		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1961		rsp->schq[NIX_TXSCH_LVL_TL3] =
1962				rvu_rsrc_free_count(&txsch->schq);
1963
1964		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1965		rsp->schq[NIX_TXSCH_LVL_TL2] =
1966				rvu_rsrc_free_count(&txsch->schq);
1967
1968		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1969			goto out;
1970
1971		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1972		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1973		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1974				rvu_rsrc_free_count(&txsch->schq);
1975
1976		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1977		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1978				rvu_rsrc_free_count(&txsch->schq);
1979
1980		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1981		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1982				rvu_rsrc_free_count(&txsch->schq);
1983
1984		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1985		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1986				rvu_rsrc_free_count(&txsch->schq);
1987	}
1988
1989	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1990out:
1991	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1992	mutex_unlock(&rvu->rsrc_lock);
1993
1994	return 0;
1995}
1996
1997int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1998			    struct msg_rsp *rsp)
1999{
2000	u16 pcifunc = req->hdr.pcifunc;
2001	u16 vf, numvfs;
2002	u64 cfg;
2003
2004	vf = pcifunc & RVU_PFVF_FUNC_MASK;
2005	cfg = rvu_read64(rvu, BLKADDR_RVUM,
2006			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
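	/* Bits [19:12] of RVU_PRIV_PFX_CFG hold the number of VFs
	 * attached to this PF.
	 */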
2007	numvfs = (cfg >> 12) & 0xFF;
2008
2009	if (vf && vf <= numvfs)
2010		__rvu_flr_handler(rvu, pcifunc);
2011	else
2012		return RVU_INVALID_VF_ID;
2013
2014	return 0;
2015}
2016
2017int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
2018				struct get_hw_cap_rsp *rsp)
2019{
2020	struct rvu_hwinfo *hw = rvu->hw;
2021
2022	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
2023	rsp->nix_shaping = hw->cap.nix_shaping;
2024	rsp->npc_hash_extract = hw->cap.npc_hash_extract;
2025
2026	return 0;
2027}
2028
2029int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
2030				 struct msg_rsp *rsp)
2031{
2032	struct rvu_hwinfo *hw = rvu->hw;
2033	u16 pcifunc = req->hdr.pcifunc;
2034	struct rvu_pfvf *pfvf;
2035	int blkaddr, nixlf;
2036	u16 target;
2037
2038	/* Only PF can add VF permissions */
2039	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
2040		return -EOPNOTSUPP;
2041
2042	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
2043	pfvf = rvu_get_pfvf(rvu, target);
2044
2045	if (req->flags & RESET_VF_PERM) {
2046		pfvf->flags &= RVU_CLEAR_VF_PERM;
2047	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
2048		 (req->flags & VF_TRUSTED)) {
2049		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
2050		/* disable multicast and promisc entries */
2051		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
2052			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
2053			if (blkaddr < 0)
2054				return 0;
2055			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2056					   target, 0);
2057			if (nixlf < 0)
2058				return 0;
2059			npc_enadis_default_mce_entry(rvu, target, nixlf,
2060						     NIXLF_ALLMULTI_ENTRY,
2061						     false);
2062			npc_enadis_default_mce_entry(rvu, target, nixlf,
2063						     NIXLF_PROMISC_ENTRY,
2064						     false);
2065		}
2066	}
2067
2068	return 0;
2069}
2070
2071static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
2072				struct mbox_msghdr *req)
2073{
2074	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
2075
2076	/* Check if valid, if not reply with an invalid msg */
2077	if (req->sig != OTX2_MBOX_REQ_SIG)
2078		goto bad_message;
2079
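	/* The M() macro below expands MBOX_MESSAGES into one case per
	 * message ID: allocate a response of the matching type, dispatch
	 * to the corresponding rvu_mbox_handler_*() and propagate its
	 * return code via rsp->hdr.rc.
	 */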
2080	switch (req->id) {
2081#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
2082	case _id: {							\
2083		struct _rsp_type *rsp;					\
2084		int err;						\
2085									\
2086		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
2087			mbox, devid,					\
2088			sizeof(struct _rsp_type));			\
2089		/* some handlers should complete even if reply */	\
2090		/* could not be allocated */				\
2091		if (!rsp &&						\
2092		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
2093		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
2094		    _id != MBOX_MSG_VF_FLR)				\
2095			return -ENOMEM;					\
2096		if (rsp) {						\
2097			rsp->hdr.id = _id;				\
2098			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
2099			rsp->hdr.pcifunc = req->pcifunc;		\
2100			rsp->hdr.rc = 0;				\
2101		}							\
2102									\
2103		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
2104						    (struct _req_type *)req, \
2105						    rsp);		\
2106		if (rsp && err)						\
2107			rsp->hdr.rc = err;				\
2108									\
2109		trace_otx2_msg_process(mbox->pdev, _id, err);		\
2110		return rsp ? err : -ENOMEM;				\
2111	}
2112MBOX_MESSAGES
2113#undef M
2114
2115bad_message:
2116	default:
2117		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2118		return -ENODEV;
2119	}
2120}
2121
2122static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
2123{
2124	struct rvu *rvu = mwork->rvu;
2125	int offset, err, id, devid;
2126	struct otx2_mbox_dev *mdev;
2127	struct mbox_hdr *req_hdr;
2128	struct mbox_msghdr *msg;
2129	struct mbox_wq_info *mw;
2130	struct otx2_mbox *mbox;
2131
2132	switch (type) {
2133	case TYPE_AFPF:
2134		mw = &rvu->afpf_wq_info;
2135		break;
2136	case TYPE_AFVF:
2137		mw = &rvu->afvf_wq_info;
2138		break;
2139	default:
2140		return;
2141	}
2142
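	/* The work struct's index in the per-device array identifies the
	 * mbox device (PF or VF) whose messages are being processed.
	 */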
2143	devid = mwork - mw->mbox_wrk;
2144	mbox = &mw->mbox;
2145	mdev = &mbox->dev[devid];
2146
2147	/* Process received mbox messages */
2148	req_hdr = mdev->mbase + mbox->rx_start;
2149	if (mw->mbox_wrk[devid].num_msgs == 0)
2150		return;
2151
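	/* Messages follow the mailbox header; skip past the aligned header
	 * to reach the first message.
	 */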
2152	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2153
2154	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2155		msg = mdev->mbase + offset;
2156
2157		/* Set which PF/VF sent this message based on mbox IRQ */
2158		switch (type) {
2159		case TYPE_AFPF:
2160			msg->pcifunc &=
2161				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2162			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2163			break;
2164		case TYPE_AFVF:
2165			msg->pcifunc &=
2166				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2167			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2168			break;
2169		}
2170
2171		err = rvu_process_mbox_msg(mbox, devid, msg);
2172		if (!err) {
2173			offset = mbox->rx_start + msg->next_msgoff;
2174			continue;
2175		}
2176
2177		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2178			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2179				 err, otx2_mbox_id2name(msg->id),
2180				 msg->id, rvu_get_pf(msg->pcifunc),
2181				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2182		else
2183			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2184				 err, otx2_mbox_id2name(msg->id),
2185				 msg->id, devid);
2186	}
2187	mw->mbox_wrk[devid].num_msgs = 0;
2188
2189	if (poll)
2190		otx2_mbox_wait_for_zero(mbox, devid);
2191
2192	/* Send mbox responses to VF/PF */
2193	otx2_mbox_msg_send(mbox, devid);
2194}
2195
2196static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2197{
2198	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2199	struct rvu *rvu = mwork->rvu;
2200
2201	mutex_lock(&rvu->mbox_lock);
2202	__rvu_mbox_handler(mwork, TYPE_AFPF, true);
2203	mutex_unlock(&rvu->mbox_lock);
2204}
2205
2206static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2207{
2208	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2209
2210	__rvu_mbox_handler(mwork, TYPE_AFVF, false);
2211}
2212
2213static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2214{
2215	struct rvu *rvu = mwork->rvu;
2216	struct otx2_mbox_dev *mdev;
2217	struct mbox_hdr *rsp_hdr;
2218	struct mbox_msghdr *msg;
2219	struct mbox_wq_info *mw;
2220	struct otx2_mbox *mbox;
2221	int offset, id, devid;
2222
2223	switch (type) {
2224	case TYPE_AFPF:
2225		mw = &rvu->afpf_wq_info;
2226		break;
2227	case TYPE_AFVF:
2228		mw = &rvu->afvf_wq_info;
2229		break;
2230	default:
2231		return;
2232	}
2233
2234	devid = mwork - mw->mbox_wrk_up;
2235	mbox = &mw->mbox_up;
2236	mdev = &mbox->dev[devid];
2237
2238	rsp_hdr = mdev->mbase + mbox->rx_start;
2239	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2240		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2241		return;
2242	}
2243
2244	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2245
2246	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2247		msg = mdev->mbase + offset;
2248
2249		if (msg->id >= MBOX_MSG_MAX) {
2250			dev_err(rvu->dev,
2251				"Mbox msg with unknown ID 0x%x\n", msg->id);
2252			goto end;
2253		}
2254
2255		if (msg->sig != OTX2_MBOX_RSP_SIG) {
2256			dev_err(rvu->dev,
2257				"Mbox msg with wrong signature %x, ID 0x%x\n",
2258				msg->sig, msg->id);
2259			goto end;
2260		}
2261
2262		switch (msg->id) {
2263		case MBOX_MSG_CGX_LINK_EVENT:
2264			break;
2265		default:
2266			if (msg->rc)
2267				dev_err(rvu->dev,
2268					"Mbox msg response has err %d, ID 0x%x\n",
2269					msg->rc, msg->id);
2270			break;
2271		}
2272end:
2273		offset = mbox->rx_start + msg->next_msgoff;
2274		mdev->msgs_acked++;
2275	}
2276	mw->mbox_wrk_up[devid].up_num_msgs = 0;
2277
2278	otx2_mbox_reset(mbox, devid);
2279}
2280
2281static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2282{
2283	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2284
2285	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
2286}
2287
2288static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2289{
2290	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2291
2292	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
2293}
2294
2295static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2296				int num, int type, unsigned long *pf_bmap)
2297{
2298	struct rvu_hwinfo *hw = rvu->hw;
2299	int region;
2300	u64 bar4;
2301
2302	/* For cn10k platform the VF mailbox regions of a PF follow the
2303	 * PF <-> AF mailbox region, whereas for Octeontx2 the base address
2304	 * is read from the RVU_PF_VF_BAR4_ADDR register.
2305	 */
2306	if (type == TYPE_AFVF) {
2307		for (region = 0; region < num; region++) {
2308			if (!test_bit(region, pf_bmap))
2309				continue;
2310
2311			if (hw->cap.per_pf_mbox_regs) {
2312				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2313						  RVU_AF_PFX_BAR4_ADDR(0)) +
2314						  MBOX_SIZE;
2315				bar4 += region * MBOX_SIZE;
2316			} else {
2317				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2318				bar4 += region * MBOX_SIZE;
2319			}
2320			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2321			if (!mbox_addr[region])
2322				goto error;
2323		}
2324		return 0;
2325	}
2326
2327	/* For cn10k platform the AF <-> PF mailbox region of a PF is read
2328	 * from per-PF registers, whereas for Octeontx2 it is derived from
2329	 * the RVU_AF_PF_BAR4_ADDR register.
2330	 */
2331	for (region = 0; region < num; region++) {
2332		if (!test_bit(region, pf_bmap))
2333			continue;
2334
2335		if (hw->cap.per_pf_mbox_regs) {
2336			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2337					  RVU_AF_PFX_BAR4_ADDR(region));
2338		} else {
2339			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2340					  RVU_AF_PF_BAR4_ADDR);
2341			bar4 += region * MBOX_SIZE;
2342		}
2343		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2344		if (!mbox_addr[region])
2345			goto error;
2346	}
2347	return 0;
2348
2349error:
2350	while (region--)
2351		iounmap((void __iomem *)mbox_addr[region]);
2352	return -ENOMEM;
2353}
2354
2355static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2356			 int type, int num,
2357			 void (mbox_handler)(struct work_struct *),
2358			 void (mbox_up_handler)(struct work_struct *))
2359{
2360	int err = -EINVAL, i, dir, dir_up;
2361	void __iomem *reg_base;
2362	struct rvu_work *mwork;
2363	unsigned long *pf_bmap;
2364	void **mbox_regions;
2365	const char *name;
2366	u64 cfg;
2367
2368	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
2369	if (!pf_bmap)
2370		return -ENOMEM;
2371
2372	/* RVU VFs */
2373	if (type == TYPE_AFVF)
2374		bitmap_set(pf_bmap, 0, num);
2375
2376	if (type == TYPE_AFPF) {
2377		/* Mark enabled PFs in bitmap */
2378		for (i = 0; i < num; i++) {
2379			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
2380			if (cfg & BIT_ULL(20))
2381				set_bit(i, pf_bmap);
2382		}
2383	}
2384
2385	mutex_init(&rvu->mbox_lock);
2386
2387	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2388	if (!mbox_regions) {
2389		err = -ENOMEM;
2390		goto free_bitmap;
2391	}
2392
2393	switch (type) {
2394	case TYPE_AFPF:
2395		name = "rvu_afpf_mailbox";
2396		dir = MBOX_DIR_AFPF;
2397		dir_up = MBOX_DIR_AFPF_UP;
2398		reg_base = rvu->afreg_base;
2399		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
2400		if (err)
2401			goto free_regions;
2402		break;
2403	case TYPE_AFVF:
2404		name = "rvu_afvf_mailbox";
2405		dir = MBOX_DIR_PFVF;
2406		dir_up = MBOX_DIR_PFVF_UP;
2407		reg_base = rvu->pfreg_base;
2408		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
2409		if (err)
2410			goto free_regions;
2411		break;
2412	default:
2413		goto free_regions;
2414	}
2415
2416	mw->mbox_wq = alloc_workqueue(name,
2417				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2418				      num);
2419	if (!mw->mbox_wq) {
2420		err = -ENOMEM;
2421		goto unmap_regions;
2422	}
2423
2424	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2425				    sizeof(struct rvu_work), GFP_KERNEL);
2426	if (!mw->mbox_wrk) {
2427		err = -ENOMEM;
2428		goto exit;
2429	}
2430
2431	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2432				       sizeof(struct rvu_work), GFP_KERNEL);
2433	if (!mw->mbox_wrk_up) {
2434		err = -ENOMEM;
2435		goto exit;
2436	}
2437
2438	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2439				     reg_base, dir, num, pf_bmap);
2440	if (err)
2441		goto exit;
2442
2443	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2444				     reg_base, dir_up, num, pf_bmap);
2445	if (err)
2446		goto exit;
2447
2448	for (i = 0; i < num; i++) {
2449		if (!test_bit(i, pf_bmap))
2450			continue;
2451
2452		mwork = &mw->mbox_wrk[i];
2453		mwork->rvu = rvu;
2454		INIT_WORK(&mwork->work, mbox_handler);
2455
2456		mwork = &mw->mbox_wrk_up[i];
2457		mwork->rvu = rvu;
2458		INIT_WORK(&mwork->work, mbox_up_handler);
2459	}
2460	goto free_regions;
2461
2462exit:
2463	destroy_workqueue(mw->mbox_wq);
2464unmap_regions:
2465	while (num--)
2466		iounmap((void __iomem *)mbox_regions[num]);
2467free_regions:
2468	kfree(mbox_regions);
2469free_bitmap:
2470	bitmap_free(pf_bmap);
2471	return err;
2472}
2473
2474static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2475{
2476	struct otx2_mbox *mbox = &mw->mbox;
2477	struct otx2_mbox_dev *mdev;
2478	int devid;
2479
2480	if (mw->mbox_wq) {
2481		destroy_workqueue(mw->mbox_wq);
2482		mw->mbox_wq = NULL;
2483	}
2484
2485	for (devid = 0; devid < mbox->ndevs; devid++) {
2486		mdev = &mbox->dev[devid];
2487		if (mdev->hwbase)
2488			iounmap((void __iomem *)mdev->hwbase);
2489	}
2490
2491	otx2_mbox_destroy(&mw->mbox);
2492	otx2_mbox_destroy(&mw->mbox_up);
2493}
2494
2495static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2496			   int mdevs, u64 intr)
2497{
2498	struct otx2_mbox_dev *mdev;
2499	struct otx2_mbox *mbox;
2500	struct mbox_hdr *hdr;
2501	int i;
2502
2503	for (i = first; i < mdevs; i++) {
2504		/* Interrupt bit 0 corresponds to device 'first' */
2505		if (!(intr & BIT_ULL(i - first)))
2506			continue;
2507
2508		mbox = &mw->mbox;
2509		mdev = &mbox->dev[i];
2510		hdr = mdev->mbase + mbox->rx_start;
2511
2512		/* The hdr->num_msgs is set to zero immediately in the interrupt
2513		 * handler to ensure that it holds a correct value the next time
2514		 * the interrupt handler is called.
2515		 * mbox_wrk[i].num_msgs holds the count for use in the mbox
2516		 * handler and mbox_wrk_up[i].up_num_msgs holds the count for
2517		 * use in the mbox up handler.
2518		 */
2519
2520		if (hdr->num_msgs) {
2521			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2522			hdr->num_msgs = 0;
2523			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2524		}
2525		mbox = &mw->mbox_up;
2526		mdev = &mbox->dev[i];
2527		hdr = mdev->mbase + mbox->rx_start;
2528		if (hdr->num_msgs) {
2529			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2530			hdr->num_msgs = 0;
2531			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2532		}
2533	}
2534}
2535
2536static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
2537{
2538	struct rvu *rvu = (struct rvu *)rvu_irq;
2539	u64 intr;
2540
2541	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2542	/* Clear interrupts */
2543	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2544	if (intr)
2545		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2546
2547	/* Sync with mbox memory region */
2548	rmb();
2549
2550	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2551
2552	return IRQ_HANDLED;
2553}
2554
2555static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2556{
2557	struct rvu *rvu = (struct rvu *)rvu_irq;
2558	int vfs = rvu->vfs;
2559	u64 intr;
2560
2561	/* Sync with mbox memory region */
2562	rmb();
2563
2564	/* Handle VF interrupts */
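	/* VFs 64 and above are reported via the second interrupt register. */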
2565	if (vfs > 64) {
2566		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2567		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2568
2569		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2570		vfs -= 64;
2571	}
2572
2573	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2574	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2575	if (intr)
2576		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2577
2578	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2579
2580	return IRQ_HANDLED;
2581}
2582
2583static void rvu_enable_mbox_intr(struct rvu *rvu)
2584{
2585	struct rvu_hwinfo *hw = rvu->hw;
2586
2587	/* Clear spurious irqs, if any */
2588	rvu_write64(rvu, BLKADDR_RVUM,
2589		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2590
2591	/* Enable mailbox interrupt for all PFs except PF0 i.e. AF itself */
2592	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2593		    INTR_MASK(hw->total_pfs) & ~1ULL);
2594}
2595
2596static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2597{
2598	struct rvu_block *block;
2599	int slot, lf, num_lfs;
2600	int err;
2601
2602	block = &rvu->hw->block[blkaddr];
2603	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2604					block->addr);
2605	if (!num_lfs)
2606		return;
2607	for (slot = 0; slot < num_lfs; slot++) {
2608		lf = rvu_get_lf(rvu, block, pcifunc, slot);
2609		if (lf < 0)
2610			continue;
2611
2612		/* Cleanup LF and reset it */
2613		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2614			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2615		else if (block->addr == BLKADDR_NPA)
2616			rvu_npa_lf_teardown(rvu, pcifunc, lf);
2617		else if ((block->addr == BLKADDR_CPT0) ||
2618			 (block->addr == BLKADDR_CPT1))
2619			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
2620					    slot);
2621
2622		err = rvu_lf_reset(rvu, block, lf);
2623		if (err) {
2624			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2625				block->addr, lf);
2626		}
2627	}
2628}
2629
2630static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2631{
2632	if (rvu_npc_exact_has_match_table(rvu))
2633		rvu_npc_exact_reset(rvu, pcifunc);
2634
2635	mutex_lock(&rvu->flr_lock);
2636	/* Reset order should reflect inter-block dependencies:
2637	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2638	 * 2. Flush and reset SSO/SSOW
2639	 * 3. Cleanup pools (NPA)
2640	 */
2641
2642	/* Free allocated BPIDs */
2643	rvu_nix_flr_free_bpids(rvu, pcifunc);
2644
2645	/* Free multicast/mirror node associated with the 'pcifunc' */
2646	rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
2647
2648	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2649	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2650	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2651	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2652	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2653	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2654	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2655	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2656	rvu_reset_lmt_map_tbl(rvu, pcifunc);
2657	rvu_detach_rsrcs(rvu, NULL, pcifunc);
2658	/* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
2659	 * entries, check and free the MCAM entries explicitly to avoid a leak.
2660	 * Since the LF is already detached, pass -1 as the LF number.
2661	 */
2662	rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
2663	rvu_mac_reset(rvu, pcifunc);
2664
2665	if (rvu->mcs_blk_cnt)
2666		rvu_mcs_flr_handler(rvu, pcifunc);
2667
2668	mutex_unlock(&rvu->flr_lock);
2669}
2670
2671static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2672{
2673	int reg = 0;
2674
2675	/* pcifunc = 0(PF0) | (vf + 1) */
2676	__rvu_flr_handler(rvu, vf + 1);
2677
2678	if (vf >= 64) {
2679		reg = 1;
2680		vf = vf - 64;
2681	}
2682
2683	/* Signal FLR finish and enable IRQ */
2684	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2685	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2686}
2687
2688static void rvu_flr_handler(struct work_struct *work)
2689{
2690	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2691	struct rvu *rvu = flrwork->rvu;
2692	u16 pcifunc, numvfs, vf;
2693	u64 cfg;
2694	int pf;
2695
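	/* flr_wrk[] has one entry per PF followed by entries for AF's own
	 * VFs, so indices at or beyond total_pfs belong to AF's VFs.
	 */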
2696	pf = flrwork - rvu->flr_wrk;
2697	if (pf >= rvu->hw->total_pfs) {
2698		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2699		return;
2700	}
2701
2702	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2703	numvfs = (cfg >> 12) & 0xFF;
2704	pcifunc  = pf << RVU_PFVF_PF_SHIFT;
2705
2706	for (vf = 0; vf < numvfs; vf++)
2707		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2708
2709	__rvu_flr_handler(rvu, pcifunc);
2710
2711	/* Signal FLR finish */
2712	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2713
2714	/* Enable interrupt */
2715	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2716}
2717
2718static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2719{
2720	int dev, vf, reg = 0;
2721	u64 intr;
2722
2723	if (start_vf >= 64)
2724		reg = 1;
2725
2726	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2727	if (!intr)
2728		return;
2729
2730	for (vf = 0; vf < numvfs; vf++) {
2731		if (!(intr & BIT_ULL(vf)))
2732			continue;
2733		/* Clear and disable the interrupt */
2734		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2735		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2736
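		/* FLR work entries for AF's VFs follow the per-PF entries,
		 * hence the total_pfs offset.
		 */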
2737		dev = vf + start_vf + rvu->hw->total_pfs;
2738		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2739	}
2740}
2741
2742static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2743{
2744	struct rvu *rvu = (struct rvu *)rvu_irq;
2745	u64 intr;
2746	u8  pf;
2747
2748	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2749	if (!intr)
2750		goto afvf_flr;
2751
2752	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2753		if (intr & (1ULL << pf)) {
2754			/* clear interrupt */
2755			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2756				    BIT_ULL(pf));
2757			/* Disable the interrupt */
2758			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2759				    BIT_ULL(pf));
2760			/* PF is already dead, do only AF related operations */
2761			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2762		}
2763	}
2764
2765afvf_flr:
2766	rvu_afvf_queue_flr_work(rvu, 0, 64);
2767	if (rvu->vfs > 64)
2768		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2769
2770	return IRQ_HANDLED;
2771}
2772
2773static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2774{
2775	int vf;
2776
2777	/* Nothing to be done here other than clearing the
2778	 * TRPEND bit.
2779	 */
2780	for (vf = 0; vf < 64; vf++) {
2781		if (intr & (1ULL << vf)) {
2782			/* clear the trpend due to ME (master enable) */
2783			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2784			/* clear interrupt */
2785			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2786		}
2787	}
2788}
2789
2790/* Handles ME interrupts from VFs of AF */
2791static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2792{
2793	struct rvu *rvu = (struct rvu *)rvu_irq;
2794	int vfset;
2795	u64 intr;
2796
2797	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2798
2799	for (vfset = 0; vfset <= 1; vfset++) {
2800		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2801		if (intr)
2802			rvu_me_handle_vfset(rvu, vfset, intr);
2803	}
2804
2805	return IRQ_HANDLED;
2806}
2807
2808/* Handles ME interrupts from PFs */
2809static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2810{
2811	struct rvu *rvu = (struct rvu *)rvu_irq;
2812	u64 intr;
2813	u8  pf;
2814
2815	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2816
2817	/* Nothing to be done here other than clearing the
2818	 * TRPEND bit.
2819	 */
2820	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2821		if (intr & (1ULL << pf)) {
2822			/* clear the trpend due to ME (master enable) */
2823			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2824				    BIT_ULL(pf));
2825			/* clear interrupt */
2826			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2827				    BIT_ULL(pf));
2828		}
2829	}
2830
2831	return IRQ_HANDLED;
2832}
2833
2834static void rvu_unregister_interrupts(struct rvu *rvu)
2835{
2836	int irq;
2837
2838	rvu_cpt_unregister_interrupts(rvu);
2839
2840	/* Disable the Mbox interrupt */
2841	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2842		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2843
2844	/* Disable the PF FLR interrupt */
2845	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2846		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2847
2848	/* Disable the PF ME interrupt */
2849	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2850		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2851
2852	for (irq = 0; irq < rvu->num_vec; irq++) {
2853		if (rvu->irq_allocated[irq]) {
2854			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2855			rvu->irq_allocated[irq] = false;
2856		}
2857	}
2858
2859	pci_free_irq_vectors(rvu->pdev);
2860	rvu->num_vec = 0;
2861}
2862
2863static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2864{
2865	struct rvu_pfvf *pfvf = &rvu->pf[0];
2866	int offset;
2867
2869	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2870
2871	/* Make sure there are enough MSIX vectors configured so that
2872	 * VF interrupts can be handled. An offset of zero means that
2873	 * PF vectors are not configured and would overlap AF vectors.
2874	 */
2875	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2876	       offset;
2877}
2878
2879static int rvu_register_interrupts(struct rvu *rvu)
2880{
2881	int ret, offset, pf_vec_start;
2882
2883	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2884
2885	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2886					   NAME_SIZE, GFP_KERNEL);
2887	if (!rvu->irq_name)
2888		return -ENOMEM;
2889
2890	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2891					  sizeof(bool), GFP_KERNEL);
2892	if (!rvu->irq_allocated)
2893		return -ENOMEM;
2894
2895	/* Enable MSI-X */
2896	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2897				    rvu->num_vec, PCI_IRQ_MSIX);
2898	if (ret < 0) {
2899		dev_err(rvu->dev,
2900			"RVUAF: Request for %d msix vectors failed, ret %d\n",
2901			rvu->num_vec, ret);
2902		return ret;
2903	}
2904
2905	/* Register mailbox interrupt handler */
2906	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2907	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2908			  rvu_mbox_pf_intr_handler, 0,
2909			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2910	if (ret) {
2911		dev_err(rvu->dev,
2912			"RVUAF: IRQ registration failed for mbox irq\n");
2913		goto fail;
2914	}
2915
2916	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2917
2918	/* Enable mailbox interrupts from all PFs */
2919	rvu_enable_mbox_intr(rvu);
2920
2921	/* Register FLR interrupt handler */
2922	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2923		"RVUAF FLR");
2924	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2925			  rvu_flr_intr_handler, 0,
2926			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2927			  rvu);
2928	if (ret) {
2929		dev_err(rvu->dev,
2930			"RVUAF: IRQ registration failed for FLR\n");
2931		goto fail;
2932	}
2933	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2934
2935	/* Enable FLR interrupt for all PFs */
2936	rvu_write64(rvu, BLKADDR_RVUM,
2937		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2938
2939	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2940		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2941
2942	/* Register ME interrupt handler */
2943	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2944		"RVUAF ME");
2945	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2946			  rvu_me_pf_intr_handler, 0,
2947			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2948			  rvu);
2949	if (ret) {
2950		dev_err(rvu->dev,
2951			"RVUAF: IRQ registration failed for ME\n");
2952	}
2953	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2954
2955	/* Clear TRPEND bit for all PF */
2956	rvu_write64(rvu, BLKADDR_RVUM,
2957		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2958	/* Enable ME interrupt for all PFs */
2959	rvu_write64(rvu, BLKADDR_RVUM,
2960		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2961
2962	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2963		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2964
2965	if (!rvu_afvf_msix_vectors_num_ok(rvu))
2966		return 0;
2967
2968	/* Get PF MSIX vectors offset. */
2969	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2970				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2971
2972	/* Register MBOX0 interrupt. */
2973	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2974	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2975	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2976			  rvu_mbox_intr_handler, 0,
2977			  &rvu->irq_name[offset * NAME_SIZE],
2978			  rvu);
2979	if (ret)
2980		dev_err(rvu->dev,
2981			"RVUAF: IRQ registration failed for Mbox0\n");
2982
2983	rvu->irq_allocated[offset] = true;
2984
2985	/* Register MBOX1 interrupt. The MBOX1 IRQ vector immediately
2986	 * follows MBOX0 in the PF vector range.
2987	 */
2988	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2989	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2990	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2991			  rvu_mbox_intr_handler, 0,
2992			  &rvu->irq_name[offset * NAME_SIZE],
2993			  rvu);
2994	if (ret)
2995		dev_err(rvu->dev,
2996			"RVUAF: IRQ registration failed for Mbox1\n");
2997
2998	rvu->irq_allocated[offset] = true;
2999
3000	/* Register FLR interrupt handler for AF's VFs */
3001	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
3002	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
3003	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3004			  rvu_flr_intr_handler, 0,
3005			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3006	if (ret) {
3007		dev_err(rvu->dev,
3008			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
3009		goto fail;
3010	}
3011	rvu->irq_allocated[offset] = true;
3012
3013	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
3014	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
3015	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3016			  rvu_flr_intr_handler, 0,
3017			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3018	if (ret) {
3019		dev_err(rvu->dev,
3020			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
3021		goto fail;
3022	}
3023	rvu->irq_allocated[offset] = true;
3024
3025	/* Register ME interrupt handler for AF's VFs */
3026	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
3027	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
3028	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3029			  rvu_me_vf_intr_handler, 0,
3030			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3031	if (ret) {
3032		dev_err(rvu->dev,
3033			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
3034		goto fail;
3035	}
3036	rvu->irq_allocated[offset] = true;
3037
3038	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
3039	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
3040	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3041			  rvu_me_vf_intr_handler, 0,
3042			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3043	if (ret) {
3044		dev_err(rvu->dev,
3045			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
3046		goto fail;
3047	}
3048	rvu->irq_allocated[offset] = true;
3049
3050	ret = rvu_cpt_register_interrupts(rvu);
3051	if (ret)
3052		goto fail;
3053
3054	return 0;
3055
3056fail:
3057	rvu_unregister_interrupts(rvu);
3058	return ret;
3059}
3060
3061static void rvu_flr_wq_destroy(struct rvu *rvu)
3062{
3063	if (rvu->flr_wq) {
3064		destroy_workqueue(rvu->flr_wq);
3065		rvu->flr_wq = NULL;
3066	}
3067}
3068
3069static int rvu_flr_init(struct rvu *rvu)
3070{
3071	int dev, num_devs;
3072	u64 cfg;
3073	int pf;
3074
3075	/* Enable FLR for all PFs*/
3076	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3077		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3078		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
3079			    cfg | BIT_ULL(22));
3080	}
3081
3082	rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
3083					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
3084	if (!rvu->flr_wq)
3085		return -ENOMEM;
3086
3087	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
3088	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
3089				    sizeof(struct rvu_work), GFP_KERNEL);
3090	if (!rvu->flr_wrk) {
3091		destroy_workqueue(rvu->flr_wq);
3092		return -ENOMEM;
3093	}
3094
3095	for (dev = 0; dev < num_devs; dev++) {
3096		rvu->flr_wrk[dev].rvu = rvu;
3097		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
3098	}
3099
3100	mutex_init(&rvu->flr_lock);
3101
3102	return 0;
3103}
3104
3105static void rvu_disable_afvf_intr(struct rvu *rvu)
3106{
3107	int vfs = rvu->vfs;
3108
3109	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
3110	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
3111	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
3112	if (vfs <= 64)
3113		return;
3114
3115	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
3116		      INTR_MASK(vfs - 64));
3117	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3118	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3119}
3120
3121static void rvu_enable_afvf_intr(struct rvu *rvu)
3122{
3123	int vfs = rvu->vfs;
3124
3125	/* Clear any pending interrupts and enable AF VF interrupts for
3126	 * the first 64 VFs.
3127	 */
3128	/* Mbox */
3129	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
3130	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
3131
3132	/* FLR */
3133	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
3134	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
3135	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
3136
3137	/* Same for remaining VFs, if any. */
3138	if (vfs <= 64)
3139		return;
3140
3141	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
3142	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
3143		      INTR_MASK(vfs - 64));
3144
3145	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
3146	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3147	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3148}
3149
3150int rvu_get_num_lbk_chans(void)
3151{
3152	struct pci_dev *pdev;
3153	void __iomem *base;
3154	int ret = -EIO;
3155
3156	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
3157			      NULL);
3158	if (!pdev)
3159		goto err;
3160
3161	base = pci_ioremap_bar(pdev, 0);
3162	if (!base)
3163		goto err_put;
3164
3165	/* Read number of available LBK channels from LBK(0)_CONST register. */
3166	ret = (readq(base + 0x10) >> 32) & 0xffff;
3167	iounmap(base);
3168err_put:
3169	pci_dev_put(pdev);
3170err:
3171	return ret;
3172}
3173
3174static int rvu_enable_sriov(struct rvu *rvu)
3175{
3176	struct pci_dev *pdev = rvu->pdev;
3177	int err, chans, vfs;
3178	int pos = 0;
3179
3180	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
3181		dev_warn(&pdev->dev,
3182			 "Skipping SRIOV enablement since not enough IRQs are available\n");
3183		return 0;
3184	}
3185
3186	/* Get RVU VFs device id */
3187	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3188	if (!pos)
3189		return 0;
3190	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
3191
3192	chans = rvu_get_num_lbk_chans();
3193	if (chans < 0)
3194		return chans;
3195
3196	vfs = pci_sriov_get_totalvfs(pdev);
3197
3198	/* Limit VFs in case we have more VFs than LBK channels available. */
3199	if (vfs > chans)
3200		vfs = chans;
3201
3202	if (!vfs)
3203		return 0;
3204
3205	/* LBK channel number 63 is used for switching packets between
3206	 * CGX mapped VFs. Hence limit the number of LBK pairs to 62.
3207	 */
3208	if (vfs > 62)
3209		vfs = 62;
3210
3211	/* Save the number of VFs for reference in the VF interrupt handlers.
3212	 * Since interrupts might start arriving during SRIOV enablement,
3213	 * the ordinary API cannot be used to get the number of enabled VFs.
3214	 */
3215	rvu->vfs = vfs;
3216
3217	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
3218			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
3219	if (err)
3220		return err;
3221
3222	rvu_enable_afvf_intr(rvu);
3223	/* Make sure IRQs are enabled before SRIOV. */
3224	mb();
3225
3226	err = pci_enable_sriov(pdev, vfs);
3227	if (err) {
3228		rvu_disable_afvf_intr(rvu);
3229		rvu_mbox_destroy(&rvu->afvf_wq_info);
3230		return err;
3231	}
3232
3233	return 0;
3234}
3235
3236static void rvu_disable_sriov(struct rvu *rvu)
3237{
3238	rvu_disable_afvf_intr(rvu);
3239	rvu_mbox_destroy(&rvu->afvf_wq_info);
3240	pci_disable_sriov(rvu->pdev);
3241}
3242
3243static void rvu_update_module_params(struct rvu *rvu)
3244{
3245	const char *default_pfl_name = "default";
3246
3247	strscpy(rvu->mkex_pfl_name,
3248		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
3249	strscpy(rvu->kpu_pfl_name,
3250		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
3251}
3252
3253static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3254{
3255	struct device *dev = &pdev->dev;
3256	struct rvu *rvu;
3257	int    err;
3258
3259	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
3260	if (!rvu)
3261		return -ENOMEM;
3262
3263	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
3264	if (!rvu->hw) {
3265		devm_kfree(dev, rvu);
3266		return -ENOMEM;
3267	}
3268
3269	pci_set_drvdata(pdev, rvu);
3270	rvu->pdev = pdev;
3271	rvu->dev = &pdev->dev;
3272
3273	err = pci_enable_device(pdev);
3274	if (err) {
3275		dev_err(dev, "Failed to enable PCI device\n");
3276		goto err_freemem;
3277	}
3278
3279	err = pci_request_regions(pdev, DRV_NAME);
3280	if (err) {
3281		dev_err(dev, "PCI request regions failed 0x%x\n", err);
3282		goto err_disable_device;
3283	}
3284
3285	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3286	if (err) {
3287		dev_err(dev, "DMA mask config failed, abort\n");
3288		goto err_release_regions;
3289	}
3290
3291	pci_set_master(pdev);
3292
3293	rvu->ptp = ptp_get();
3294	if (IS_ERR(rvu->ptp)) {
3295		err = PTR_ERR(rvu->ptp);
3296		if (err)
3297			goto err_release_regions;
3298		rvu->ptp = NULL;
3299	}
3300
3301	/* Map Admin function CSRs */
3302	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
3303	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
3304	if (!rvu->afreg_base || !rvu->pfreg_base) {
3305		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
3306		err = -ENOMEM;
3307		goto err_put_ptp;
3308	}
3309
3310	/* Store module params in rvu structure */
3311	rvu_update_module_params(rvu);
3312
3313	/* Check which blocks the HW supports */
3314	rvu_check_block_implemented(rvu);
3315
3316	rvu_reset_all_blocks(rvu);
3317
3318	rvu_setup_hw_capabilities(rvu);
3319
3320	err = rvu_setup_hw_resources(rvu);
3321	if (err)
3322		goto err_put_ptp;
3323
3324	/* Init mailbox btw AF and PFs */
3325	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
3326			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
3327			    rvu_afpf_mbox_up_handler);
3328	if (err) {
3329		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
3330		goto err_hwsetup;
3331	}
3332
3333	err = rvu_flr_init(rvu);
3334	if (err) {
3335		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
3336		goto err_mbox;
3337	}
3338
3339	err = rvu_register_interrupts(rvu);
3340	if (err) {
3341		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
3342		goto err_flr;
3343	}
3344
3345	err = rvu_register_dl(rvu);
3346	if (err) {
3347		dev_err(dev, "%s: Failed to register devlink\n", __func__);
3348		goto err_irq;
3349	}
3350
3351	rvu_setup_rvum_blk_revid(rvu);
3352
3353	/* Enable AF's VFs (if any) */
3354	err = rvu_enable_sriov(rvu);
3355	if (err) {
3356		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
3357		goto err_dl;
3358	}
3359
3360	/* Initialize debugfs */
3361	rvu_dbg_init(rvu);
3362
3363	mutex_init(&rvu->rswitch.switch_lock);
3364
3365	if (rvu->fwdata)
3366		ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
3367			  rvu->fwdata->ptp_ext_tstamp);
3368
3369	return 0;
3370err_dl:
3371	rvu_unregister_dl(rvu);
3372err_irq:
3373	rvu_unregister_interrupts(rvu);
3374err_flr:
3375	rvu_flr_wq_destroy(rvu);
3376err_mbox:
3377	rvu_mbox_destroy(&rvu->afpf_wq_info);
3378err_hwsetup:
3379	rvu_cgx_exit(rvu);
3380	rvu_fwdata_exit(rvu);
3381	rvu_mcs_exit(rvu);
3382	rvu_reset_all_blocks(rvu);
3383	rvu_free_hw_resources(rvu);
3384	rvu_clear_rvum_blk_revid(rvu);
3385err_put_ptp:
3386	ptp_put(rvu->ptp);
3387err_release_regions:
3388	pci_release_regions(pdev);
3389err_disable_device:
3390	pci_disable_device(pdev);
3391err_freemem:
3392	pci_set_drvdata(pdev, NULL);
3393	devm_kfree(&pdev->dev, rvu->hw);
3394	devm_kfree(dev, rvu);
3395	return err;
3396}
3397
3398static void rvu_remove(struct pci_dev *pdev)
3399{
3400	struct rvu *rvu = pci_get_drvdata(pdev);
3401
3402	rvu_dbg_exit(rvu);
3403	rvu_unregister_dl(rvu);
3404	rvu_unregister_interrupts(rvu);
3405	rvu_flr_wq_destroy(rvu);
3406	rvu_cgx_exit(rvu);
3407	rvu_fwdata_exit(rvu);
3408	rvu_mcs_exit(rvu);
3409	rvu_mbox_destroy(&rvu->afpf_wq_info);
3410	rvu_disable_sriov(rvu);
3411	rvu_reset_all_blocks(rvu);
3412	rvu_free_hw_resources(rvu);
3413	rvu_clear_rvum_blk_revid(rvu);
3414	ptp_put(rvu->ptp);
3415	pci_release_regions(pdev);
3416	pci_disable_device(pdev);
3417	pci_set_drvdata(pdev, NULL);
3418
3419	devm_kfree(&pdev->dev, rvu->hw);
3420	devm_kfree(&pdev->dev, rvu);
3421}
3422
3423static struct pci_driver rvu_driver = {
3424	.name = DRV_NAME,
3425	.id_table = rvu_id_table,
3426	.probe = rvu_probe,
3427	.remove = rvu_remove,
3428};
3429
3430static int __init rvu_init_module(void)
3431{
3432	int err;
3433
3434	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3435
3436	err = pci_register_driver(&cgx_driver);
3437	if (err < 0)
3438		return err;
3439
3440	err = pci_register_driver(&ptp_driver);
3441	if (err < 0)
3442		goto ptp_err;
3443
3444	err = pci_register_driver(&mcs_driver);
3445	if (err < 0)
3446		goto mcs_err;
3447
3448	err =  pci_register_driver(&rvu_driver);
3449	if (err < 0)
3450		goto rvu_err;
3451
3452	return 0;
3453rvu_err:
3454	pci_unregister_driver(&mcs_driver);
3455mcs_err:
3456	pci_unregister_driver(&ptp_driver);
3457ptp_err:
3458	pci_unregister_driver(&cgx_driver);
3459
3460	return err;
3461}
3462
3463static void __exit rvu_cleanup_module(void)
3464{
3465	pci_unregister_driver(&rvu_driver);
3466	pci_unregister_driver(&mcs_driver);
3467	pci_unregister_driver(&ptp_driver);
3468	pci_unregister_driver(&cgx_driver);
3469}
3470
3471module_init(rvu_init_module);
3472module_exit(rvu_cleanup_module);
3473