1// SPDX-License-Identifier: GPL-2.0
2/* Marvell OcteonTx2 CGX driver
3 *
4 * Copyright (C) 2018 Marvell.
5 *
6 */
7
8#include <linux/acpi.h>
9#include <linux/module.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/netdevice.h>
13#include <linux/etherdevice.h>
14#include <linux/ethtool.h>
15#include <linux/phy.h>
16#include <linux/of.h>
17#include <linux/of_mdio.h>
18#include <linux/of_net.h>
19
20#include "cgx.h"
21#include "rvu.h"
22#include "lmac_common.h"
23
24#define DRV_NAME	"Marvell-CGX/RPM"
25#define DRV_STRING      "Marvell CGX/RPM Driver"
26
27static LIST_HEAD(cgx_list);
28
/* Convert firmware speed encoding to user format (Mbps) */
30static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
31	[CGX_LINK_NONE] = 0,
32	[CGX_LINK_10M] = 10,
33	[CGX_LINK_100M] = 100,
34	[CGX_LINK_1G] = 1000,
35	[CGX_LINK_2HG] = 2500,
36	[CGX_LINK_5G] = 5000,
37	[CGX_LINK_10G] = 10000,
38	[CGX_LINK_20G] = 20000,
39	[CGX_LINK_25G] = 25000,
40	[CGX_LINK_40G] = 40000,
41	[CGX_LINK_50G] = 50000,
42	[CGX_LINK_80G] = 80000,
43	[CGX_LINK_100G] = 100000,
44};
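/* cgx_speed_mbps[] is indexed with the RESP_LINKSTAT_SPEED field of the
 * firmware link status word, see link_status_user_format() below.
 */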
45
46/* Convert firmware lmac type encoding to string */
47static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
48	[LMAC_MODE_SGMII] = "SGMII",
49	[LMAC_MODE_XAUI] = "XAUI",
50	[LMAC_MODE_RXAUI] = "RXAUI",
51	[LMAC_MODE_10G_R] = "10G_R",
52	[LMAC_MODE_40G_R] = "40G_R",
53	[LMAC_MODE_QSGMII] = "QSGMII",
54	[LMAC_MODE_25G_R] = "25G_R",
55	[LMAC_MODE_50G_R] = "50G_R",
56	[LMAC_MODE_100G_R] = "100G_R",
57	[LMAC_MODE_USXGMII] = "USXGMII",
58	[LMAC_MODE_USGMII] = "USGMII",
59};
60
61/* CGX PHY management internal APIs */
62static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
63
64/* Supported devices */
65static const struct pci_device_id cgx_id_table[] = {
66	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
67	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
68	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
69	{ 0, }  /* end of table */
70};
71
72MODULE_DEVICE_TABLE(pci, cgx_id_table);
73
74static bool is_dev_rpm(void *cgxd)
75{
76	struct cgx *cgx = cgxd;
77
78	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
79	       (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
80}
81
82bool is_lmac_valid(struct cgx *cgx, int lmac_id)
83{
84	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
85		return false;
86	return test_bit(lmac_id, &cgx->lmac_bmap);
87}
88
/* Helper function to get the sequential index of an enabled LMAC
 * within a CGX
 */
92static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
93{
94	int tmp, id = 0;
95
96	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
97		if (tmp == lmac_id)
98			break;
99		id++;
100	}
101
102	return id;
103}
104
105struct mac_ops *get_mac_ops(void *cgxd)
106{
	if (!cgxd)
		return NULL;
109
110	return ((struct cgx *)cgxd)->mac_ops;
111}
112
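/* Per-LMAC CSRs are addressed relative to the MAC's mapped base: the LMAC
 * index is shifted by mac_ops->lmac_offset (18 for CGX, see cgx_mac_ops
 * below) and added to the register offset.
 */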
113void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
114{
115	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
116	       offset);
117}
118
119u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
120{
121	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
122		     offset);
123}
124
125struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
126{
127	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
128		return NULL;
129
130	return cgx->lmac_idmap[lmac_id];
131}
132
133int cgx_get_cgxcnt_max(void)
134{
135	struct cgx *cgx_dev;
136	int idmax = -ENODEV;
137
138	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
139		if (cgx_dev->cgx_id > idmax)
140			idmax = cgx_dev->cgx_id;
141
142	if (idmax < 0)
143		return 0;
144
145	return idmax + 1;
146}
147
148int cgx_get_lmac_cnt(void *cgxd)
149{
150	struct cgx *cgx = cgxd;
151
152	if (!cgx)
153		return -ENODEV;
154
155	return cgx->lmac_count;
156}
157
158void *cgx_get_pdata(int cgx_id)
159{
160	struct cgx *cgx_dev;
161
162	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
163		if (cgx_dev->cgx_id == cgx_id)
164			return cgx_dev;
165	}
166	return NULL;
167}
168
169void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
170{
171	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
172
173	/* Software must not access disabled LMAC registers */
174	if (!is_lmac_valid(cgx_dev, lmac_id))
175		return;
176	cgx_write(cgx_dev, lmac_id, offset, val);
177}
178
179u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
180{
181	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
182
183	/* Software must not access disabled LMAC registers */
184	if (!is_lmac_valid(cgx_dev, lmac_id))
185		return 0;
186
187	return cgx_read(cgx_dev, lmac_id, offset);
188}
189
190int cgx_get_cgxid(void *cgxd)
191{
192	struct cgx *cgx = cgxd;
193
194	if (!cgx)
195		return -EINVAL;
196
197	return cgx->cgx_id;
198}
199
200u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
201{
202	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
203	u64 cfg;
204
205	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
206
207	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
208}
209
/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous event
 * (with the latest link status) can reach the destination before this function
 * returns and make the reported link status appear wrong.
 */
215int cgx_get_link_info(void *cgxd, int lmac_id,
216		      struct cgx_link_user_info *linfo)
217{
218	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
219
220	if (!lmac)
221		return -ENODEV;
222
223	*linfo = lmac->link_info;
224	return 0;
225}
226
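/* DMAC filter CAM layout: each enabled LMAC owns a contiguous window of
 * lmac->mac_to_index_bmap.max entries, selected by the LMAC's sequential
 * id, i.e. for a slot idx within the window:
 *
 *	index = get_sequence_id_of_lmac(cgx, lmac_id) *
 *		lmac->mac_to_index_bmap.max + idx;
 *
 * An entry holds the MAC address along with an address-enable bit and the
 * owning LMAC id in the upper bits.
 */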
227int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
228{
229	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
230	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
231	struct mac_ops *mac_ops;
232	int index, id;
233	u64 cfg;
234
235	if (!lmac)
236		return -ENODEV;
237
238	/* access mac_ops to know csr_offset */
239	mac_ops = cgx_dev->mac_ops;
240
	/* Copy the 6 byte MAC address into the low bits of cfg */
	cfg = ether_addr_to_u64(mac_addr);
245
246	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
247
248	index = id * lmac->mac_to_index_bmap.max;
249
250	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
251		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
252
253	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
254	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
255		CGX_DMAC_MCAST_MODE);
256	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
257
258	return 0;
259}
260
261u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
262{
263	struct mac_ops *mac_ops;
264	struct cgx *cgx = cgxd;
265
266	if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
267		return 0;
268
	/* Get mac_ops to know csr offset */
	mac_ops = cgx->mac_ops;
272
273	return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
274}
275
276u64 cgx_read_dmac_entry(void *cgxd, int index)
277{
278	struct mac_ops *mac_ops;
279	struct cgx *cgx;
280
281	if (!cgxd)
282		return 0;
283
284	cgx = cgxd;
285	mac_ops = cgx->mac_ops;
286	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
287}
288
289int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
290{
291	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
292	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
293	struct mac_ops *mac_ops;
294	int index, idx;
295	u64 cfg = 0;
296	int id;
297
298	if (!lmac)
299		return -ENODEV;
300
301	mac_ops = cgx_dev->mac_ops;
302	/* Get available index where entry is to be installed */
303	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
304	if (idx < 0)
305		return idx;
306
307	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
308
309	index = id * lmac->mac_to_index_bmap.max + idx;
310
311	cfg = ether_addr_to_u64(mac_addr);
312	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
313	cfg |= ((u64)lmac_id << 49);
314	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
315
316	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
317	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
318
319	if (is_multicast_ether_addr(mac_addr)) {
320		cfg &= ~GENMASK_ULL(2, 1);
321		cfg |= CGX_DMAC_MCAST_MODE_CAM;
322		lmac->mcast_filters_count++;
323	} else if (!lmac->mcast_filters_count) {
324		cfg |= CGX_DMAC_MCAST_MODE;
325	}
326
327	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
328
329	return idx;
330}
331
332int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
333{
334	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
335	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
336	struct mac_ops *mac_ops;
337	u8 index = 0, id;
338	u64 cfg;
339
340	if (!lmac)
341		return -ENODEV;
342
343	mac_ops = cgx_dev->mac_ops;
344	/* Restore index 0 to its default init value as done during
345	 * cgx_lmac_init
346	 */
347	set_bit(0, lmac->mac_to_index_bmap.bmap);
348
349	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
350
351	index = id * lmac->mac_to_index_bmap.max + index;
352	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
353
354	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
355	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
356	cfg &= ~CGX_DMAC_CAM_ACCEPT;
357	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
358	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
359
360	return 0;
361}
362
/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0 which is reserved for the
 * interface MAC address
 */
367int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
368{
369	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
370	struct mac_ops *mac_ops;
371	struct lmac *lmac;
372	u64 cfg;
373	int id;
374
375	lmac = lmac_pdata(lmac_id, cgx_dev);
376	if (!lmac)
377		return -ENODEV;
378
379	mac_ops = cgx_dev->mac_ops;
380	/* Validate the index */
381	if (index >= lmac->mac_to_index_bmap.max)
382		return -EINVAL;
383
384	/* ensure index is already set */
385	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
386		return -EINVAL;
387
388	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
389
390	index = id * lmac->mac_to_index_bmap.max + index;
391
392	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
393	cfg &= ~CGX_RX_DMAC_ADR_MASK;
394	cfg |= ether_addr_to_u64(mac_addr);
395
396	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
397	return 0;
398}
399
400int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
401{
402	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
403	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
404	struct mac_ops *mac_ops;
405	u8 mac[ETH_ALEN];
406	u64 cfg;
407	int id;
408
409	if (!lmac)
410		return -ENODEV;
411
412	mac_ops = cgx_dev->mac_ops;
413	/* Validate the index */
414	if (index >= lmac->mac_to_index_bmap.max)
415		return -EINVAL;
416
417	/* Skip deletion for reserved index i.e. index 0 */
418	if (index == 0)
419		return 0;
420
421	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
422
423	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
424
425	index = id * lmac->mac_to_index_bmap.max + index;
426
427	/* Read MAC address to check whether it is ucast or mcast */
428	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
429
430	u64_to_ether_addr(cfg, mac);
431	if (is_multicast_ether_addr(mac))
432		lmac->mcast_filters_count--;
433
434	if (!lmac->mcast_filters_count) {
435		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
436		cfg &= ~GENMASK_ULL(2, 1);
437		cfg |= CGX_DMAC_MCAST_MODE;
438		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
439	}
440
441	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
442
443	return 0;
444}
445
446int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
447{
448	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
449	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
450
451	if (lmac)
452		return lmac->mac_to_index_bmap.max;
453
454	return 0;
455}
456
457u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
458{
459	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
460	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
461	struct mac_ops *mac_ops;
462	int index;
463	u64 cfg;
464	int id;
465
466	mac_ops = cgx_dev->mac_ops;
467
468	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
469
470	index = id * lmac->mac_to_index_bmap.max;
471
472	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
473	return cfg & CGX_RX_DMAC_ADR_MASK;
474}
475
476int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
477{
478	struct cgx *cgx = cgxd;
479
480	if (!is_lmac_valid(cgx, lmac_id))
481		return -ENODEV;
482
483	cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
484	return 0;
485}
486
487static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
488{
489	struct cgx *cgx = cgxd;
490	u64 cfg;
491
492	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
493	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
494}
495
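/* The RX FIFO (mac_ops->fifo_len, read from CGX_CONST in
 * cgx_populate_features()) is shared by all LMACs on this MAC and is
 * carved up based on how many LMACs are in use.
 */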
496static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
497{
498	struct cgx *cgx = cgxd;
499	u8 num_lmacs;
500	u32 fifo_len;
501
502	fifo_len = cgx->mac_ops->fifo_len;
503	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
504
505	switch (num_lmacs) {
506	case 1:
507		return fifo_len;
508	case 2:
509		return fifo_len / 2;
510	case 3:
		/* LMAC0 gets half of the FIFO, the rest get 1/4th */
512		if (lmac_id == 0)
513			return fifo_len / 2;
514		return fifo_len / 4;
515	case 4:
516	default:
517		return fifo_len / 4;
518	}
519	return 0;
520}
521
522/* Configure CGX LMAC in internal loopback mode */
523int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
524{
525	struct cgx *cgx = cgxd;
526	struct lmac *lmac;
527	u64 cfg;
528
529	if (!is_lmac_valid(cgx, lmac_id))
530		return -ENODEV;
531
532	lmac = lmac_pdata(lmac_id, cgx);
533	if (lmac->lmac_type == LMAC_MODE_SGMII ||
534	    lmac->lmac_type == LMAC_MODE_QSGMII) {
535		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
536		if (enable)
537			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
538		else
539			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
540		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
541	} else {
542		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
543		if (enable)
544			cfg |= CGXX_SPUX_CONTROL1_LBK;
545		else
546			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
547		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
548	}
549	return 0;
550}
551
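/* Promiscuous mode is implemented by bypassing the DMAC filter: CAM accept
 * is cleared in DMAC_CTL0 and every CAM entry owned by this LMAC is
 * disabled. Disabling promiscuous mode re-enables only the entries that
 * still hold a MAC address.
 */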
552void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
553{
554	struct cgx *cgx = cgx_get_pdata(cgx_id);
555	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
556	struct mac_ops *mac_ops;
557	u16 max_dmac;
558	int index, i;
559	u64 cfg = 0;
560	int id;
561
562	if (!cgx || !lmac)
563		return;
564
565	max_dmac = lmac->mac_to_index_bmap.max;
566	id = get_sequence_id_of_lmac(cgx, lmac_id);
567
568	mac_ops = cgx->mac_ops;
569	if (enable) {
570		/* Enable promiscuous mode on LMAC */
571		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
572		cfg &= ~CGX_DMAC_CAM_ACCEPT;
573		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
574		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
575
576		for (i = 0; i < max_dmac; i++) {
577			index = id * max_dmac + i;
578			cfg = cgx_read(cgx, 0,
579				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
580			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
581			cgx_write(cgx, 0,
582				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
583		}
584	} else {
585		/* Disable promiscuous mode */
586		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
587		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
588		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
589		for (i = 0; i < max_dmac; i++) {
590			index = id * max_dmac + i;
591			cfg = cgx_read(cgx, 0,
592				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
593			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
594				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
595				cgx_write(cgx, 0,
596					  (CGXX_CMRX_RX_DMAC_CAM0 +
597					   index * 0x8),
598					  cfg);
599			}
600		}
601	}
602}
603
604static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
605					 u8 *tx_pause, u8 *rx_pause)
606{
607	struct cgx *cgx = cgxd;
608	u64 cfg;
609
610	if (is_dev_rpm(cgx))
611		return 0;
612
613	if (!is_lmac_valid(cgx, lmac_id))
614		return -ENODEV;
615
616	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
617	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
618
619	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
620	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
621	return 0;
622}
623
624/* Enable or disable forwarding received pause frames to Tx block */
625void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
626{
627	struct cgx *cgx = cgxd;
628	u8 rx_pause, tx_pause;
629	bool is_pfc_enabled;
630	struct lmac *lmac;
631	u64 cfg;
632
633	if (!cgx)
634		return;
635
636	lmac = lmac_pdata(lmac_id, cgx);
637	if (!lmac)
638		return;
639
	/* Pause frames are not enabled, just return */
	if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
		return;

	cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
	is_pfc_enabled = !rx_pause;
646
647	if (enable) {
648		if (!is_pfc_enabled) {
649			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
650			cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
651			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
652
653			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
654			cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
655			cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
656		} else {
657			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
658			cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
659			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
660		}
	} else {
		if (!is_pfc_enabled) {
664			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
665			cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
666			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
667
668			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
669			cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
670			cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
671		} else {
672			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
673			cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
674			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
675		}
676	}
677}
678
679int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
680{
681	struct cgx *cgx = cgxd;
682
683	if (!is_lmac_valid(cgx, lmac_id))
684		return -ENODEV;
685	*rx_stat =  cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
686	return 0;
687}
688
689int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
690{
691	struct cgx *cgx = cgxd;
692
693	if (!is_lmac_valid(cgx, lmac_id))
694		return -ENODEV;
695	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
696	return 0;
697}
698
699u64 cgx_features_get(void *cgxd)
700{
701	return ((struct cgx *)cgxd)->hw_features;
702}
703
704static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
705{
706	if (!linfo->fec)
707		return 0;
708
709	switch (linfo->lmac_type_id) {
710	case LMAC_MODE_SGMII:
711	case LMAC_MODE_XAUI:
712	case LMAC_MODE_RXAUI:
713	case LMAC_MODE_QSGMII:
714		return 0;
715	case LMAC_MODE_10G_R:
716	case LMAC_MODE_25G_R:
717	case LMAC_MODE_100G_R:
718	case LMAC_MODE_USXGMII:
719		return 1;
720	case LMAC_MODE_40G_R:
721		return 4;
722	case LMAC_MODE_50G_R:
723		if (linfo->fec == OTX2_FEC_BASER)
724			return 2;
725		else
726			return 1;
727	default:
728		return 0;
729	}
730}
731
732int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
733{
734	int stats, fec_stats_count = 0;
735	int corr_reg, uncorr_reg;
736	struct cgx *cgx = cgxd;
737
738	if (!is_lmac_valid(cgx, lmac_id))
739		return -ENODEV;
740
741	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
742		return 0;
743
744	fec_stats_count =
745		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
746	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
747		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
748		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
749	} else {
750		corr_reg = CGXX_SPUX_RSFEC_CORR;
751		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
752	}
753	for (stats = 0; stats < fec_stats_count; stats++) {
754		rsp->fec_corr_blks +=
755			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
756		rsp->fec_uncorr_blks +=
757			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
758	}
759	return 0;
760}
761
762int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
763{
764	struct cgx *cgx = cgxd;
765	u64 cfg;
766
767	if (!is_lmac_valid(cgx, lmac_id))
768		return -ENODEV;
769
770	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
771	if (enable)
772		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
773	else
774		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
775	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
776	return 0;
777}
778
779int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
780{
781	struct cgx *cgx = cgxd;
782	u64 cfg, last;
783
784	if (!is_lmac_valid(cgx, lmac_id))
785		return -ENODEV;
786
787	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
788	last = cfg;
789	if (enable)
790		cfg |= DATA_PKT_TX_EN;
791	else
792		cfg &= ~DATA_PKT_TX_EN;
793
794	if (cfg != last)
795		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
796	return !!(last & DATA_PKT_TX_EN);
797}
798
799static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
800				     u8 tx_pause, u8 rx_pause)
801{
802	struct cgx *cgx = cgxd;
803	u64 cfg;
804
805	if (is_dev_rpm(cgx))
806		return 0;
807
808	if (!is_lmac_valid(cgx, lmac_id))
809		return -ENODEV;
810
811	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
812	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
813	cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
814	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
815
816	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
817	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
818	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
819	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
820
821	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
822	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
823	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
824	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
825
826	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
827	if (tx_pause) {
828		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
829	} else {
830		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
831		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
832	}
833	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
834	return 0;
835}
836
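/* Program default 802.3x pause timers when enabling, then leave the LMAC
 * with pause reception ignored, pause transmission disabled and all PFC
 * classes off; flow control has to be enabled explicitly afterwards.
 */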
837static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
838{
839	struct cgx *cgx = cgxd;
840	u64 cfg;
841
842	if (!is_lmac_valid(cgx, lmac_id))
843		return;
844
845	if (enable) {
846		/* Set pause time and interval */
847		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
848			  DEFAULT_PAUSE_TIME);
849		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
850		cfg &= ~0xFFFFULL;
851		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
852			  cfg | (DEFAULT_PAUSE_TIME / 2));
853
854		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
855			  DEFAULT_PAUSE_TIME);
856
857		cfg = cgx_read(cgx, lmac_id,
858			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
859		cfg &= ~0xFFFFULL;
860		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
861			  cfg | (DEFAULT_PAUSE_TIME / 2));
862	}
863
864	/* ALL pause frames received are completely ignored */
865	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
866	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
867	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
868
869	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
870	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
871	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
872
873	/* Disable pause frames transmission */
874	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
875	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
876	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
877
878	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
879	cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
880	cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
881	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
882
883	/* Disable all PFC classes by default */
884	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
885	cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
886	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
887}
888
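/* Track which PF/VFs have requested flow control on this LMAC using the
 * rx/tx_fc_pfvf_bmap bitmaps, so that one PF/VF cannot disable flow
 * control while another one is still using it.
 */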
889int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
890		       int pfvf_idx)
891{
892	struct cgx *cgx = cgxd;
893	struct lmac *lmac;
894
895	lmac = lmac_pdata(lmac_id, cgx);
896	if (!lmac)
897		return -ENODEV;
898
899	if (!rx_pause)
900		clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
901	else
902		set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
903
904	if (!tx_pause)
905		clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
906	else
907		set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
908
909	/* check if other pfvfs are using flow control */
910	if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
911		dev_warn(&cgx->pdev->dev,
912			 "Receive Flow control disable not permitted as its used by other PFVFs\n");
913		return -EPERM;
914	}
915
916	if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
917		dev_warn(&cgx->pdev->dev,
918			 "Transmit Flow control disable not permitted as its used by other PFVFs\n");
919		return -EPERM;
920	}
921
922	return 0;
923}
924
925int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
926			u8 rx_pause, u16 pfc_en)
927{
928	struct cgx *cgx = cgxd;
929	u64 cfg;
930
931	if (!is_lmac_valid(cgx, lmac_id))
932		return -ENODEV;
933
934	/* Return as no traffic classes are requested */
935	if (tx_pause && !pfc_en)
936		return 0;
937
938	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
939	pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
940
941	if (rx_pause) {
942		cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
943			CGXX_SMUX_CBFC_CTL_BCK_EN |
944			CGXX_SMUX_CBFC_CTL_DRP_EN);
945	} else {
946		cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
947			CGXX_SMUX_CBFC_CTL_BCK_EN |
948			CGXX_SMUX_CBFC_CTL_DRP_EN);
949	}
950
951	if (tx_pause) {
952		cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
953		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
954	} else {
955		cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
956		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
957	}
958
959	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
960
961	/* Write source MAC address which will be filled into PFC packet */
962	cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
963	cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
964
965	return 0;
966}
967
968int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
969			     u8 *rx_pause)
970{
971	struct cgx *cgx = cgxd;
972	u64 cfg;
973
974	if (!is_lmac_valid(cgx, lmac_id))
975		return -ENODEV;
976
977	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
978
979	*rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
980	*tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
981
982	return 0;
983}
984
985void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
986{
987	struct cgx *cgx = cgxd;
988	u64 cfg;
989
990	if (!cgx)
991		return;
992
993	if (enable) {
994		/* Enable inbound PTP timestamping */
995		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
996		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
997		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
998
999		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
1000		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
1001		cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
1002	} else {
1003		/* Disable inbound PTP stamping */
1004		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
1005		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
1006		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
1007
1008		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
1009		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
1010		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
1011	}
1012}
1013
1014/* CGX Firmware interface low level support */
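/* A request word is built with FIELD_SET() and handed to the firmware
 * through CGX_COMMAND_REG. A minimal sketch (see cgx_fwi_read_version()):
 *
 *	u64 req = 0, resp;
 *
 *	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
 *	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
 *
 * cgx_fwi_cmd_send() marks the request as firmware owned, writes it to
 * CGX_COMMAND_REG and sleeps until cgx_fwi_event_handler() copies the
 * response from CGX_EVENT_REG and wakes it up.
 */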
1015int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
1016{
1017	struct cgx *cgx = lmac->cgx;
1018	struct device *dev;
1019	int err = 0;
1020	u64 cmd;
1021
1022	/* Ensure no other command is in progress */
1023	err = mutex_lock_interruptible(&lmac->cmd_lock);
1024	if (err)
1025		return err;
1026
1027	/* Ensure command register is free */
1028	cmd = cgx_read(cgx, lmac->lmac_id,  CGX_COMMAND_REG);
1029	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
1030		err = -EBUSY;
1031		goto unlock;
1032	}
1033
1034	/* Update ownership in command request */
1035	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
1036
1037	/* Mark this lmac as pending, before we start */
1038	lmac->cmd_pend = true;
1039
1040	/* Start command in hardware */
1041	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
1042
1043	/* Ensure command is completed without errors */
1044	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
1045				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
1046		dev = &cgx->pdev->dev;
1047		dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
1048			cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
1049		err = LMAC_AF_ERR_CMD_TIMEOUT;
1050		goto unlock;
1051	}
1052
1053	/* we have a valid command response */
1054	smp_rmb(); /* Ensure the latest updates are visible */
1055	*resp = lmac->resp;
1056
1057unlock:
1058	mutex_unlock(&lmac->cmd_lock);
1059
1060	return err;
1061}
1062
1063int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
1064{
1065	struct lmac *lmac;
1066	int err;
1067
1068	lmac = lmac_pdata(lmac_id, cgx);
1069	if (!lmac)
1070		return -ENODEV;
1071
1072	err = cgx_fwi_cmd_send(req, resp, lmac);
1073
1074	/* Check for valid response */
1075	if (!err) {
1076		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
1077			return -EIO;
1078		else
1079			return 0;
1080	}
1081
1082	return err;
1083}
1084
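/* Inverse of cgx_speed_mbps[]: map an ethtool SPEED_* value back to the
 * firmware link speed encoding.
 */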
1085static int cgx_link_usertable_index_map(int speed)
1086{
1087	switch (speed) {
1088	case SPEED_10:
1089		return CGX_LINK_10M;
1090	case SPEED_100:
1091		return CGX_LINK_100M;
1092	case SPEED_1000:
1093		return CGX_LINK_1G;
1094	case SPEED_2500:
1095		return CGX_LINK_2HG;
1096	case SPEED_5000:
1097		return CGX_LINK_5G;
1098	case SPEED_10000:
1099		return CGX_LINK_10G;
1100	case SPEED_20000:
1101		return CGX_LINK_20G;
1102	case SPEED_25000:
1103		return CGX_LINK_25G;
1104	case SPEED_40000:
1105		return CGX_LINK_40G;
1106	case SPEED_50000:
1107		return CGX_LINK_50G;
1108	case 80000:
1109		return CGX_LINK_80G;
1110	case SPEED_100000:
1111		return CGX_LINK_100G;
1112	case SPEED_UNKNOWN:
1113		return CGX_LINK_NONE;
1114	}
1115	return CGX_LINK_NONE;
1116}
1117
1118static void set_mod_args(struct cgx_set_link_mode_args *args,
1119			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
1120{
	/* Fill in default values in case the user did not pass
	 * valid parameters
	 */
1124	if (args->duplex == DUPLEX_UNKNOWN)
1125		args->duplex = duplex;
1126	if (args->speed == SPEED_UNKNOWN)
1127		args->speed = speed;
1128	if (args->an == AUTONEG_UNKNOWN)
1129		args->an = autoneg;
1130	args->mode = mode;
1131	args->ports = 0;
1132}
1133
1134static void otx2_map_ethtool_link_modes(u64 bitmask,
1135					struct cgx_set_link_mode_args *args)
1136{
1137	switch (bitmask) {
1138	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
1139		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1140		break;
1141	case  ETHTOOL_LINK_MODE_10baseT_Full_BIT:
1142		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1143		break;
1144	case  ETHTOOL_LINK_MODE_100baseT_Half_BIT:
1145		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1146		break;
1147	case  ETHTOOL_LINK_MODE_100baseT_Full_BIT:
1148		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1149		break;
1150	case  ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
1151		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1152		break;
1153	case  ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
1154		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1155		break;
1156	case  ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
1157		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
1158		break;
1159	case  ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
1160		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
1161		break;
1162	case  ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
1163		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
1164		break;
1165	case  ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
1166		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
1167		break;
1168	case  ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
1169		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
1170		break;
1171	case  ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
1172		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
1173		break;
1174	case  ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
1175		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
1176		break;
1177	case  ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
1178		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
1179		break;
1180	case  ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
1181		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
1182		break;
1183	case  ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
1184		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
1185		break;
1186	case  ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
1187		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
1188		break;
1189	case  ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
1190		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
1191		break;
1192	case  ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
1193		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
1194		break;
1195	case  ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
1196		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
1197		break;
1198	case  ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
1199		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
1200		break;
1201	case  ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
1202		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
1203		break;
1204	case  ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
1205		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
1206		break;
1207	case  ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
1208		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
1209		break;
1210	case  ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
1211		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
1212		break;
1213	case  ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
1214		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
1215		break;
1216	default:
1217		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
1218		break;
1219	}
1220}
1221
1222static inline void link_status_user_format(u64 lstat,
1223					   struct cgx_link_user_info *linfo,
1224					   struct cgx *cgx, u8 lmac_id)
1225{
1226	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
1227	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
1228	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
1229	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
1230	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
1231	linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
1232
1233	if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
		dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port %d:%d\n",
			linfo->lmac_type_id, cgx->cgx_id, lmac_id);
1236		strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
1237		return;
1238	}
1239
1240	strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
1241		sizeof(linfo->lmac_type));
1242}
1243
1244/* Hardware event handlers */
1245static inline void cgx_link_change_handler(u64 lstat,
1246					   struct lmac *lmac)
1247{
1248	struct cgx_link_user_info *linfo;
1249	struct cgx *cgx = lmac->cgx;
1250	struct cgx_link_event event;
1251	struct device *dev;
1252	int err_type;
1253
1254	dev = &cgx->pdev->dev;
1255
1256	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
1257	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
1258
1259	event.cgx_id = cgx->cgx_id;
1260	event.lmac_id = lmac->lmac_id;
1261
1262	/* update the local copy of link status */
1263	lmac->link_info = event.link_uinfo;
1264	linfo = &lmac->link_info;
1265
1266	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
1267		return;
1268
1269	/* Ensure callback doesn't get unregistered until we finish it */
1270	spin_lock(&lmac->event_cb_lock);
1271
1272	if (!lmac->event_cb.notify_link_chg) {
1273		dev_dbg(dev, "cgx port %d:%d Link change handler null",
1274			cgx->cgx_id, lmac->lmac_id);
1275		if (err_type != CGX_ERR_NONE) {
1276			dev_err(dev, "cgx port %d:%d Link error %d\n",
1277				cgx->cgx_id, lmac->lmac_id, err_type);
1278		}
1279		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
1280			 cgx->cgx_id, lmac->lmac_id,
1281			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
1282		goto err;
1283	}
1284
1285	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
1286		dev_err(dev, "event notification failure\n");
1287err:
1288	spin_unlock(&lmac->event_cb_lock);
1289}
1290
1291static inline bool cgx_cmdresp_is_linkevent(u64 event)
1292{
1293	u8 id;
1294
1295	id = FIELD_GET(EVTREG_ID, event);
1296	if (id == CGX_CMD_LINK_BRING_UP ||
1297	    id == CGX_CMD_LINK_BRING_DOWN ||
1298	    id == CGX_CMD_MODE_CHANGE)
1299		return true;
1300	else
1301		return false;
1302}
1303
1304static inline bool cgx_event_is_linkevent(u64 event)
1305{
1306	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
1307		return true;
1308	else
1309		return false;
1310}
1311
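/* Firmware interrupt handler: read CGX_EVENT_REG, handle command responses
 * and asynchronous link events separately, then ack both the event register
 * and the MAC interrupt so firmware can post the next event.
 */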
1312static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
1313{
1314	u64 event, offset, clear_bit;
1315	struct lmac *lmac = data;
1316	struct cgx *cgx;
1317
1318	cgx = lmac->cgx;
1319
1320	/* Clear SW_INT for RPM and CMR_INT for CGX */
1321	offset     = cgx->mac_ops->int_register;
1322	clear_bit  = cgx->mac_ops->int_ena_bit;
1323
1324	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
1325
1326	if (!FIELD_GET(EVTREG_ACK, event))
1327		return IRQ_NONE;
1328
1329	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
1330	case CGX_EVT_CMD_RESP:
1331		/* Copy the response. Since only one command is active at a
1332		 * time, there is no way a response can get overwritten
1333		 */
1334		lmac->resp = event;
1335		/* Ensure response is updated before thread context starts */
1336		smp_wmb();
1337
		/* There won't be separate events for link change initiated from
		 * software; hence report the command responses as events
		 */
1341		if (cgx_cmdresp_is_linkevent(event))
1342			cgx_link_change_handler(event, lmac);
1343
1344		/* Release thread waiting for completion  */
1345		lmac->cmd_pend = false;
1346		wake_up(&lmac->wq_cmd_cmplt);
1347		break;
1348	case CGX_EVT_ASYNC:
1349		if (cgx_event_is_linkevent(event))
1350			cgx_link_change_handler(event, lmac);
1351		break;
1352	}
1353
1354	/* Any new event or command response will be posted by firmware
1355	 * only after the current status is acked.
1356	 * Ack the interrupt register as well.
1357	 */
1358	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
1359	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
1360
1361	return IRQ_HANDLED;
1362}
1363
1364/* APIs for PHY management using CGX firmware interface */
1365
1366/* callback registration for hardware events like link change */
1367int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
1368{
1369	struct cgx *cgx = cgxd;
1370	struct lmac *lmac;
1371
1372	lmac = lmac_pdata(lmac_id, cgx);
1373	if (!lmac)
1374		return -ENODEV;
1375
1376	lmac->event_cb = *cb;
1377
1378	return 0;
1379}
1380
1381int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
1382{
1383	struct lmac *lmac;
1384	unsigned long flags;
1385	struct cgx *cgx = cgxd;
1386
1387	lmac = lmac_pdata(lmac_id, cgx);
1388	if (!lmac)
1389		return -ENODEV;
1390
1391	spin_lock_irqsave(&lmac->event_cb_lock, flags);
1392	lmac->event_cb.notify_link_chg = NULL;
1393	lmac->event_cb.data = NULL;
1394	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
1395
1396	return 0;
1397}
1398
1399int cgx_get_fwdata_base(u64 *base)
1400{
1401	u64 req = 0, resp;
1402	struct cgx *cgx;
1403	int first_lmac;
1404	int err;
1405
1406	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
1407	if (!cgx)
1408		return -ENXIO;
1409
1410	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
1411	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
1412	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
1413	if (!err)
1414		*base = FIELD_GET(RESP_FWD_BASE, resp);
1415
1416	return err;
1417}
1418
1419int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
1420		      int cgx_id, int lmac_id)
1421{
1422	struct cgx *cgx = cgxd;
1423	u64 req = 0, resp;
1424
1425	if (!cgx)
1426		return -ENODEV;
1427
1428	if (args.mode)
1429		otx2_map_ethtool_link_modes(args.mode, &args);
1430	if (!args.speed && args.duplex && !args.an)
1431		return -EINVAL;
1432
1433	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
1434	req = FIELD_SET(CMDMODECHANGE_SPEED,
1435			cgx_link_usertable_index_map(args.speed), req);
1436	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
1437	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
1438	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
1439	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
1440
1441	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
1444{
1445	u64 req = 0, resp;
1446	struct cgx *cgx;
1447	int err = 0;
1448
1449	cgx = cgx_get_pdata(cgx_id);
1450	if (!cgx)
1451		return -ENXIO;
1452
1453	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
1454	req = FIELD_SET(CMDSETFEC, fec, req);
1455	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1456	if (err)
1457		return err;
1458
1459	cgx->lmac_idmap[lmac_id]->link_info.fec =
1460			FIELD_GET(RESP_LINKSTAT_FEC, resp);
1461	return cgx->lmac_idmap[lmac_id]->link_info.fec;
1462}
1463
1464int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
1465{
1466	struct cgx *cgx = cgxd;
1467	u64 req = 0, resp;
1468
1469	if (!cgx)
1470		return -ENODEV;
1471
1472	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
1473	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1474}
1475
1476static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
1477{
1478	u64 req = 0;
1479	u64 resp;
1480
1481	if (enable) {
1482		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
		/* On CN10K, firmware offloads link bring up/down operations to ECP.
		 * On OcteonTx2, link operations are handled by the firmware itself,
		 * which can cause mbox errors, so configure the maximum time the
		 * firmware polls for link as 1000 ms.
		 */
1488		if (!is_dev_rpm(cgx))
1489			req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
1490
1491	} else {
1492		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
1493	}
1494	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1495}
1496
1497static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
1498{
1499	int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
1500	u64 req = 0;
1501
1502	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
1503	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
1504}
1505
1506static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
1507{
1508	struct device *dev = &cgx->pdev->dev;
1509	int major_ver, minor_ver;
1510	u64 resp;
1511	int err;
1512
1513	if (!cgx->lmac_count)
1514		return 0;
1515
1516	err = cgx_fwi_read_version(&resp, cgx);
1517	if (err)
1518		return err;
1519
1520	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
1521	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
1522	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
1523		major_ver, minor_ver);
1524	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
1525		return -EIO;
1526	else
1527		return 0;
1528}
1529
1530static void cgx_lmac_linkup_work(struct work_struct *work)
1531{
1532	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
1533	struct device *dev = &cgx->pdev->dev;
1534	int i, err;
1535
	/* Bring up the link on all enabled LMACs */
1537	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
1538		err = cgx_fwi_link_change(cgx, i, true);
1539		if (err)
1540			dev_info(dev, "cgx port %d:%d Link up command failed\n",
1541				 cgx->cgx_id, i);
1542	}
1543}
1544
1545int cgx_lmac_linkup_start(void *cgxd)
1546{
1547	struct cgx *cgx = cgxd;
1548
1549	if (!cgx)
1550		return -ENODEV;
1551
1552	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
1553
1554	return 0;
1555}
1556
1557int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
1558{
1559	struct cgx *cgx = cgxd;
1560	u64 cfg;
1561
1562	if (!is_lmac_valid(cgx, lmac_id))
1563		return -ENODEV;
1564
1565	/* Resetting PFC related CSRs */
1566	cfg = 0xff;
1567	cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
1568
1569	if (pf_req_flr)
1570		cgx_lmac_internal_loopback(cgxd, lmac_id, false);
1571	return 0;
1572}
1573
1574static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
1575				   int cnt, bool req_free)
1576{
1577	struct mac_ops *mac_ops = cgx->mac_ops;
1578	u64 offset, ena_bit;
1579	unsigned int irq;
1580	int err;
1581
1582	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
1583				  cnt * mac_ops->irq_offset);
1584	offset   = mac_ops->int_set_reg;
1585	ena_bit  = mac_ops->int_ena_bit;
1586
1587	if (req_free) {
1588		free_irq(irq, lmac);
1589		return 0;
1590	}
1591
1592	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
1593	if (err)
1594		return err;
1595
1596	/* Enable interrupt */
1597	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
1598	return 0;
1599}
1600
1601int cgx_get_nr_lmacs(void *cgxd)
1602{
1603	struct cgx *cgx = cgxd;
1604
1605	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
1606}
1607
1608u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
1609{
1610	struct cgx *cgx = cgxd;
1611
1612	return cgx->lmac_idmap[lmac_index]->lmac_id;
1613}
1614
1615unsigned long cgx_get_lmac_bmap(void *cgxd)
1616{
1617	struct cgx *cgx = cgxd;
1618
1619	return cgx->lmac_bmap;
1620}
1621
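/* Per-LMAC init: allocate the lmac structure, its share of the DMAC filter
 * CAM and the flow control PF/VF bitmaps, hook up the firmware interrupt
 * and apply the default pause frame configuration.
 */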
1622static int cgx_lmac_init(struct cgx *cgx)
1623{
1624	struct lmac *lmac;
1625	u64 lmac_list;
1626	int i, err;
1627
	/* lmac_list specifies which LMACs are enabled:
	 * when bit n is set to 1, LMAC[n] is enabled
	 */
1631	if (cgx->mac_ops->non_contiguous_serdes_lane) {
1632		if (is_dev_rpm2(cgx))
1633			lmac_list =
1634				cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
1635		else
1636			lmac_list =
1637				cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
1638	}
1639
1640	if (cgx->lmac_count > cgx->max_lmac_per_mac)
1641		cgx->lmac_count = cgx->max_lmac_per_mac;
1642
1643	for (i = 0; i < cgx->lmac_count; i++) {
1644		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
1645		if (!lmac)
1646			return -ENOMEM;
1647		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
1648		if (!lmac->name) {
1649			err = -ENOMEM;
1650			goto err_lmac_free;
1651		}
1652		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
1653		if (cgx->mac_ops->non_contiguous_serdes_lane) {
1654			lmac->lmac_id = __ffs64(lmac_list);
1655			lmac_list   &= ~BIT_ULL(lmac->lmac_id);
1656		} else {
1657			lmac->lmac_id = i;
1658		}
1659
1660		lmac->cgx = cgx;
1661		lmac->mac_to_index_bmap.max =
1662				cgx->mac_ops->dmac_filter_count /
1663				cgx->lmac_count;
1664
1665		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
1666		if (err)
1667			goto err_name_free;
1668
1669		/* Reserve first entry for default MAC address */
1670		set_bit(0, lmac->mac_to_index_bmap.bmap);
1671
1672		lmac->rx_fc_pfvf_bmap.max = 128;
1673		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
1674		if (err)
1675			goto err_dmac_bmap_free;
1676
1677		lmac->tx_fc_pfvf_bmap.max = 128;
1678		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
1679		if (err)
1680			goto err_rx_fc_bmap_free;
1681
1682		init_waitqueue_head(&lmac->wq_cmd_cmplt);
1683		mutex_init(&lmac->cmd_lock);
1684		spin_lock_init(&lmac->event_cb_lock);
1685		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
1686		if (err)
1687			goto err_bitmap_free;
1688
1689		/* Add reference */
1690		cgx->lmac_idmap[lmac->lmac_id] = lmac;
1691		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
1692		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
1693		lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
1694	}
1695
1696	return cgx_lmac_verify_fwi_version(cgx);
1697
1698err_bitmap_free:
1699	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
1700err_rx_fc_bmap_free:
1701	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
1702err_dmac_bmap_free:
1703	rvu_free_bitmap(&lmac->mac_to_index_bmap);
1704err_name_free:
1705	kfree(lmac->name);
1706err_lmac_free:
1707	kfree(lmac);
1708	return err;
1709}
1710
1711static int cgx_lmac_exit(struct cgx *cgx)
1712{
1713	struct lmac *lmac;
1714	int i;
1715
1716	if (cgx->cgx_cmd_workq) {
1717		destroy_workqueue(cgx->cgx_cmd_workq);
1718		cgx->cgx_cmd_workq = NULL;
1719	}
1720
1721	/* Free all lmac related resources */
1722	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
1723		lmac = cgx->lmac_idmap[i];
1724		if (!lmac)
1725			continue;
1726		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
1727		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
1728		kfree(lmac->mac_to_index_bmap.bmap);
1729		kfree(lmac->name);
1730		kfree(lmac);
1731	}
1732
1733	return 0;
1734}
1735
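/* Read CGX_CONST to discover the RX FIFO size and the maximum number of
 * LMACs, then advertise the feature set supported by this MAC type.
 */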
1736static void cgx_populate_features(struct cgx *cgx)
1737{
1738	u64 cfg;
1739
1740	cfg = cgx_read(cgx, 0, CGX_CONST);
1741	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
1742	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
1743
1744	if (is_dev_rpm(cgx))
1745		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
1746				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
1747	else
1748		cgx->hw_features = (RVU_LMAC_FEAT_FC  | RVU_LMAC_FEAT_HIGIG2 |
1749				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
1750}
1751
1752static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
1753{
1754	if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
1755	    is_dev_rpm2(cgx))
1756		return 0x80;
1757	else
1758		return 0x60;
1759}
1760
1761static struct mac_ops	cgx_mac_ops    = {
1762	.name		=       "cgx",
1763	.csr_offset	=       0,
1764	.lmac_offset    =       18,
1765	.int_register	=       CGXX_CMRX_INT,
1766	.int_set_reg	=       CGXX_CMRX_INT_ENA_W1S,
1767	.irq_offset	=       9,
1768	.int_ena_bit    =       FW_CGX_INT,
1769	.lmac_fwi	=	CGX_LMAC_FWI,
1770	.non_contiguous_serdes_lane = false,
1771	.rx_stats_cnt   =       9,
1772	.tx_stats_cnt   =       18,
1773	.dmac_filter_count =    32,
1774	.get_nr_lmacs	=	cgx_get_nr_lmacs,
1775	.get_lmac_type  =       cgx_get_lmac_type,
1776	.lmac_fifo_len	=	cgx_get_lmac_fifo_len,
1777	.mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
1778	.mac_get_rx_stats  =	cgx_get_rx_stats,
1779	.mac_get_tx_stats  =	cgx_get_tx_stats,
1780	.get_fec_stats	   =	cgx_get_fec_stats,
1781	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
1782	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
1783	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
1784	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
1785	.mac_enadis_ptp_config =	cgx_lmac_ptp_config,
1786	.mac_rx_tx_enable =		cgx_lmac_rx_tx_enable,
1787	.mac_tx_enable =		cgx_lmac_tx_enable,
1788	.pfc_config =                   cgx_lmac_pfc_config,
1789	.mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
1790	.mac_reset   =			cgx_lmac_reset,
1791};
1792
1793static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1794{
1795	struct device *dev = &pdev->dev;
1796	struct cgx *cgx;
1797	int err, nvec;
1798
1799	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
1800	if (!cgx)
1801		return -ENOMEM;
1802	cgx->pdev = pdev;
1803
1804	pci_set_drvdata(pdev, cgx);
1805
1806	/* Use mac_ops to get MAC specific features */
1807	if (is_dev_rpm(cgx))
1808		cgx->mac_ops = rpm_get_mac_ops(cgx);
1809	else
1810		cgx->mac_ops = &cgx_mac_ops;
1811
1812	cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
1813
1814	err = pci_enable_device(pdev);
1815	if (err) {
1816		dev_err(dev, "Failed to enable PCI device\n");
1817		pci_set_drvdata(pdev, NULL);
1818		return err;
1819	}
1820
1821	err = pci_request_regions(pdev, DRV_NAME);
1822	if (err) {
1823		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1824		goto err_disable_device;
1825	}
1826
1827	/* MAP configuration registers */
1828	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1829	if (!cgx->reg_base) {
1830		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
1831		err = -ENOMEM;
1832		goto err_release_regions;
1833	}
1834
1835	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
1836	if (!cgx->lmac_count) {
1837		dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
1838		err = -EOPNOTSUPP;
1839		goto err_release_regions;
1840	}
1841
1842	nvec = pci_msix_vec_count(cgx->pdev);
1843	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1844	if (err < 0 || err != nvec) {
1845		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
1846			nvec, err);
1847		goto err_release_regions;
1848	}
1849
1850	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1851		& CGX_ID_MASK;
1852
1853	/* init wq for processing linkup requests */
1854	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
1855	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
1856	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
1858		err = -ENOMEM;
1859		goto err_free_irq_vectors;
1860	}
1861
	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);
1866
1867	mutex_init(&cgx->lock);
1868
1869	err = cgx_lmac_init(cgx);
1870	if (err)
1871		goto err_release_lmac;
1872
1873	return 0;
1874
1875err_release_lmac:
1876	cgx_lmac_exit(cgx);
1877	list_del(&cgx->cgx_list);
1878err_free_irq_vectors:
1879	pci_free_irq_vectors(pdev);
1880err_release_regions:
1881	pci_release_regions(pdev);
1882err_disable_device:
1883	pci_disable_device(pdev);
1884	pci_set_drvdata(pdev, NULL);
1885	return err;
1886}
1887
1888static void cgx_remove(struct pci_dev *pdev)
1889{
1890	struct cgx *cgx = pci_get_drvdata(pdev);
1891
1892	if (cgx) {
1893		cgx_lmac_exit(cgx);
1894		list_del(&cgx->cgx_list);
1895	}
1896	pci_free_irq_vectors(pdev);
1897	pci_release_regions(pdev);
1898	pci_disable_device(pdev);
1899	pci_set_drvdata(pdev, NULL);
1900}
1901
1902struct pci_driver cgx_driver = {
1903	.name = DRV_NAME,
1904	.id_table = cgx_id_table,
1905	.probe = cgx_probe,
1906	.remove = cgx_remove,
1907};
1908