// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/phy/phy.h>
#include <net/dcbnl.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define SPX5_ETYPE_TAG_C     0x8100
#define SPX5_ETYPE_TAG_S     0x88a8

#define SPX5_WAIT_US         1000
#define SPX5_WAIT_MAX_US     2000

enum port_error {
	SPX5_PERR_SPEED,
	SPX5_PERR_IFTYPE,
};

#define PAUSE_DISCARD        0xC
#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)

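/* Decode the SGMII in-band ability word received from the link partner:
 * link state, speed and duplex as carried by the (Cisco) SGMII control word.
 */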
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
	status->an_complete = true;
	if (!(lp_abil & LPA_SGMII_LINK)) {
		status->link = false;
		return;
	}

	switch (lp_abil & LPA_SGMII_SPD_MASK) {
	case LPA_SGMII_10:
		status->speed = SPEED_10;
		break;
	case LPA_SGMII_100:
		status->speed = SPEED_100;
		break;
	case LPA_SGMII_1000:
		status->speed = SPEED_1000;
		break;
	default:
		status->link = false;
		return;
	}
	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
		status->duplex = DUPLEX_FULL;
	else
		status->duplex = DUPLEX_HALF;
}

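/* Decode a Clause 37 (1000Base-X) ability word pair: resolve duplex from the
 * link partner full-duplex bit and pause mode from the symmetric/asymmetric
 * pause bits advertised by both the local and the link partner end.
 */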
static void decode_cl37_word(u16 lp_abil, u16 ld_abil,
			     struct sparx5_port_status *status)
{
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
		DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */

	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_TX : 0;
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_RX : 0;
	} else {
		status->pause = MLO_PAUSE_NONE;
	}
}

static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}

static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	dev = sparx5_to_high_dev(portno);
	tinst = sparx5_port_dev_index(portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}

/* Get link status of 1000Base-X/in-band and SFI ports */
int sparx5_get_port_status(struct sparx5 *sparx5,
			   struct sparx5_port *port,
			   struct sparx5_port_status *status)
{
	memset(status, 0, sizeof(*status));
	status->speed = port->conf.speed;
	if (port->conf.power_down) {
		status->link = false;
		return 0;
	}
	switch (port->conf.portmode) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		return sparx5_get_dev2g5_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		return sparx5_get_sfi_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_NA:
		return 0;
	default:
		netdev_err(port->ndev, "Status not supported\n");
		return -ENODEV;
	}
	return 0;
}

static int sparx5_port_error(struct sparx5_port *port,
			     struct sparx5_port_config *conf,
			     enum port_error errtype)
{
	switch (errtype) {
	case SPX5_PERR_SPEED:
		netdev_err(port->ndev,
			   "Interface does not support speed: %u: for %s\n",
			   conf->speed, phy_modes(conf->portmode));
		break;
	case SPX5_PERR_IFTYPE:
		netdev_err(port->ndev,
			   "Switch port does not support interface type: %s\n",
			   phy_modes(conf->portmode));
		break;
	default:
		netdev_err(port->ndev,
			   "Interface configuration error\n");
	}

	return -EINVAL;
}

static int sparx5_port_verify_speed(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	if ((sparx5_port_is_2g5(port->portno) &&
	     conf->speed > SPEED_2500) ||
	    (sparx5_port_is_5g(port->portno) &&
	     conf->speed > SPEED_5000) ||
	    (sparx5_port_is_10g(port->portno) &&
	     conf->speed > SPEED_10000))
		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_NA:
		return -EINVAL;
	case PHY_INTERFACE_MODE_1000BASEX:
		if (conf->speed != SPEED_1000 ||
		    sparx5_port_is_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		if (sparx5_port_is_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		if (conf->speed != SPEED_2500 ||
		    sparx5_port_is_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_QSGMII:
		if (port->portno > 47)
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		if (conf->speed != SPEED_1000 &&
		    conf->speed != SPEED_100 &&
		    conf->speed != SPEED_10 &&
		    conf->speed != SPEED_2500)
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		if ((conf->speed != SPEED_5000 &&
		     conf->speed != SPEED_10000 &&
		     conf->speed != SPEED_25000))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	default:
		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
	}
	return 0;
}

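/* A device change is needed when the port switches between the low speed
 * (1G/2.5G) device and the high speed (5G/10G/25G BaseR) device.
 */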
static bool sparx5_dev_change(struct sparx5 *sparx5,
			      struct sparx5_port *port,
			      struct sparx5_port_config *conf)
{
	return sparx5_is_baser(port->conf.portmode) ^
		sparx5_is_baser(conf->portmode);
}

static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32 value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

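			/* Loop index 0 polls the destination memory counters
			 * (QRES index 2048 + prio), index 1 the source memory
			 * counters (QRES index 0 + prio).
			 */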
			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}

static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
			       bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25G BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (sparx5_port_is_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled */
	return 0;
}

static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per    = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width  = 8;
	u32 addition   = 0;

	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per = 8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

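	/* Estimate the stop watermark: scale the MAC to FIFO width ratio
	 * (tmp1) by a latency term in MAC periods (tmp2, which includes the
	 * per-port taxi bus distance), then round up to whole words and add
	 * the speed dependent adjustment.
	 */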
	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return (tmp3 + 2000 + 999) / 1000 + addition;
}

/* Configure port muxing:
 * QSGMII:     4x2G5 devices
 */
static int sparx5_port_mux_set(struct sparx5 *sparx5,
			       struct sparx5_port *port,
			       struct sparx5_port_config *conf)
{
	u32 portno = port->portno;
	u32 inst;

	if (port->conf.portmode == conf->portmode)
		return 0; /* Nothing to do */

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
		inst = (portno - portno % 4) / 4;
		spx5_rmw(BIT(inst),
			 BIT(inst),
			 sparx5,
			 PORT_CONF_QSGMII_ENA);

		if ((portno / 4 % 2) == 0) {
			/* Affects d0-d3,d8-d11..d40-d43 */
			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
				 PORT_CONF_USGMII_CFG_QUAD_MODE,
				 sparx5,
				 PORT_CONF_USGMII_CFG((portno / 8)));
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(port->portno);
	u32 tinst           = sparx5_port_dev_index(port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
	u32 etype;

	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

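	/* The 2G5 device is always configured above; ports that only have a
	 * 2G5 device stop here, the others also set up tag handling in the
	 * 5G/10G/25G device below.
	 */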
	if (sparx5_port_is_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}

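/* Calculate the forwarding urgency for the queue system: a per-speed constant
 * (in the same picosecond units as the clock period) divided by the core
 * clock period, minus one, gives the FWD_URGENCY value in clock cycles.
 */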
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
	u32 clk_period_ps = 1600; /* 625 MHz for now */
	u32 urg = 672000;

	switch (speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		urg = 672000;
		break;
	case SPEED_2500:
		urg = 270000;
		break;
	case SPEED_5000:
		urg = 135000;
		break;
	case SPEED_10000:
		urg = 67200;
		break;
	case SPEED_25000:
		urg = 27000;
		break;
	}
	return urg / clk_period_ps - 1;
}

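/* Encode a watermark value: values of 2048 and above are encoded in units of
 * 16 with an offset of 2048, smaller values are used as-is.
 */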
static u16 sparx5_wm_enc(u16 value)
{
	if (value >= 2048)
		return 2048 + value / 16;

	return value;
}

static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
						SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}

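/* Build the advertised ability word for in-band aneg: a Clause 37 word
 * (pause bits, ack and full duplex) for 1000Base-X, otherwise just bit 0 to
 * enable SGMII aneg.
 */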
static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
{
	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
	else
		return 1; /* Enable SGMII Aneg */
}

int sparx5_serdes_set(struct sparx5 *sparx5,
		      struct sparx5_port *port,
		      struct sparx5_port_config *conf)
{
	int portmode, err, speed = conf->speed;

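	/* In QSGMII mode the four ports share one serdes; only the primary
	 * port of each group of four configures it.
	 */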
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
	    ((port->portno % 4) != 0)) {
		return 0;
	}
	if (sparx5_is_baser(conf->portmode)) {
		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
			speed = SPEED_25000;
		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
			speed = SPEED_10000;
		else
			speed = SPEED_5000;
	}

	err = phy_set_media(port->serdes, conf->media);
	if (err)
		return err;
	if (speed > 0) {
		err = phy_set_speed(port->serdes, speed);
		if (err)
			return err;
	}
	if (conf->serdes_reset) {
		err = phy_reset(port->serdes);
		if (err)
			return err;
	}

	/* Configure SerDes with port parameters
	 * For BaseR, the serdes driver supports 10GBASE-R and speeds of 5G/10G/25G
	 */
	portmode = conf->portmode;
	if (sparx5_is_baser(conf->portmode))
		portmode = PHY_INTERFACE_MODE_10GBASER;
	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
	if (err)
		return err;
	conf->serdes_reset = false;
	return err;
}

static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
{
	bool sgmii = false, inband_aneg = false;
	int err;

	if (conf->inband) {
		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
			 conf->autoneg)
			inband_aneg = true; /* Clause-37 in-band-aneg */

		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return -EINVAL;
	} else {
		sgmii = true; /* Phy is connected to the MAC */
	}

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 sparx5,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		sparx5,
		DEV2G5_PCS1G_CFG(port->portno));

	if (inband_aneg) {
		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			sparx5,
			DEV2G5_PCS1G_ANEG_CFG(port->portno));
	} else {
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
	}

	/* Take PCS out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}

static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(port->portno);
	u32 dev = sparx5_to_high_dev(port->portno);
	u32 pcs = sparx5_to_pcs_dev(port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* SFI: No in-band aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (err)
		return -EINVAL;
	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 sparx5,
			 DEV25G_PCS25G_CFG(pix));
	} else {
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));
	}

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     devinst,
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      devinst,
		      DEV10G_DEV_RST_CTRL(0));

	return 0;
}

/* Switch between the 1G/2.5G and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
	int bt_indx = BIT(sparx5_port_dev_index(port));

	if (sparx5_port_is_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV5G_MODES);
	} else if (sparx5_port_is_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV10G_MODES);
	} else if (sparx5_port_is_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV25G_MODES);
	}
}

/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

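	/* Resolve the clock speed selection (0: 10M, 1: 100M, 2: 1G/2.5G)
	 * and the inter frame gaps used for the selected speed and duplex.
	 */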
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}

int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err;

	if (sparx5_dev_change(sparx5, port, conf)) {
		/* switch device */
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
		if (err)
			return err;
	}
	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);
	if (err)
		return -EINVAL;

	if (high_speed_dev)
		err = sparx5_port_pcs_high_set(sparx5, port, conf);
	else
		err = sparx5_port_pcs_low_set(sparx5, port, conf);

	if (err)
		return -EINVAL;

	if (conf->inband) {
		/* Enable/disable 1G counters in ASM */
		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 ASM_PORT_CFG_CSC_STAT_DIS,
			 sparx5,
			 ASM_PORT_CFG(port->portno));

		/* Enable/disable 1G counters in DSM */
		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 DSM_BUF_CFG_CSC_STAT_DIS,
			 sparx5,
			 DSM_BUF_CFG(port->portno));
	}

	port->conf = *conf;

	return 0;
}

int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}

/* Initialize port config to default */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 devhigh = sparx5_to_high_dev(port->portno);
	u32 pix = sparx5_port_dev_index(port->portno);
	u32 pcs = sparx5_to_pcs_dev(port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode */
	err = sparx5_port_mux_set(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!sparx5_port_is_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		/* All ports must be PCS enabled in QSGMII mode */
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (sparx5_port_is_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_MAXLEN_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (sparx5_port_is_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	return 0;
}

void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Enable or disable the port for frame transfer */
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}

int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}

int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
				 struct sparx5_port_qos_pcp_rewr *qos)
{
	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
	struct sparx5 *sparx5 = port->sparx5;
	u8 pcp, dei;

	/* Use mapping table, with classified QoS as index, to map QoS and DP
	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
	 * PCP. Classified PCP equals frame PCP.
	 */
	if (qos->enable)
		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
		 port->sparx5, REW_TAG_CTRL(port->portno));

	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		/* Extract PCP and DEI */
		pcp = qos->map.map[i];
		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
			dei = 1;
		else
			dei = 0;

		/* Rewrite PCP and DEI, for each classified QoS class and DP
		 * level. This table is only used if tag ctrl mode is set to
		 * 'mapped'.
		 *
		 * 0:0nd   - prio=0 and dp:0 => pcp=0 and dei=0
		 * 0:0de   - prio=0 and dp:1 => pcp=0 and dei=1
		 */
		if (dei) {
			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
				 REW_PCP_MAP_DE1(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
				 REW_DEI_MAP_DE1(port->portno, i));
		} else {
			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
				 REW_PCP_MAP_DE0(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
				 REW_DEI_MAP_DE0(port->portno, i));
		}
	}

	return 0;
}

int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
			    struct sparx5_port_qos_pcp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *pcp_itr = qos->map.map;
	u8 pcp, dp;
	int i;

	/* Enable/disable pcp and dp for qos classification. */
	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Map each pcp and dei value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		pcp = *(pcp_itr + i);
		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
	}

	return 0;
}

void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
					int mode)
{
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
}

int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
				  struct sparx5_port_qos_dscp_rewr *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	bool rewr = false;
	u16 dscp;
	int i;

	/* On egress, rewrite the DSCP value to either the classified DSCP or
	 * the frame DSCP: classified DSCP if enabled, frame DSCP if disabled.
	 */
	if (qos->enable)
		rewr = true;

	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
		 REW_DSCP_MAP(port->portno));

	/* On ingress, map each classified QoS class and DP to classified DSCP
	 * value. This mapping table is global for all ports.
	 */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		dscp = qos->map.map[i];
		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
			 ANA_CL_QOS_MAP_CFG(i));
	}

	return 0;
}

int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
			     struct sparx5_port_qos_dscp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *dscp = qos->map.map;
	int i;

	/* Enable/disable dscp and dp for qos classification.
	 * Disable rewrite of dscp values for now.
	 */
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
		 ANA_CL_QOS_CFG(port->portno));

	/* Map each dscp value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
			 ANA_CL_DSCP_CFG(i));
	}

	/* Set per-dscp trust */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		if (qos->qos_enable) {
			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
				 ANA_CL_DSCP_CFG(i));
		}
	}

	return 0;
}

int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));

	return 0;
}