// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Functions to configure the BGX MAC.
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pip.h>

/* Enable this define to see BGX error messages */
/*#define DEBUG_BGX */

/* Set this variable to a non-zero value to trace the functions called while
 * initializing the BGX
 */
static const int debug;

/**
 * cvmx_helper_bgx_override_autoneg(int xiface, int index) is a function
 * pointer to override enabling/disabling of autonegotiation for SGMII,
 * 10G-KR or 40G-KR4 interfaces. This function is called when the interface
 * is initialized.
 */
int (*cvmx_helper_bgx_override_autoneg)(int xiface, int index) = NULL;

/*
 * cvmx_helper_bgx_override_fec(int xiface, int index) is a function pointer
 * to override enabling/disabling of FEC for 10G interfaces. This function
 * is called when the interface is initialized.
 */
int (*cvmx_helper_bgx_override_fec)(int xiface, int index) = NULL;
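
/*
 * Example (hypothetical board code, not part of this file): a board that
 * must disable autonegotiation on all of its BGX ports could install an
 * override before the interfaces are initialized:
 *
 *	static int board_bgx_no_autoneg(int xiface, int index)
 *	{
 *		return 0;	// 0 = disable autonegotiation
 *	}
 *
 *	cvmx_helper_bgx_override_autoneg = board_bgx_no_autoneg;
 *
 * cvmx_helper_bgx_override_fec can be hooked the same way to force FEC on
 * or off per port.
 */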

/**
 * Delay after enabling an interface based on the mode.  Different modes take
 * different amounts of time.
 */
static void
__cvmx_helper_bgx_interface_enable_delay(cvmx_helper_interface_mode_t mode)
{
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
	case CVMX_HELPER_INTERFACE_MODE_XFI:
		mdelay(250);
		break;
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		mdelay(100);
		break;
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
		mdelay(50);
		break;
	default:
		mdelay(50);
		break;
	}
}

/**
 * @INTERNAL
 *
 * Returns the number of ports on an interface
 * @param xiface Which xiface
 * @return Number of ports on this xiface
 */
int __cvmx_helper_bgx_enumerate(int xiface)
{
	cvmx_bgxx_cmr_tx_lmacs_t lmacs;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	lmacs.u64 = csr_rd_node(xi.node, CVMX_BGXX_CMR_TX_LMACS(xi.interface));
	return lmacs.s.lmacs;
}

/**
 * @INTERNAL
 *
 * Returns the mode of each BGX LMAC (port).
 * This is different from 'cvmx_helper_interface_get_mode()', which
 * provides the mode of an entire interface; when the BGX is in "mixed"
 * mode, this function should be called instead to get the protocol
 * for each port (BGX LMAC) individually.
 * Both functions return the same enumerated mode.
 *
 * @param xiface is the global interface identifier
 * @param index is the interface port index
 * @returns mode of the individual port
 */
cvmx_helper_interface_mode_t cvmx_helper_bgx_get_mode(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	cvmx_bgxx_cmrx_config_t cmr_config;
	cvmx_bgxx_spux_br_pmd_control_t pmd_control;

	cmr_config.u64 = csr_rd_node(
		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));

	switch (cmr_config.s.lmac_type) {
	case 0:
		return CVMX_HELPER_INTERFACE_MODE_SGMII;
	case 1:
		return CVMX_HELPER_INTERFACE_MODE_XAUI;
	case 2:
		return CVMX_HELPER_INTERFACE_MODE_RXAUI;
	case 3:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return cvmx_helper_interface_get_mode(xiface);
		pmd_control.u64 = csr_rd_node(
			xi.node,
			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
		if (pmd_control.s.train_en)
			return CVMX_HELPER_INTERFACE_MODE_10G_KR;
		else
			return CVMX_HELPER_INTERFACE_MODE_XFI;
	case 4:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return cvmx_helper_interface_get_mode(xiface);
		pmd_control.u64 = csr_rd_node(
			xi.node,
			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
		if (pmd_control.s.train_en)
			return CVMX_HELPER_INTERFACE_MODE_40G_KR4;
		else
			return CVMX_HELPER_INTERFACE_MODE_XLAUI;
	case 5:
		return CVMX_HELPER_INTERFACE_MODE_RGMII;
	default:
		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
	}
}
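
/*
 * Usage sketch (illustrative only): in "mixed" mode the per-LMAC query must
 * be used instead of the per-interface one, e.g. when walking one BGX:
 *
 *	int num = cvmx_helper_ports_on_interface(xiface);
 *	int i;
 *
 *	for (i = 0; i < num; i++) {
 *		cvmx_helper_interface_mode_t m =
 *			cvmx_helper_bgx_get_mode(xiface, i);
 *		// m may differ per LMAC, e.g. XFI on 0/1, SGMII on 2/3
 *	}
 */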

static int __cvmx_helper_bgx_rgmii_speed(cvmx_helper_link_info_t link_info)
{
	cvmx_xcv_reset_t xcv_reset;
	cvmx_xcv_ctl_t xcv_ctl;
	cvmx_xcv_batch_crd_ret_t crd_ret;
	cvmx_xcv_dll_ctl_t dll_ctl;
	cvmx_xcv_comp_ctl_t comp_ctl;
	int speed;
	int up = link_info.s.link_up;
	int do_credits;

	if (link_info.s.speed == 100)
		speed = 1;
	else if (link_info.s.speed == 10)
		speed = 0;
	else
		speed = 2;

	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
	xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
	do_credits = up && !xcv_reset.s.enable;

	if (xcv_ctl.s.lpbk_int) {
		xcv_reset.s.clkrst = 0;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
	}

	if (up && (!xcv_reset.s.enable || xcv_ctl.s.speed != speed)) {
		if (debug)
			debug("%s: *** Enabling XCV block\n", __func__);
		/* Enable the XCV block */
		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
		xcv_reset.s.enable = 1;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);

		/* Set operating mode */
		xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
		xcv_ctl.s.speed = speed;
		csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);

		/* Configure DLL - enable or bypass */
		/* TX no bypass, RX bypass */
		dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
		dll_ctl.s.clkrx_set = 0;
		dll_ctl.s.clkrx_byp = 1;
		dll_ctl.s.clktx_byp = 0;
		csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);

		/* Enable */
		dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
		dll_ctl.s.refclk_sel = 0;
		csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);
		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
		xcv_reset.s.dllrst = 0;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);

		/* A delay seems to be needed so that XCV_DLL_CTL[CLK_SET]
		 * takes effect
		 */
		udelay(10);

		comp_ctl.u64 = csr_rd(CVMX_XCV_COMP_CTL);
		//comp_ctl.s.drv_pctl = 0;
		//comp_ctl.s.drv_nctl = 0;
		comp_ctl.s.drv_byp = 0;
		csr_wr(CVMX_XCV_COMP_CTL, comp_ctl.u64);

		/* enable */
		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
		xcv_reset.s.comp = 1;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);

		/* setup the RXC */
		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
		xcv_reset.s.clkrst = !xcv_ctl.s.lpbk_int;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);

		/* Bring the datapaths out of reset
		 * - the datapath resets will disengage BGX from the RGMII
		 *   interface
		 * - XCV will continue to return TX credits for each tick that
		 *   is sent on the TX data path
		 */
		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
		xcv_reset.s.tx_dat_rst_n = 1;
		xcv_reset.s.rx_dat_rst_n = 1;
		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
	} else if (debug) {
		debug("%s: *** Not enabling XCV\n", __func__);
		debug("  up: %s, xcv_reset.s.enable: %d, xcv_ctl.s.speed: %d, speed: %d\n",
		      up ? "true" : "false", (unsigned int)xcv_reset.s.enable,
		      (unsigned int)xcv_ctl.s.speed, speed);
	}

	/* Enable the packet flow
	 * - the packet resets will only disengage on packet boundaries
	 * - XCV will continue to return TX credits for each tick that is
	 *   sent on the TX datapath
	 */
	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
	xcv_reset.s.tx_pkt_rst_n = up;
	xcv_reset.s.rx_pkt_rst_n = up;
	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);

	/* Full reset when link is down */
	if (!up) {
		if (debug)
			debug("%s: *** Disabling XCV reset\n", __func__);
		/* wait 2*MTU in time */
		mdelay(10);
		/* reset the world */
		csr_wr(CVMX_XCV_RESET, 0);
	}

	/* grant PKO TX credits */
	if (do_credits) {
		crd_ret.u64 = csr_rd(CVMX_XCV_BATCH_CRD_RET);
		crd_ret.s.crd_ret = 1;
		csr_wr(CVMX_XCV_BATCH_CRD_RET, crd_ret.u64);
	}

	return 0;
}
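
/*
 * Usage sketch (assumptions noted): callers pass the current link state,
 * typically obtained from __cvmx_helper_bgx_sgmii_link_get(). A forced
 * 1000 Mbps full-duplex bring-up would look like:
 *
 *	cvmx_helper_link_info_t li;
 *
 *	li.u64 = 0;
 *	li.s.link_up = 1;
 *	li.s.full_duplex = 1;
 *	li.s.speed = 1000;	// mapped to XCV_CTL[SPEED] = 2 above
 *	__cvmx_helper_bgx_rgmii_speed(li);
 */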

static void __cvmx_bgx_common_init_pknd(int xiface, int index)
{
	int num_ports;
	int num_chl = 16;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int node = xi.node;
	int pknd;
	cvmx_bgxx_cmrx_rx_bp_on_t bgx_rx_bp_on;
	cvmx_bgxx_cmrx_rx_id_map_t cmr_rx_id_map;
	cvmx_bgxx_cmr_chan_msk_and_t chan_msk_and;
	cvmx_bgxx_cmr_chan_msk_or_t chan_msk_or;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	num_ports = cvmx_helper_ports_on_interface(xiface);
	/* Modify the bp_on mark depending on the number of LMACS on that
	 * interface and write it for every port
	 */
	bgx_rx_bp_on.u64 = 0;
	bgx_rx_bp_on.s.mark = (CVMX_BGX_RX_FIFO_SIZE / (num_ports * 4 * 16));

	/* Setup pkind */
	pknd = cvmx_helper_get_pknd(xiface, index);
	cmr_rx_id_map.u64 = csr_rd_node(
		node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface));
	cmr_rx_id_map.s.pknd = pknd;
	/* Change the default reassembly id (RID), as a maximum of 14 RIDs
	 * are allowed
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		cmr_rx_id_map.s.rid = ((4 * xi.interface) + 2 + index);
	csr_wr_node(node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface),
		    cmr_rx_id_map.u64);
	/* Set backpressure channel mask AND/OR registers */
	chan_msk_and.u64 =
		csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface));
	chan_msk_or.u64 =
		csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface));
	chan_msk_and.s.msk_and |= ((1 << num_chl) - 1) << (16 * index);
	chan_msk_or.s.msk_or |= ((1 << num_chl) - 1) << (16 * index);
	csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface),
		    chan_msk_and.u64);
	csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface),
		    chan_msk_or.u64);
	/* set rx back pressure (bp_on) on value */
	csr_wr_node(node, CVMX_BGXX_CMRX_RX_BP_ON(index, xi.interface),
		    bgx_rx_bp_on.u64);
}
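
/*
 * Worked example for the bp_on mark above, assuming CVMX_BGX_RX_FIFO_SIZE
 * is 64 KiB (the actual value comes from the SoC headers): with 4 LMACs,
 * mark = 65536 / (4 * 4 * 16) = 256, i.e. backpressure asserts once a
 * port's share of the RX FIFO reaches 256 16-byte words.
 */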

/**
 * @INTERNAL
 * Probe a SGMII interface and determine the number of ports
 * connected to it. The SGMII interface should still be down after
 * this call. This is used by interfaces using the BGX MAC.
 *
 * @param xiface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_bgx_probe(int xiface)
{
	return __cvmx_helper_bgx_enumerate(xiface);
}

/**
 * @INTERNAL
 * Return the size of the BGX TX_FIFO for a given LMAC,
 * or 0 if the requested LMAC is inactive.
 *
 * TBD: Also need to add a "__cvmx_helper_bgx_speed()" function to
 * return the speed of each LMAC.
 */
int __cvmx_helper_bgx_fifo_size(int xiface, unsigned int lmac)
{
	cvmx_bgxx_cmr_tx_lmacs_t lmacs;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	unsigned int tx_fifo_size = CVMX_BGX_TX_FIFO_SIZE;

	/* FIXME: Add validation for interface# < BGX_count */
	lmacs.u64 = csr_rd_node(xi.node, CVMX_BGXX_CMR_TX_LMACS(xi.interface));

	switch (lmacs.s.lmacs) {
	case 1:
		if (lmac > 0)
			return 0;
		else
			return tx_fifo_size;
	case 2:
		if (lmac > 1)
			return 0;
		else
			return tx_fifo_size >> 1;
	case 3:
		if (lmac > 2)
			return 0;
		else
			return tx_fifo_size >> 2;
	case 4:
		if (lmac > 3)
			return 0;
		else
			return tx_fifo_size >> 2;
	default:
		return 0;
	}
}
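
/*
 * Worked example, assuming CVMX_BGX_TX_FIFO_SIZE is 32 KiB (the value comes
 * from the SoC headers): with 1 LMAC the single port owns all 32 KiB, with
 * 2 LMACs each gets 16 KiB, and with 3 or 4 LMACs each active LMAC gets an
 * 8 KiB quarter; note the 3-LMAC case also uses quarters, not thirds.
 */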

/**
 * @INTERNAL
 * Perform initialization required only once for an SGMII port.
 *
 * @param xiface Interface to init
 * @param index     Index of port on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_bgx_sgmii_hardware_init_one_time(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int node = xi.node;
	const u64 clock_mhz = 1200; /* todo: fixme */
	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
	cvmx_bgxx_gmp_pcs_linkx_timer_t gmp_timer;

	if (!cvmx_helper_is_port_valid(xi.interface, index))
		return 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	/*
	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
	 * appropriate value. 1000BASE-X specifies a 10ms
	 * interval. SGMII specifies a 1.6ms interval.
	 */
	gmp_misc_ctl.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
	/* Adjust the MAC mode if requested by device tree */
	gmp_misc_ctl.s.mac_phy = cvmx_helper_get_mac_phy_mode(xiface, index);
	gmp_misc_ctl.s.mode = cvmx_helper_get_1000x_mode(xiface, index);
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
		    gmp_misc_ctl.u64);

	gmp_timer.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface));
	if (gmp_misc_ctl.s.mode)
		/* 1000BASE-X */
		gmp_timer.s.count = (10000ull * clock_mhz) >> 10;
	else
		/* SGMII */
		gmp_timer.s.count = (1600ull * clock_mhz) >> 10;

	csr_wr_node(node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface),
		    gmp_timer.u64);

	/*
	 * Write the advertisement register to be used as the
	 * tx_Config_Reg<D15:D0> of the autonegotiation.  In
	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
	 * PCS*_SGM*_AN_ADV_REG.  In SGMII MAC mode,
	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
	 * step can be skipped.
	 */
	if (gmp_misc_ctl.s.mode) {
		/* 1000BASE-X */
		cvmx_bgxx_gmp_pcs_anx_adv_t gmp_an_adv;

		gmp_an_adv.u64 = csr_rd_node(
			node, CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface));
		gmp_an_adv.s.rem_flt = 0;
		gmp_an_adv.s.pause = 3;
		gmp_an_adv.s.hfd = 1;
		gmp_an_adv.s.fd = 1;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface),
			    gmp_an_adv.u64);
	} else {
		if (gmp_misc_ctl.s.mac_phy) {
			/* PHY Mode */
			cvmx_bgxx_gmp_pcs_sgmx_an_adv_t gmp_sgmx_an_adv;

			gmp_sgmx_an_adv.u64 =
				csr_rd_node(node, CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(
							  index, xi.interface));
			gmp_sgmx_an_adv.s.dup = 1;
			gmp_sgmx_an_adv.s.speed = 2;
			csr_wr_node(node,
				    CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(index,
								  xi.interface),
				    gmp_sgmx_an_adv.u64);
		} else {
			/* MAC Mode - Nothing to do */
		}
	}
	return 0;
}
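
/*
 * Worked example for the link timer above: the >> 10 scaling implies COUNT
 * counts in units of 1024 clock cycles. With the placeholder
 * clock_mhz = 1200, SGMII gives COUNT = (1600 * 1200) >> 10 = 1875, i.e.
 * 1875 * 1024 cycles / 1.2 GHz = 1.6 ms, and 1000BASE-X gives
 * (10000 * 1200) >> 10 = 11718, which is roughly the required 10 ms.
 */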

/**
 * @INTERNAL
 * Bring up the SGMII interface to be ready for packet I/O but
 * leave I/O disabled using the GMX override. This function
 * follows the bringup documented in 10.6.3 of the manual.
 *
 * @param xiface Interface to bring up
 * @param num_ports Number of ports on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_bgx_sgmii_hardware_init(int xiface, int num_ports)
{
	int index;
	int do_link_set = 1;

	for (index = 0; index < num_ports; index++) {
		int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
		cvmx_helper_interface_mode_t mode;

		if (!cvmx_helper_is_port_valid(xiface, index))
			continue;

		__cvmx_helper_bgx_port_init(xipd_port, 0);

		mode = cvmx_helper_bgx_get_mode(xiface, index);
		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
			continue;

		if (do_link_set)
			__cvmx_helper_bgx_sgmii_link_set(
				xipd_port,
				__cvmx_helper_bgx_sgmii_link_get(xipd_port));
	}

	return 0;
}

/**
 * @INTERNAL
 * Bring up and enable a SGMII interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled. This is used by interfaces using
 * the BGX MAC.
 *
 * @param xiface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_bgx_sgmii_enable(int xiface)
{
	int num_ports;

	num_ports = cvmx_helper_ports_on_interface(xiface);
	__cvmx_helper_bgx_sgmii_hardware_init(xiface, num_ports);

	return 0;
}

/**
 * @INTERNAL
 * Initialize the SERDES link for the first time or after a loss
 * of link.
 *
 * @param xiface Interface to init
 * @param index     Index of port on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_bgx_sgmii_hardware_init_link(int xiface, int index)
{
	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
	cvmx_bgxx_cmrx_config_t cmr_config;
	int phy_mode, mode_1000x;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;
	int node = xi.node;
	int autoneg = 0;

	if (!cvmx_helper_is_port_valid(xiface, index))
		return 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	gmp_control.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
	/* Take PCS through a reset sequence */
	gmp_control.s.reset = 1;
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
		    gmp_control.u64);

	/* Wait until GMP_PCS_MRX_CONTROL[reset] comes out of reset */
	if (CVMX_WAIT_FOR_FIELD64_NODE(
		    node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
		    cvmx_bgxx_gmp_pcs_mrx_control_t, reset, ==, 0, 10000)) {
		debug("SGMII%d: Timeout waiting for port %d to finish reset\n",
		      interface, index);
		return -1;
	}

	cmr_config.u64 =
		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));

	gmp_control.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
	if (cvmx_helper_get_port_phy_present(xiface, index)) {
		gmp_control.s.pwr_dn = 0;
	} else {
		gmp_control.s.spdmsb = 1;
		gmp_control.s.spdlsb = 0;
		gmp_control.s.pwr_dn = 0;
	}
	/* Write GMP_PCS_MR*_CONTROL[RST_AN]=1 to ensure a fresh SGMII
	 * negotiation starts.
	 */
	autoneg = cvmx_helper_get_port_autonegotiation(xiface, index);
	gmp_control.s.rst_an = 1;
	gmp_control.s.an_en = (cmr_config.s.lmac_type != 5) && autoneg;
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
		    gmp_control.u64);

	phy_mode = cvmx_helper_get_mac_phy_mode(xiface, index);
	mode_1000x = cvmx_helper_get_1000x_mode(xiface, index);

	gmp_misc_ctl.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
	gmp_misc_ctl.s.mac_phy = phy_mode;
	gmp_misc_ctl.s.mode = mode_1000x;
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
		    gmp_misc_ctl.u64);

	if (phy_mode || !autoneg)
		/* In PHY mode we can't query the link status so we just
		 * assume that the link is up
		 */
		return 0;

	/* Wait for GMP_PCS_MRX_CONTROL[an_cpt] to be set, indicating that
	 * SGMII autonegotiation is complete. In MAC mode this isn't an
	 * Ethernet link, but the link between OCTEON and the PHY.
	 */
	if (cmr_config.s.lmac_type != 5 &&
	    CVMX_WAIT_FOR_FIELD64_NODE(
		    node, CVMX_BGXX_GMP_PCS_MRX_STATUS(index, xi.interface),
		    cvmx_bgxx_gmp_pcs_mrx_status_t, an_cpt, ==, 1, 10000)) {
		debug("SGMII%d: Port %d link timeout\n", interface, index);
		return -1;
	}

	return 0;
}

/**
 * @INTERNAL
 * Configure an SGMII link to the specified speed after the SERDES
 * link is up.
 *
 * @param xiface Interface to init
 * @param index     Index of port on the interface
 * @param link_info Link state to configure
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_bgx_sgmii_hardware_init_link_speed(
	int xiface, int index, cvmx_helper_link_info_t link_info)
{
	cvmx_bgxx_cmrx_config_t cmr_config;
	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_miscx_ctl;
	cvmx_bgxx_gmp_gmi_prtx_cfg_t gmp_prtx_cfg;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int node = xi.node;

	if (!cvmx_helper_is_port_valid(xiface, index))
		return 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	/* Disable GMX before we make any changes. */
	cmr_config.u64 =
		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
	cmr_config.s.data_pkt_tx_en = 0;
	cmr_config.s.data_pkt_rx_en = 0;
	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
		    cmr_config.u64);

	/* Wait for GMX to be idle */
	if (CVMX_WAIT_FOR_FIELD64_NODE(
		    node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
		    cvmx_bgxx_gmp_gmi_prtx_cfg_t, rx_idle, ==, 1, 10000) ||
	    CVMX_WAIT_FOR_FIELD64_NODE(
		    node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
		    cvmx_bgxx_gmp_gmi_prtx_cfg_t, tx_idle, ==, 1, 10000)) {
		debug("SGMII%d:%d: Timeout waiting for port %d to be idle\n",
		      node, xi.interface, index);
		return -1;
	}

	/* Read GMX CFG again to make sure the disable completed */
	gmp_prtx_cfg.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));

	/*
	 * Get the misc control for PCS. We will need to set the
	 * duplication amount.
	 */
	gmp_miscx_ctl.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));

	/*
	 * Use GMXENO to force the link down if the status we get says
	 * it should be down.
	 */
	gmp_miscx_ctl.s.gmxeno = !link_info.s.link_up;

	/* Only change the duplex setting if the link is up */
	if (link_info.s.link_up)
		gmp_prtx_cfg.s.duplex = link_info.s.full_duplex;

	/* Do speed based setting for GMX */
	switch (link_info.s.speed) {
	case 10:
		gmp_prtx_cfg.s.speed = 0;
		gmp_prtx_cfg.s.speed_msb = 1;
		gmp_prtx_cfg.s.slottime = 0;
		/* Setting from GMX-603 */
		gmp_miscx_ctl.s.samp_pt = 25;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
			    64);
		csr_wr_node(node,
			    CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
			    0);
		break;
	case 100:
		gmp_prtx_cfg.s.speed = 0;
		gmp_prtx_cfg.s.speed_msb = 0;
		gmp_prtx_cfg.s.slottime = 0;
		gmp_miscx_ctl.s.samp_pt = 0x5;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
			    64);
		csr_wr_node(node,
			    CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
			    0);
		break;
	case 1000:
		gmp_prtx_cfg.s.speed = 1;
		gmp_prtx_cfg.s.speed_msb = 0;
		gmp_prtx_cfg.s.slottime = 1;
		gmp_miscx_ctl.s.samp_pt = 1;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
			    512);
		if (gmp_prtx_cfg.s.duplex)
			/* full duplex */
			csr_wr_node(node,
				    CVMX_BGXX_GMP_GMI_TXX_BURST(index,
								xi.interface),
				    0);
		else
			/* half duplex */
			csr_wr_node(node,
				    CVMX_BGXX_GMP_GMI_TXX_BURST(index,
								xi.interface),
				    8192);
		break;
	default:
		break;
	}

	/* Write the new misc control for PCS */
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
		    gmp_miscx_ctl.u64);

	/* Write the new GMX settings with the port still disabled */
	csr_wr_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
		    gmp_prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config completed */
	csr_rd_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));

	/* Re-enable BGX. */
	cmr_config.u64 =
		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
	if (debug)
		debug("%s: Enabling tx and rx packets on %d:%d\n", __func__,
		      xi.interface, index);
	cmr_config.s.data_pkt_tx_en = 1;
	cmr_config.s.data_pkt_rx_en = 1;
	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
		    cmr_config.u64);

	return 0;
}
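
/*
 * Summary of the per-speed settings programmed above (derived from the
 * switch statement; the HRM is the authoritative reference):
 *
 *	speed	PRTX_CFG[SPEED]	[SPEED_MSB]	[SLOTTIME]	samp_pt
 *	10	0		1		0		25
 *	100	0		0		0		5
 *	1000	1		0		1		1
 */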

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set(). This is used by
 * interfaces using the BGX MAC.
 *
 * @param xipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_bgx_sgmii_link_get(int xipd_port)
{
	cvmx_helper_link_info_t result;
	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
	int node = xi.node;
	int index = cvmx_helper_get_interface_index_num(xp.port);

	result.u64 = 0;

	if (!cvmx_helper_is_port_valid(xiface, index))
		return result;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	gmp_control.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
	if (gmp_control.s.loopbck1) {
		int qlm = cvmx_qlm_lmac(xiface, index);
		int speed;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
		else
			speed = cvmx_qlm_get_gbaud_mhz(qlm);
		/* Force 1Gbps full duplex link for internal loopback */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = speed * 8 / 10;
		return result;
	}

	gmp_misc_ctl.u64 = csr_rd_node(
		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
	if (gmp_misc_ctl.s.mac_phy ||
	    cvmx_helper_get_port_force_link_up(xiface, index)) {
		int qlm = cvmx_qlm_lmac(xiface, index);
		int speed;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
		else
			speed = cvmx_qlm_get_gbaud_mhz(qlm);
		/* PHY Mode */
		/* Note that this also works for 1000BASE-X mode */

		result.s.speed = speed * 8 / 10;
		result.s.full_duplex = 1;
		result.s.link_up = 1;
		return result;
	}

	/* MAC Mode */
	return __cvmx_helper_board_link_get(xipd_port);
}
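
/*
 * The speed * 8 / 10 scaling above converts the QLM baud rate into a data
 * rate by removing the 8b/10b line-coding overhead. Worked example: a QLM
 * running at 1250 Mbaud (cvmx_qlm_get_gbaud_mhz() == 1250) reports
 * 1250 * 8 / 10 = 1000 Mbps.
 */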

/**
 * This sequence brings down the link for the XCV RGMII interface
 *
 * @param interface	Interface (BGX) number.  Port index is always 0
 */
static void __cvmx_helper_bgx_rgmii_link_set_down(int interface)
{
	union cvmx_xcv_reset xcv_reset;
	union cvmx_bgxx_cmrx_config cmr_config;
	union cvmx_bgxx_gmp_pcs_mrx_control mr_control;
	union cvmx_bgxx_cmrx_rx_fifo_len rx_fifo_len;
	union cvmx_bgxx_cmrx_tx_fifo_len tx_fifo_len;

	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
	xcv_reset.s.rx_pkt_rst_n = 0;
	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
	csr_rd(CVMX_XCV_RESET);
	mdelay(10); /* Wait for 1 MTU */

	cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
	cmr_config.s.data_pkt_rx_en = 0;
	csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);

	/* Wait for RX and TX to be idle */
	do {
		rx_fifo_len.u64 =
			csr_rd(CVMX_BGXX_CMRX_RX_FIFO_LEN(0, interface));
		tx_fifo_len.u64 =
			csr_rd(CVMX_BGXX_CMRX_TX_FIFO_LEN(0, interface));
	} while (rx_fifo_len.s.fifo_len > 0 && tx_fifo_len.s.lmac_idle != 1);

	cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
	cmr_config.s.data_pkt_tx_en = 0;
	csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);

	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
	xcv_reset.s.tx_pkt_rst_n = 0;
	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
	mr_control.u64 = csr_rd(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface));
	mr_control.s.pwr_dn = 1;
	csr_wr(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface), mr_control.u64);
}

/**
 * Sets a BGX SGMII link down.
 *
 * @param node	Octeon node number
 * @param iface	BGX interface number
 * @param index	BGX port index
 */
static void __cvmx_helper_bgx_sgmii_link_set_down(int node, int iface,
						  int index)
{
	union cvmx_bgxx_gmp_pcs_miscx_ctl gmp_misc_ctl;
	union cvmx_bgxx_gmp_pcs_mrx_control gmp_control;
	union cvmx_bgxx_cmrx_config cmr_config;

	cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
	cmr_config.s.data_pkt_tx_en = 0;
	cmr_config.s.data_pkt_rx_en = 0;
	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface), cmr_config.u64);

	gmp_misc_ctl.u64 =
		csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));

	/* Disable autonegotiation only when in MAC mode. */
	if (gmp_misc_ctl.s.mac_phy == 0) {
		gmp_control.u64 = csr_rd_node(
			node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface));
		gmp_control.s.an_en = 0;
		csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface),
			    gmp_control.u64);
	}

	/* Use GMXENO to force the link down.  It will get re-enabled later... */
	gmp_misc_ctl.s.gmxeno = 1;
	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface),
		    gmp_misc_ctl.u64);
	csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));
}

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead. This is used by interfaces
 * using the BGX MAC.
 *
 * @param xipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_bgx_sgmii_link_set(int xipd_port,
				     cvmx_helper_link_info_t link_info)
{
	cvmx_bgxx_cmrx_config_t cmr_config;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
	int node = xi.node;
	int index = cvmx_helper_get_interface_index_num(xp.port);
	const int iface = xi.interface;
	int rc = 0;

	if (!cvmx_helper_is_port_valid(xiface, index))
		return 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
	if (link_info.s.link_up) {
		cmr_config.s.enable = 1;
		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface),
			    cmr_config.u64);
		/* Apply workaround for errata BGX-22429 */
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
			cvmx_bgxx_cmrx_config_t cmr0;

			cmr0.u64 = csr_rd_node(node,
					       CVMX_BGXX_CMRX_CONFIG(0, iface));
			cmr0.s.enable = 1;
			csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, iface),
				    cmr0.u64);
		}
		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
	} else if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
		if (debug)
			debug("%s: Bringing down XCV RGMII interface %d\n",
			      __func__, xi.interface);
		__cvmx_helper_bgx_rgmii_link_set_down(xi.interface);
	} else { /* Link is down, not RGMII */
		__cvmx_helper_bgx_sgmii_link_set_down(node, iface, index);
		return 0;
	}
	rc = __cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
							      link_info);
	if (cvmx_helper_bgx_is_rgmii(xiface, index))
		rc = __cvmx_helper_bgx_rgmii_speed(link_info);

	return rc;
}
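
/*
 * Usage sketch (illustrative only): the normal flow pairs the get/set calls
 * so the state passed to link_set always matches what the hardware reports:
 *
 *	cvmx_helper_link_info_t li;
 *
 *	li = __cvmx_helper_bgx_sgmii_link_get(xipd_port);
 *	__cvmx_helper_bgx_sgmii_link_set(xipd_port, li);
 *
 * This is effectively what the cvmx_helper_link_autoconf() path recommended
 * above does for BGX SGMII ports.
 */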

/**
 * @INTERNAL
 * Bring up the XAUI interface. After this call packet I/O should be
 * fully functional.
 *
 * @param index port on interface to bring up
 * @param xiface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_bgx_xaui_init(int index, int xiface)
{
	cvmx_bgxx_cmrx_config_t cmr_config;
	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
	cvmx_bgxx_spux_misc_control_t spu_misc_control;
	cvmx_bgxx_spux_control1_t spu_control1;
	cvmx_bgxx_spux_an_control_t spu_an_control;
	cvmx_bgxx_spux_an_adv_t spu_an_adv;
	cvmx_bgxx_spux_fec_control_t spu_fec_control;
	cvmx_bgxx_spu_dbg_control_t spu_dbg_control;
	cvmx_bgxx_smux_tx_append_t smu_tx_append;
	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
	cvmx_helper_interface_mode_t mode;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;
	int node = xi.node;
	int use_auto_neg = 0;
	int kr_mode = 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	mode = cvmx_helper_bgx_get_mode(xiface, index);

	if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
	    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
		kr_mode = 1;
		if (cvmx_helper_bgx_override_autoneg)
			use_auto_neg =
				cvmx_helper_bgx_override_autoneg(xiface, index);
		else
			use_auto_neg = cvmx_helper_get_port_autonegotiation(
				xiface, index);
	}

	/* NOTE: This code was moved first, out of order compared to the HRM,
	 * because the RESET causes all SPU registers to lose their value
	 */
	/* 4. Next, bring up the SMU/SPU and the BGX reconciliation layer
	 * logic:
	 */
	/* 4a. Take SMU/SPU through a reset sequence. Write
	 * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] = 1. Read
	 * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] until it changes value to 0. Keep
	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1 to disable
	 * reception.
	 */
	spu_control1.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
	spu_control1.s.reset = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
		    spu_control1.u64);

	/* 1. Wait for PCS to come out of reset */
	if (CVMX_WAIT_FOR_FIELD64_NODE(
		    node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
		    cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
		debug("BGX%d:%d: SPU stuck in reset\n", node, interface);
		return -1;
	}

	/* 2. Write BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] to 0,
	 * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
	 */
	spu_control1.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
	spu_control1.s.lo_pwr = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
		    spu_control1.u64);

	spu_misc_control.u64 = csr_rd_node(
		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
	spu_misc_control.s.rx_packet_dis = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
		    spu_misc_control.u64);

	/* 3. At this point, it may be appropriate to disable all BGX and
	 * SMU/SPU interrupts, as a number of them will occur during bring-up
	 * of the link.
	 * - zero BGX(0..5)_SMU(0..3)_RX_INT
	 * - zero BGX(0..5)_SMU(0..3)_TX_INT
	 * - zero BGX(0..5)_SPU(0..3)_INT
	 */
	csr_wr_node(node, CVMX_BGXX_SMUX_RX_INT(index, xi.interface),
		    csr_rd_node(node,
				CVMX_BGXX_SMUX_RX_INT(index, xi.interface)));
	csr_wr_node(node, CVMX_BGXX_SMUX_TX_INT(index, xi.interface),
		    csr_rd_node(node,
				CVMX_BGXX_SMUX_TX_INT(index, xi.interface)));
	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface),
		    csr_rd_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface)));

	/* 4. Configure the BGX LMAC. */
	/* 4a. Configure the LMAC type (40GBASE-R/10GBASE-R/RXAUI/XAUI) and
	 * SerDes selection in the BGX(0..5)_CMR(0..3)_CONFIG register, but keep
	 * the ENABLE, DATA_PKT_TX_EN and DATA_PKT_RX_EN bits clear.
	 */
	/* Already done in bgx_setup_one_time */

	/* 4b. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
	 */
	/* 4b. Initialize the selected SerDes lane(s) in the QLM. See Section
	 * 28.1.2.2 in the GSER chapter.
	 */
	/* Already done in QLM setup */

	/* 4c. For 10GBASE-KR or 40GBASE-KR, enable link training by writing
	 * BGX(0..5)_SPU(0..3)_BR_PMD_CONTROL[TRAIN_EN] = 1.
	 */

	if (kr_mode && !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		csr_wr_node(node,
			    CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, interface), 0);
		csr_wr_node(node,
			    CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, interface), 0);
		csr_wr_node(node,
			    CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, interface), 0);
		pmd_control.u64 = csr_rd_node(
			node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface));
		pmd_control.s.train_en = 1;
		csr_wr_node(node,
			    CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface),
			    pmd_control.u64);
	}

	/* 4d. Program all other relevant BGX configuration while
	 * BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 0. This includes all things
	 * described in this chapter.
	 */
	/* Always add FCS to PAUSE frames */
	smu_tx_append.u64 = csr_rd_node(
		node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
	smu_tx_append.s.fcs_c = 1;
	csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
		    smu_tx_append.u64);

	/* 4e. If Forward Error Correction is desired for 10GBASE-R or
	 * 40GBASE-R, enable it by writing
	 * BGX(0..5)_SPU(0..3)_FEC_CONTROL[FEC_EN] = 1.
	 */
	/* FEC is optional for 10GBASE-KR, 40GBASE-KR4, and XLAUI. It stays
	 * disabled unless the board override or the port configuration
	 * enables it.
	 */
	spu_fec_control.u64 = csr_rd_node(
		node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
	if (cvmx_helper_bgx_override_fec)
		spu_fec_control.s.fec_en =
			cvmx_helper_bgx_override_fec(xiface, index);
	else
		spu_fec_control.s.fec_en =
			cvmx_helper_get_port_fec(xiface, index);
	csr_wr_node(node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface),
		    spu_fec_control.u64);

	/* 4f. If Auto-Negotiation is desired, configure and enable
	 * Auto-Negotiation as described in Section 33.6.2.
	 */
	spu_an_control.u64 = csr_rd_node(
		node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
	/* Disable extended next pages */
	spu_an_control.s.xnp_en = 0;
	spu_an_control.s.an_en = use_auto_neg;
	csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface),
		    spu_an_control.u64);

	spu_fec_control.u64 = csr_rd_node(
		node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
	spu_an_adv.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface));
	spu_an_adv.s.fec_req = spu_fec_control.s.fec_en;
	spu_an_adv.s.fec_able = 1;
	spu_an_adv.s.a100g_cr10 = 0;
	spu_an_adv.s.a40g_cr4 = 0;
	spu_an_adv.s.a40g_kr4 = (mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
	spu_an_adv.s.a10g_kr = (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR);
	spu_an_adv.s.a10g_kx4 = 0;
	spu_an_adv.s.a1g_kx = 0;
	spu_an_adv.s.xnp_able = 0;
	spu_an_adv.s.rf = 0;
	csr_wr_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface),
		    spu_an_adv.u64);

	/* 3. Set BGX(0..5)_SPU_DBG_CONTROL[AN_ARB_LINK_CHK_EN] = 1. */
	spu_dbg_control.u64 =
		csr_rd_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
	spu_dbg_control.s.an_nonce_match_dis = 1; /* Needed for loopback */
	spu_dbg_control.s.an_arb_link_chk_en |= kr_mode;
	csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
		    spu_dbg_control.u64);

	/* 4. Execute the link bring-up sequence in Section 33.6.3. */

	/* 5. If the auto-negotiation protocol is successful,
	 * BGX(0..5)_SPU(0..3)_AN_ADV[AN_COMPLETE] is set along with
	 * BGX(0..5)_SPU(0..3)_INT[AN_COMPLETE] when the link is up.
	 */

	/* 3h. Set BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 1 and
	 * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0 to enable the LMAC.
	 */
	cmr_config.u64 =
		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
	cmr_config.s.enable = 1;
	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
		    cmr_config.u64);
	/* Apply workaround for errata BGX-22429 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
		cvmx_bgxx_cmrx_config_t cmr0;

		cmr0.u64 = csr_rd_node(node,
				       CVMX_BGXX_CMRX_CONFIG(0, xi.interface));
		cmr0.s.enable = 1;
		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, xi.interface),
			    cmr0.u64);
	}

	spu_control1.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
	spu_control1.s.lo_pwr = 0;
	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
		    spu_control1.u64);

	/* 4g. Set the polarity and lane swapping of the QLM SerDes. Refer to
	 * Section 33.4.1, BGX(0..5)_SPU(0..3)_MISC_CONTROL[XOR_TXPLRT,XOR_RXPLRT]
	 * and BGX(0..5)_SPU(0..3)_MISC_CONTROL[TXPLRT,RXPLRT].
	 */

	/* 4c. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0. */
	spu_control1.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
	spu_control1.s.lo_pwr = 0;
	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
		    spu_control1.u64);

	/* 4d. Select Deficit Idle Count mode and unidirectional enable/disable
	 * via BGX(0..5)_SMU(0..3)_TX_CTL[DIC_EN,UNI_EN].
	 */
	smu_tx_ctl.u64 =
		csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
	smu_tx_ctl.s.dic_en = 1;
	smu_tx_ctl.s.uni_en = 0;
	csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface),
		    smu_tx_ctl.u64);

	{
		/* Calculate the number of s-clk cycles per usec. */
		const u64 clock_mhz = 1200; /* todo: fixme */
		cvmx_bgxx_spu_dbg_control_t dbg_control;

		dbg_control.u64 = csr_rd_node(
			node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
		dbg_control.s.us_clk_period = clock_mhz - 1;
		csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
			    dbg_control.u64);
	}
	/* The PHY often takes at least 100ms to stabilize */
	__cvmx_helper_bgx_interface_enable_delay(mode);
	return 0;
}

static void __cvmx_bgx_start_training(int node, int unit, int index)
{
	cvmx_bgxx_spux_int_t spu_int;
	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
	cvmx_bgxx_spux_an_control_t an_control;

	/* Clear the training interrupts (W1C) */
	spu_int.u64 = 0;
	spu_int.s.training_failure = 1;
	spu_int.s.training_done = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), spu_int.u64);

	/* These registers aren't cleared when training is restarted. Manually
	 * clear them as per Errata BGX-20968.
	 */
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);

	/* Disable autonegotiation */
	an_control.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit));
	an_control.s.an_en = 0;
	csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit),
		    an_control.u64);
	udelay(1);

	/* Restart training */
	pmd_control.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
	pmd_control.s.train_en = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
		    pmd_control.u64);

	udelay(1);
	pmd_control.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
	pmd_control.s.train_restart = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
		    pmd_control.u64);
}

static void __cvmx_bgx_restart_training(int node, int unit, int index)
{
	cvmx_bgxx_spux_int_t spu_int;
	cvmx_bgxx_spux_br_pmd_control_t pmd_control;

	/* Clear the training interrupts (W1C) */
	spu_int.u64 = 0;
	spu_int.s.training_failure = 1;
	spu_int.s.training_done = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), spu_int.u64);

	udelay(1700); /* Wait 1.7 msec */

	/* These registers aren't cleared when training is restarted. Manually
	 * clear them as per Errata BGX-20968.
	 */
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);

	/* Restart training */
	pmd_control.u64 =
		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
	pmd_control.s.train_restart = 1;
	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
		    pmd_control.u64);
}

/*
 * @INTERNAL
 * Wrapper function to configure the BGX, does not enable.
 *
 * @param xipd_port IPD/PKO port to configure.
 * @param phy_pres  If set, enable disparity; only applies to the RXAUI
 *		    interface
 *
 * @return Zero on success, negative on failure.
 */
int __cvmx_helper_bgx_port_init(int xipd_port, int phy_pres)
{
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xp.port);
	cvmx_helper_interface_mode_t mode;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	mode = cvmx_helper_bgx_get_mode(xiface, index);

	__cvmx_bgx_common_init_pknd(xiface, index);

	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
		cvmx_bgxx_gmp_gmi_txx_thresh_t gmi_tx_thresh;
		cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
		cvmx_bgxx_gmp_gmi_txx_sgmii_ctl_t gmp_sgmii_ctl;

		/* Set TX Threshold */
		gmi_tx_thresh.u64 = 0;
		gmi_tx_thresh.s.cnt = 0x20;
		csr_wr_node(xi.node,
			    CVMX_BGXX_GMP_GMI_TXX_THRESH(index, xi.interface),
			    gmi_tx_thresh.u64);
		__cvmx_helper_bgx_sgmii_hardware_init_one_time(xiface, index);
		gmp_txx_append.u64 = csr_rd_node(
			xi.node,
			CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
		gmp_sgmii_ctl.u64 = csr_rd_node(
			xi.node,
			CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index, xi.interface));
		gmp_sgmii_ctl.s.align = gmp_txx_append.s.preamble ? 0 : 1;
		csr_wr_node(xi.node,
			    CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index,
							    xi.interface),
			    gmp_sgmii_ctl.u64);
		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
			/* Disable the XCV interface when initialized */
			union cvmx_xcv_reset xcv_reset;

			if (debug)
				debug("%s: Disabling RGMII XCV interface\n",
				      __func__);
			xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
			xcv_reset.s.enable = 0;
			xcv_reset.s.tx_pkt_rst_n = 0;
			xcv_reset.s.rx_pkt_rst_n = 0;
			csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
		}
	} else {
		int res, cred;
		cvmx_bgxx_smux_tx_thresh_t smu_tx_thresh;

		res = __cvmx_helper_bgx_xaui_init(index, xiface);
		if (res == -1) {
#ifdef DEBUG_BGX
			debug("Failed to enable XAUI for %d:BGX(%d,%d)\n",
			      xi.node, xi.interface, index);
#endif
			return res;
		}
		/* See the BGX_SMU_TX_THRESH register description */
		cred = __cvmx_helper_bgx_fifo_size(xiface, index) >> 4;
		smu_tx_thresh.u64 = 0;
		smu_tx_thresh.s.cnt = cred - 10;
		csr_wr_node(xi.node,
			    CVMX_BGXX_SMUX_TX_THRESH(index, xi.interface),
			    smu_tx_thresh.u64);
		if (debug)
			debug("%s: BGX%d:%d TX-thresh=%d\n", __func__,
			      xi.interface, index,
			      (unsigned int)smu_tx_thresh.s.cnt);

		/* Set disparity for RXAUI interface as described in the
		 * Marvell RXAUI Interface specification.
		 */
		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI && phy_pres) {
			cvmx_bgxx_spux_misc_control_t misc_control;

			misc_control.u64 = csr_rd_node(
				xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(
						 index, xi.interface));
			misc_control.s.intlv_rdisp = 1;
			csr_wr_node(xi.node,
				    CVMX_BGXX_SPUX_MISC_CONTROL(index,
								xi.interface),
				    misc_control.u64);
		}
	}
	return 0;
}
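
/*
 * Worked example for the SMU TX threshold above: credits are counted in
 * 16-byte units, hence cred = fifo_size >> 4. Assuming an 8 KiB per-LMAC
 * TX FIFO (four active LMACs on a 32 KiB FIFO), cred = 8192 >> 4 = 512 and
 * CNT is programmed to 512 - 10 = 502.
 */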

/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again. This is used by
 * interfaces using the BGX MAC.
 *
 * @param xipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int __cvmx_helper_bgx_sgmii_configure_loopback(int xipd_port,
					       int enable_internal,
					       int enable_external)
{
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
	int node = xi.node;
	int index = cvmx_helper_get_interface_index_num(xp.port);
	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_mrx_control;
	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;

	if (!cvmx_helper_is_port_valid(xiface, index))
		return 0;

	if (debug)
		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
		      xi.interface, index);

	if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
		cvmx_xcv_ctl_t xcv_ctl;
		cvmx_helper_link_info_t link_info;

		xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
		xcv_ctl.s.lpbk_int = enable_internal;
		xcv_ctl.s.lpbk_ext = enable_external;
		csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);

		/* Initialize link and speed */
		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
		link_info = __cvmx_helper_bgx_sgmii_link_get(xipd_port);
		__cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
								 link_info);
		__cvmx_helper_bgx_rgmii_speed(link_info);
	} else {
		gmp_mrx_control.u64 = csr_rd_node(
			node,
			CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
		gmp_mrx_control.s.loopbck1 = enable_internal;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
			    gmp_mrx_control.u64);

		gmp_misc_ctl.u64 = csr_rd_node(
			node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
		gmp_misc_ctl.s.loopbck2 = enable_external;
		csr_wr_node(node,
			    CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
			    gmp_misc_ctl.u64);
		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
	}

	return 0;
}
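
/*
 * Usage sketch (illustrative only): put a port into internal loopback so
 * transmitted packets are received back by Octeon, e.g. for a packet-path
 * self test, then restore normal operation:
 *
 *	__cvmx_helper_bgx_sgmii_configure_loopback(xipd_port, 1, 0);
 *	// ... send and receive test traffic ...
 *	__cvmx_helper_bgx_sgmii_configure_loopback(xipd_port, 0, 0);
 */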
1522
1523static int __cvmx_helper_bgx_xaui_link_init(int index, int xiface)
1524{
1525	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
1526	int node = xi.node;
1527	cvmx_bgxx_spux_status1_t spu_status1;
1528	cvmx_bgxx_spux_status2_t spu_status2;
1529	cvmx_bgxx_spux_br_status2_t br_status2;
1530	cvmx_bgxx_spux_int_t spu_int;
1531	cvmx_bgxx_spux_misc_control_t spu_misc_control;
1532	cvmx_bgxx_spux_an_control_t spu_an_control;
1533	cvmx_bgxx_spux_an_status_t spu_an_status;
1534	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
1535	cvmx_bgxx_cmrx_config_t cmr_config;
1536	cvmx_helper_interface_mode_t mode;
1537	int use_training = 0;
1538	int rgmii_first = 0;
1539	int qlm = cvmx_qlm_lmac(xiface, index);
1540	int use_ber = 0;
1541	u64 err_blks;
1542	u64 ber_cnt;
1543	u64 error_debounce;
1544
1545	if (debug)
1546		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
1547		      xi.interface, index);
1548
1549	rgmii_first = cvmx_helper_bgx_is_rgmii(xi.interface, index);
1550
1551	mode = cvmx_helper_bgx_get_mode(xiface, index);
1552	if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
1553	    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)
1554		use_training = 1;
1555
1556	if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
1557	     mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
1558	     mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
1559	     mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4))
1560		use_ber = 1;
1561
1562	/* Disable packet reception, CMR as well as SPU block */
1563	cmr_config.u64 =
1564		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
1565	cmr_config.s.data_pkt_tx_en = 0;
1566	cmr_config.s.data_pkt_rx_en = 0;
1567	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
1568		    cmr_config.u64);
1569	spu_misc_control.u64 = csr_rd_node(
1570		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
1571	spu_misc_control.s.rx_packet_dis = 1;
1572	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
1573		    spu_misc_control.u64);
1574
1575	spu_an_control.u64 = csr_rd_node(
1576		node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
1577	if (spu_an_control.s.an_en) {
1578		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
1579			cvmx_bgxx_spux_int_t spu_int;
1580
1581			spu_int.u64 = csr_rd_node(
1582				node, CVMX_BGXX_SPUX_INT(index, xi.interface));
1583			if (!spu_int.s.an_link_good) {
1584				static u64 restart_auto_neg[2][6][4] = {
1585					[0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
1586				};
1587				u64 now = get_timer(0);
1588				u64 next_restart =
1589					restart_auto_neg[node][xi.interface]
1590							[index] +
1591					2000;
1592
1593				if (now >= next_restart)
1594					return -1;
1595
1596				restart_auto_neg[node][xi.interface][index] =
1597					now;
1598
1599				/* Clear the auto negotiation (W1C) */
1600				spu_int.u64 = 0;
1601				spu_int.s.an_complete = 1;
1602				spu_int.s.an_link_good = 1;
1603				spu_int.s.an_page_rx = 1;
1604				csr_wr_node(node,
1605					    CVMX_BGXX_SPUX_INT(index,
1606							       xi.interface),
1607					    spu_int.u64);
1608				/* Restart auto negotiation */
1609				spu_an_control.u64 = csr_rd_node(
1610					node, CVMX_BGXX_SPUX_AN_CONTROL(
1611						      index, xi.interface));
1612				spu_an_control.s.an_restart = 1;
1613				csr_wr_node(node,
1614					    CVMX_BGXX_SPUX_AN_CONTROL(
1615						    index, xi.interface),
1616					    spu_an_control.u64);
1617				return -1;
1618			}
1619		} else {
1620			spu_an_status.u64 = csr_rd_node(
1621				node,
1622				CVMX_BGXX_SPUX_AN_STATUS(index, xi.interface));
1623			if (!spu_an_status.s.an_complete) {
1624				static u64 restart_auto_neg[2][6][4] = {
1625					[0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
1626				};
1627				u64 now = get_timer(0);
1628				u64 next_restart =
1629					restart_auto_neg[node][xi.interface]
1630							[index] +
1631					2000;
1632				if (now >= next_restart) {
1633#ifdef DEBUG_BGX
1634					debug("WARNING: BGX%d:%d: Waiting for autoneg to complete\n",
1635					      xi.interface, index);
1636#endif
1637					return -1;
1638				}
1639
1640				restart_auto_neg[node][xi.interface][index] =
1641					now;
1642				/* Restart auto negotiation */
1643				spu_an_control.u64 = csr_rd_node(
1644					node, CVMX_BGXX_SPUX_AN_CONTROL(
1645						      index, xi.interface));
1646				spu_an_control.s.an_restart = 1;
1647				csr_wr_node(node,
1648					    CVMX_BGXX_SPUX_AN_CONTROL(
1649						    index, xi.interface),
1650					    spu_an_control.u64);
1651				return -1;
1652			}
1653		}
1654	}
1655
1656	if (use_training) {
1657		spu_int.u64 = csr_rd_node(
1658			node, CVMX_BGXX_SPUX_INT(index, xi.interface));
1659		pmd_control.u64 = csr_rd_node(
1660			node,
1661			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
1662		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
1663		    pmd_control.s.train_en == 0) {
1664			__cvmx_bgx_start_training(node, xi.interface, index);
1665			return -1;
1666		}
1667		cvmx_qlm_gser_errata_27882(node, qlm, index);
1668		spu_int.u64 = csr_rd_node(
1669			node, CVMX_BGXX_SPUX_INT(index, xi.interface));
1670
1671		if (spu_int.s.training_failure &&
1672		    !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
1673			__cvmx_bgx_restart_training(node, xi.interface, index);
1674			return -1;
1675		}
1676		if (!spu_int.s.training_done) {
1677			debug("Waiting for link training\n");
1678			return -1;
1679		}
1680	}
1681
1682	/* (GSER-21957) GSER RX equalization is needed on >= 5 Gbaud non-KR
1683	 * channels (DXAUI, RXAUI, XFI and XLAUI), so we perform RX
1684	 * equalization the first time the link is receiving data
1685	 */
1686	if (use_training == 0) {
1687		int lane = index;
1688		cvmx_bgxx_spux_control1_t control1;
1689
1690		cmr_config.u64 = csr_rd_node(
1691			node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
1692		control1.u64 = csr_rd_node(
1693			node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
1694		if (control1.s.loopbck) {
1695			/* Skip RX equalization when in loopback */
1696		} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
1697			   mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
1698			lane = -1;
1699			if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
1700#ifdef DEBUG_BGX
1701				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
1702				      node, xi.interface, index, qlm);
1703#endif
1704				return -1;
1705			}
1706			/* If BGX2 uses both DLMs, configure the other DLM as well */
1707			if (OCTEON_IS_MODEL(OCTEON_CN73XX) &&
1708			    xi.interface == 2) {
1709				if (__cvmx_qlm_rx_equalization(node, 6, lane)) {
1710#ifdef DEBUG_BGX
1711					debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
1712					      node, xi.interface, index, qlm);
1713#endif
1714					return -1;
1715				}
1716			}
1717			/* RXAUI */
1718		} else if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
1719			lane = index * 2;
1720			if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
1721			    xi.interface == 2) {
1722				lane = 0;
1723			}
1724			if (rgmii_first)
1725				lane--;
1726			if (__cvmx_qlm_rx_equalization(node, qlm, lane) ||
1727			    __cvmx_qlm_rx_equalization(node, qlm, lane + 1)) {
1728#ifdef DEBUG_BGX
1729				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
1730				      node, xi.interface, index, qlm);
1731#endif
1732				return -1;
1733			}
1734			/* XFI */
1735		} else if (cmr_config.s.lmac_type != 5) {
1736			if (rgmii_first)
1737				lane--;
1738			if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
1739			    xi.interface == 2) {
1740				lane = index - 2;
1741			} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX) &&
1742				   index >= 2) {
1743				lane = index - 2;
1744			}
1745			if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
1746#ifdef DEBUG_BGX
1747				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
1748				      node, xi.interface, index, qlm);
1749#endif
1750				return -1;
1751			}
1752		}
1753	}
1754
1755	if (CVMX_WAIT_FOR_FIELD64_NODE(
1756		    node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
1757		    cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
1758#ifdef DEBUG_BGX
1759		debug("ERROR: %d:BGX%d:%d: PCS in reset\n", node, xi.interface,
1760		      index);
1761#endif
1762		return -1;
1763	}
1764
1765	if (use_ber) {
1766		if (CVMX_WAIT_FOR_FIELD64_NODE(
1767			    node,
1768			    CVMX_BGXX_SPUX_BR_STATUS1(index, xi.interface),
1769			    cvmx_bgxx_spux_br_status1_t, blk_lock, ==, 1,
1770			    10000)) {
1771#ifdef DEBUG_BGX
1772			debug("ERROR: %d:BGX%d:%d: BASE-R PCS block not locked\n",
1773			      node, xi.interface, index);
1774
1775			if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
1776			    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
1777				cvmx_bgxx_spux_br_algn_status_t bstatus;
1778
1779				bstatus.u64 = csr_rd_node(
1780					node, CVMX_BGXX_SPUX_BR_ALGN_STATUS(
1781						      index, xi.interface));
1782				debug("ERROR: %d:BGX%d:%d: LANE BLOCK_LOCK:%x LANE MARKER_LOCK:%x\n",
1783				      node, xi.interface, index,
1784				      bstatus.s.block_lock,
1785				      bstatus.s.marker_lock);
1786			}
1787#endif
1788			return -1;
1789		}
1790	} else {
1791		/* (5) Check to make sure that the link appears up and stable.
1792		 */
1793		/* Wait for PCS to be aligned */
1794		if (CVMX_WAIT_FOR_FIELD64_NODE(
1795			    node, CVMX_BGXX_SPUX_BX_STATUS(index, xi.interface),
1796			    cvmx_bgxx_spux_bx_status_t, alignd, ==, 1, 10000)) {
1797#ifdef DEBUG_BGX
1798			debug("ERROR: %d:BGX%d:%d: PCS not aligned\n", node,
1799			      xi.interface, index);
1800#endif
1801			return -1;
1802		}
1803	}
1804
1805	if (use_ber) {
1806		/* Set the BGXX_SPUX_BR_STATUS2.latched_lock bit (latching low).
1807		 * This will be checked prior to enabling packet tx and rx,
1808		 * ensuring block lock is sustained throughout the BGX link-up
1809		 * procedure
1810		 */
1811		br_status2.u64 = csr_rd_node(
1812			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
1813		br_status2.s.latched_lock = 1;
1814		csr_wr_node(node,
1815			    CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface),
1816			    br_status2.u64);
1817	}
1818
1819	/* Clear rcvflt bit (latching high) and read it back */
1820	spu_status2.u64 =
1821		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
1822	spu_status2.s.rcvflt = 1;
1823	csr_wr_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface),
1824		    spu_status2.u64);
1825
1826	spu_status2.u64 =
1827		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
1828	if (spu_status2.s.rcvflt) {
1829#ifdef DEBUG_BGX
1830		debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
1831		      node, xi.interface, index);
1832#endif
1833		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && use_training)
1834			__cvmx_bgx_restart_training(node, xi.interface, index);
1835		/* debug("training restarting\n"); */
1836		return -1;
1837	}
1838
1839	/* Wait for MAC RX to be ready */
1840	if (CVMX_WAIT_FOR_FIELD64_NODE(
1841		    node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface),
1842		    cvmx_bgxx_smux_rx_ctl_t, status, ==, 0, 10000)) {
1843#ifdef DEBUG_BGX
1844		debug("ERROR: %d:BGX%d:%d: RX not ready\n", node, xi.interface,
1845		      index);
1846#endif
1847		return -1;
1848	}
1849
1850	/* Wait for BGX RX to be idle */
1851	if (CVMX_WAIT_FOR_FIELD64_NODE(
1852		    node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
1853		    cvmx_bgxx_smux_ctrl_t, rx_idle, ==, 1, 10000)) {
1854#ifdef DEBUG_BGX
1855		debug("ERROR: %d:BGX%d:%d: RX not idle\n", node, xi.interface,
1856		      index);
1857#endif
1858		return -1;
1859	}
1860
1861	/* Wait for GMX TX to be idle */
1862	if (CVMX_WAIT_FOR_FIELD64_NODE(
1863		    node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
1864		    cvmx_bgxx_smux_ctrl_t, tx_idle, ==, 1, 10000)) {
1865#ifdef DEBUG_BGX
1866		debug("ERROR: %d:BGX%d:%d: TX not idle\n", node, xi.interface,
1867		      index);
1868#endif
1869		return -1;
1870	}
1871
1872	/* rcvflt should still be 0 */
1873	spu_status2.u64 =
1874		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
1875	if (spu_status2.s.rcvflt) {
1876#ifdef DEBUG_BGX
1877		debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
1878		      node, xi.interface, index);
1879#endif
1880		return -1;
1881	}
1882
1883	/* Receive link is latching low. Force it high and verify it */
1884	spu_status1.u64 =
1885		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
1886	spu_status1.s.rcv_lnk = 1;
1887	csr_wr_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
1888		    spu_status1.u64);
1889
1890	if (CVMX_WAIT_FOR_FIELD64_NODE(
1891		    node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
1892		    cvmx_bgxx_spux_status1_t, rcv_lnk, ==, 1, 10000)) {
1893#ifdef DEBUG_BGX
1894		debug("ERROR: %d:BGX%d:%d: Receive link down\n", node,
1895		      xi.interface, index);
1896#endif
1897		return -1;
1898	}
1899
1900	if (use_ber) {
1901		/* Clearing BER_CNT and ERR_BLKs */
1902		br_status2.u64 = csr_rd_node(
1903			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
1904
1905		/* If set, clear the LATCHED_BER bit by writing a one to it */
1906		if (br_status2.s.latched_ber)
1907			csr_wr_node(node,
1908				    CVMX_BGXX_SPUX_BR_STATUS2(index,
1909							      xi.interface),
1910				    br_status2.u64);
1911
1912		error_debounce = get_timer(0);
1913
1914		/* Clear error counts */
1915		err_blks = 0;
1916		ber_cnt = 0;
1917
1918		/* Verify that the link is up and error-free for 100 ms */
1919		while (get_timer(error_debounce) < 100) {
1920			spu_status1.u64 = csr_rd_node(
1921				node,
1922				CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
1923			/* Check that the receive link is still up (rcv_lnk = 1) */
1924			if (!spu_status1.s.rcv_lnk) {
1925#ifdef DEBUG_BGX
1926				debug("ERROR: %d:BGX%d:%d: Receive link down\n",
1927				      node, xi.interface, index);
1928#endif
1929				return -1;
1930			}
1931
1932			/* Check whether latched_ber = 1 (BER >= 10^-4) */
1933			br_status2.u64 = csr_rd_node(
1934				node,
1935				CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
1936			err_blks += br_status2.s.err_blks;
1937			ber_cnt += br_status2.s.ber_cnt;
1938
1939			if (br_status2.s.latched_ber) {
1940#ifdef DEBUG_BGX
1941				debug("ERROR: %d:BGX%d:%d: BER test failed, BER >= 10^-4, need to retry\n",
1942				      node, xi.interface, index);
1943#endif
1944				return -1;
1945			}
1946			/* Checking that latched BLOCK_LOCK is still set (Block Lock never lost) */
1947			if (!br_status2.s.latched_lock) {
1948#ifdef DEBUG_BGX
1949				debug("ERROR: %d:BGX%d:%d: BASE-R PCS block lock lost, need to retry\n",
1950				      node, xi.interface, index);
1951#endif
1952				return -1;
1953			}
1954
1955			/* Check the error counters; they must be 0, since any
1956			 * nonzero count implies an error rate far above 1E-12
1957			 */
1958			if (err_blks > 0) {
1959#ifdef DEBUG_BGX
1960				debug("ERROR: %d:BGX%d:%d: BASE-R errored-blocks (%llu) detected, need to retry\n",
1961				      node, xi.interface, index,
1962				      (unsigned long long)err_blks);
1963#endif
1964				return -1;
1965			}
1966
1967			if (ber_cnt > 0) {
1968#ifdef DEBUG_BGX
1969				debug("ERROR: %d:BGX%d:%d: BASE-R bit-errors (%llu) detected, need to retry\n",
1970				      node, xi.interface, index,
1971				      (unsigned long long)ber_cnt);
1972#endif
1973				return -1;
1974			}
1975
1976			udelay(1000);
1977		}
1978
1979		/* Clear out the BGX error counters/bits. These errors are
1980		 * expected as part of the BGX link up procedure
1981		 */
1982		/* BIP_ERR counters clear as part of this read */
1983		csr_rd_node(node,
1984			    CVMX_BGXX_SPUX_BR_BIP_ERR_CNT(index, xi.interface));
1985		/* BER_CNT and ERR_BLKs clear as part of this read */
1986		br_status2.u64 = csr_rd_node(
1987			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
1988	}
1989
1990	/* (7) Enable packet transmit and receive */
1991	spu_misc_control.u64 = csr_rd_node(
1992		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
1993	spu_misc_control.s.rx_packet_dis = 0;
1994	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
1995		    spu_misc_control.u64);
1996
1997	if (debug)
1998		debug("%s: Enabling tx and rx data packets\n", __func__);
1999	cmr_config.u64 =
2000		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2001	cmr_config.s.data_pkt_tx_en = 1;
2002	cmr_config.s.data_pkt_rx_en = 1;
2003	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
2004		    cmr_config.u64);
2005	return 0;
2006}
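
/*
 * A minimal caller sketch (illustrative only; it mirrors what the
 * enable functions below do): __cvmx_helper_bgx_xaui_link_init()
 * returns -1 whenever autonegotiation, link training or RX
 * equalization has not settled yet, so callers retry a bounded number
 * of times.
 */
#if 0
static int example_bring_up_lmac(int xiface, int index)
{
	int retries = 5;	/* the bound used by the callers below */

	while (retries--) {
		if (__cvmx_helper_bgx_xaui_link_init(index, xiface) == 0)
			return 0;
	}
	return -1;		/* the link never came up error-free */
}
#endif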
2007
2008int __cvmx_helper_bgx_xaui_enable(int xiface)
2009{
2010	int index;
2011	cvmx_helper_interface_mode_t mode;
2012	int num_ports = cvmx_helper_ports_on_interface(xiface);
2013
2014	for (index = 0; index < num_ports; index++) {
2015		int res;
2016		int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
2017		int phy_pres;
2018		struct cvmx_xiface xi =
2019			cvmx_helper_xiface_to_node_interface(xiface);
2020		static int count
2021			[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
2022			[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
2023				[0 ... CVMX_MAX_NODES -
2024				 1][0 ... CVMX_HELPER_MAX_IFACE -
2025				    1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
2026					    1] = 0 }
2027			};
2028
2029		mode = cvmx_helper_bgx_get_mode(xiface, index);
2030
2031		/* Set disparity for RXAUI interface as described in the
2032		 * Marvell RXAUI Interface specification.
2033		 */
2034		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
2035		    (cvmx_helper_get_port_phy_present(xiface, index)))
2036			phy_pres = 1;
2037		else
2038			phy_pres = 0;
2039		__cvmx_helper_bgx_port_init(xipd_port, phy_pres);
2040
2041retry_link:
2042		res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
2043		/* RX equalization or autonegotiation can take a little longer,
2044		 * so retry the link up to 5 times for now
2045		 */
2046		if (res == -1 && count[xi.node][xi.interface][index] < 5) {
2047			count[xi.node][xi.interface][index]++;
2048#ifdef DEBUG_BGX
2049			debug("%d:BGX(%d,%d): Failed to get link, retrying\n",
2050			      xi.node, xi.interface, index);
2051#endif
2052			goto retry_link;
2053		}
2054
2055		if (res == -1) {
2056#ifdef DEBUG_BGX
2057			debug("%d:BGX(%d,%d): Failed to get link\n", xi.node,
2058			      xi.interface, index);
2059#endif
2060			continue;
2061		}
2062	}
2063	return 0;
2064}
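
/*
 * Example usage (a sketch; node 0 / interface 2 are hypothetical):
 * enable all LMACs on one BGX after the QLMs have been configured.
 */
#if 0
static void example_enable_bgx2(void)
{
	int xiface = cvmx_helper_node_interface_to_xiface(0, 2);

	__cvmx_helper_bgx_xaui_enable(xiface);
}
#endif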
2065
2066cvmx_helper_link_info_t __cvmx_helper_bgx_xaui_link_get(int xipd_port)
2067{
2068	int xiface = cvmx_helper_get_interface_num(xipd_port);
2069	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2070	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
2071	int index = cvmx_helper_get_interface_index_num(xp.port);
2072	cvmx_bgxx_spux_status1_t spu_status1;
2073	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
2074	cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
2075	cvmx_bgxx_cmrx_config_t cmr_config;
2076	cvmx_helper_link_info_t result;
2077	cvmx_helper_interface_mode_t mode;
2078	cvmx_bgxx_spux_misc_control_t spu_misc_control;
2079	cvmx_bgxx_spux_br_status2_t br_status2;
2080
2081	result.u64 = 0;
2082
2083	if (debug)
2084		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
2085		      xi.interface, index);
2086
2087	mode = cvmx_helper_bgx_get_mode(xiface, index);
2088	if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2089		return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
2090
2091	/* Reading current rx/tx link status */
2092	spu_status1.u64 = csr_rd_node(
2093		xi.node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
2094	smu_tx_ctl.u64 = csr_rd_node(
2095		xi.node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
2096	smu_rx_ctl.u64 = csr_rd_node(
2097		xi.node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
2098	/* Reading tx/rx packet enables */
2099	cmr_config.u64 = csr_rd_node(
2100		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2101	spu_misc_control.u64 = csr_rd_node(
2102		xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
2103
2104	if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
2105	    cmr_config.s.data_pkt_tx_en == 1 &&
2106	    cmr_config.s.data_pkt_rx_en == 1 &&
2107	    spu_misc_control.s.rx_packet_dis == 0 &&
2108	    spu_status1.s.rcv_lnk) {
2109		int lanes;
2110		int qlm = cvmx_qlm_lmac(xiface, index);
2111		u64 speed;
2112
2113		result.s.link_up = 1;
2114		result.s.full_duplex = 1;
2115		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
2116			speed = cvmx_qlm_get_gbaud_mhz_node(xi.node, qlm);
2117		else
2118			speed = cvmx_qlm_get_gbaud_mhz(qlm);
2119
2120		cmr_config.u64 = csr_rd_node(
2121			xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2122		switch (cmr_config.s.lmac_type) {
2123		default:
2124		case 1: // XAUI
2125			speed = (speed * 8 + 5) / 10;
2126			lanes = 4;
2127			break;
2128		case 2: // RXAUI
2129			speed = (speed * 8 + 5) / 10;
2130			lanes = 2;
2131			break;
2132		case 3: // XFI
2133			speed = (speed * 64 + 33) / 66;
2134			lanes = 1;
2135			break;
2136		case 4: // XLAUI
2137			/* Adjust the speed when XLAUI is configured at 6.250Gbps */
2138			if (speed == 6250)
2139				speed = 6445;
2140			speed = (speed * 64 + 33) / 66;
2141			lanes = 4;
2142			break;
2143		}
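
		/*
		 * Worked example (assuming the QLM reports its lane rate in
		 * MHz): XAUI/RXAUI use 8b/10b coding, so a 3125 MHz lane
		 * carries (3125 * 8 + 5) / 10 = 2500 Mbps of payload, and
		 * XFI/XLAUI use 64b/66b, so a 10312 MHz lane carries
		 * (10312 * 64 + 33) / 66 = 10000 Mbps. The "+ 5" and "+ 33"
		 * terms round the integer division to nearest.
		 */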
2144
2145		if (debug)
2146			debug("%s: baud: %llu, lanes: %d\n", __func__,
2147			      (unsigned long long)speed, lanes);
2148		speed *= lanes;
2149		result.s.speed = speed;
2150	} else {
2151		int res;
2152		u64 err_blks = 0;
2153		u64 ber_cnt = 0;
2154
2155		/* Check for err_blk and ber errors if 10G or 40G */
2156		if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
2157		     mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
2158		     mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
2159		     mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)) {
2160			br_status2.u64 = csr_rd_node(
2161				xi.node,
2162				CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
2163			err_blks = br_status2.s.err_blks;
2164			ber_cnt = br_status2.s.ber_cnt;
2165		}
2166
2167		/* Checking if the link is up and error-free but we are receiving remote-faults */
2168		if (smu_tx_ctl.s.ls != 1 && smu_rx_ctl.s.status != 1 &&
2169		    cmr_config.s.data_pkt_tx_en == 1 &&
2170		    cmr_config.s.data_pkt_rx_en == 1 &&
2171		    spu_misc_control.s.rx_packet_dis == 0 &&
2172		    err_blks == 0 && ber_cnt == 0 &&
2173		    spu_status1.s.rcv_lnk) {
2174			result.s.init_success = 1;
2175#ifdef DEBUG_BGX
2176			debug("Receiving remote-fault ordered sets %d:BGX(%d,%d)\n",
2177			      xi.node, xi.interface, index);
2178#endif
2179
2180		} else {
2181			res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
2182			if (res == -1) {
2183#ifdef DEBUG_BGX
2184				debug("Failed to get %d:BGX(%d,%d) link\n",
2185				      xi.node, xi.interface, index);
2186#endif
2187			} else {
2188#ifdef DEBUG_BGX
2189				debug("Link initialization successful %d:BGX(%d,%d)\n",
2190				      xi.node, xi.interface, index);
2191#endif
2192				result.s.init_success = 1;
2193			}
2194		}
2195	}
2196
2197	return result;
2198}
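
/*
 * Example of consuming the returned link info (a sketch; the node,
 * interface and index are hypothetical):
 */
#if 0
static void example_print_link_state(void)
{
	int xiface = cvmx_helper_node_interface_to_xiface(0, 2);
	int xipd_port = cvmx_helper_get_ipd_port(xiface, 0);
	cvmx_helper_link_info_t li = __cvmx_helper_bgx_xaui_link_get(xipd_port);

	if (li.s.link_up)
		printf("link up, %u Mbps, %s duplex\n", (u32)li.s.speed,
		       li.s.full_duplex ? "full" : "half");
}
#endif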
2199
2200int __cvmx_helper_bgx_xaui_link_set(int xipd_port,
2201				    cvmx_helper_link_info_t link_info)
2202{
2203	int xiface = cvmx_helper_get_interface_num(xipd_port);
2204	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2205	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
2206	int node = xi.node;
2207	int index = cvmx_helper_get_interface_index_num(xp.port);
2208	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
2209	cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
2210	cvmx_bgxx_spux_status1_t spu_status1;
2211	cvmx_helper_interface_mode_t mode;
2212	cvmx_bgxx_cmrx_config_t cmr_config;
2213	cvmx_bgxx_spux_misc_control_t spu_misc_control;
2214
2215	if (debug)
2216		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
2217		      xi.interface, index);
2218
2219	mode = cvmx_helper_bgx_get_mode(xiface, index);
2220	if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2221		return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
2222
2223	/* Reading current rx/tx link status */
2224	smu_tx_ctl.u64 =
2225		csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
2226	smu_rx_ctl.u64 =
2227		csr_rd_node(node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
2228	spu_status1.u64 =
2229		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
2230	/* Reading tx/rx packet enables */
2231	cmr_config.u64 = csr_rd_node(
2232		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2233	spu_misc_control.u64 = csr_rd_node(
2234		xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
2235
2236	/* If the link shouldn't be up, then just return */
2237	if (!link_info.s.link_up)
2238		return 0;
2239
2240	/* Do nothing if both RX and TX are happy and packet
2241	 * transmission/reception is enabled
2242	 */
2243	if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
2244	    cmr_config.s.data_pkt_tx_en == 1 &&
2245	    cmr_config.s.data_pkt_rx_en == 1 &&
2246	    spu_misc_control.s.rx_packet_dis == 0 && spu_status1.s.rcv_lnk)
2247		return 0;
2248
2249	/* Bring the link up */
2250	return __cvmx_helper_bgx_xaui_link_init(index, xiface);
2251}
2252
2253int __cvmx_helper_bgx_xaui_configure_loopback(int xipd_port,
2254					      int enable_internal,
2255					      int enable_external)
2256{
2257	int xiface = cvmx_helper_get_interface_num(xipd_port);
2258	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2259	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
2260	int node = xi.node;
2261	int index = cvmx_helper_get_interface_index_num(xp.port);
2262	cvmx_bgxx_spux_control1_t spu_control1;
2263	cvmx_bgxx_smux_ext_loopback_t smu_ext_loopback;
2264
2265	if (debug)
2266		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
2267		      xi.interface, index);
2268
2269	/* INT_BEAT_GEN must be set for loopback if the QLMs are not clocked.
2270	 * Set it whenever we use internal loopback
2271	 */
2272	if (enable_internal) {
2273		cvmx_bgxx_cmrx_config_t cmr_config;
2274
2275		cmr_config.u64 = csr_rd_node(
2276			node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2277		cmr_config.s.int_beat_gen = 1;
2278		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
2279			    cmr_config.u64);
2280	}
2281	/* Set the internal loop */
2282	spu_control1.u64 =
2283		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
2284	spu_control1.s.loopbck = enable_internal;
2285	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
2286		    spu_control1.u64);
2287	/* Set the external loop */
2288	smu_ext_loopback.u64 = csr_rd_node(
2289		node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface));
2290	smu_ext_loopback.s.en = enable_external;
2291	csr_wr_node(node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface),
2292		    smu_ext_loopback.u64);
2293
2294	return __cvmx_helper_bgx_xaui_link_init(index, xiface);
2295}
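
/*
 * Example (a sketch; the port is hypothetical): put a port into
 * internal PCS loopback for a self-test, then take it back out.
 */
#if 0
static void example_loopback_test(int xipd_port)
{
	__cvmx_helper_bgx_xaui_configure_loopback(xipd_port, 1, 0);
	/* ... run the traffic test here ... */
	__cvmx_helper_bgx_xaui_configure_loopback(xipd_port, 0, 0);
}
#endif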
2296
2297int __cvmx_helper_bgx_mixed_enable(int xiface)
2298{
2299	int index;
2300	int num_ports = cvmx_helper_ports_on_interface(xiface);
2301	cvmx_helper_interface_mode_t mode;
2302
2303	for (index = 0; index < num_ports; index++) {
2304		int xipd_port, phy_pres = 0;
2305
2306		if (!cvmx_helper_is_port_valid(xiface, index))
2307			continue;
2308
2309		mode = cvmx_helper_bgx_get_mode(xiface, index);
2310
2311		xipd_port = cvmx_helper_get_ipd_port(xiface, index);
2312
2313		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
2314		    (cvmx_helper_get_port_phy_present(xiface, index)))
2315			phy_pres = 1;
2316
2317		if (__cvmx_helper_bgx_port_init(xipd_port, phy_pres))
2318			continue;
2319
2320		/* For an RGMII interface, initialize the link after PKO is set up */
2321		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2322			continue;
2323		/* Call SGMII init code for lmac_type = 0|5 */
2324		else if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII) {
2325			int do_link_set = 1;
2326
2327			if (do_link_set)
2328				__cvmx_helper_bgx_sgmii_link_set(
2329					xipd_port,
2330					__cvmx_helper_bgx_sgmii_link_get(
2331						xipd_port));
2332			/* All other LMAC types use the XAUI init code */
2333		} else {
2334			int res;
2335			struct cvmx_xiface xi =
2336				cvmx_helper_xiface_to_node_interface(xiface);
2337			static int count
2338				[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
2339				[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
2340					[0 ... CVMX_MAX_NODES -
2341					 1][0 ... CVMX_HELPER_MAX_IFACE -
2342					    1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
2343						    1] = 0 }
2344				};
2345
2346retry_link:
2347			res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
2348			/* RX equalization or autonegotiation can take a little
2349			 * longer, so retry the link up to 5 times for now
2350			 */
2351			if (res == -1 &&
2352			    count[xi.node][xi.interface][index] < 5) {
2353				count[xi.node][xi.interface][index]++;
2354				goto retry_link;
2355			}
2356
2357			if (res == -1) {
2358#ifdef DEBUG_BGX
2359				debug("Failed to get %d:BGX(%d,%d) link\n",
2360				      xi.node, xi.interface, index);
2361#endif
2362				continue;
2363			}
2364		}
2365	}
2366	return 0;
2367}
2368
2369cvmx_helper_link_info_t __cvmx_helper_bgx_mixed_link_get(int xipd_port)
2370{
2371	int xiface = cvmx_helper_get_interface_num(xipd_port);
2372	int index = cvmx_helper_get_interface_index_num(xipd_port);
2373	cvmx_helper_interface_mode_t mode;
2374
2375	mode = cvmx_helper_bgx_get_mode(xiface, index);
2376	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
2377	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2378		return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
2379	else
2380		return __cvmx_helper_bgx_xaui_link_get(xipd_port);
2381}
2382
2383int __cvmx_helper_bgx_mixed_link_set(int xipd_port,
2384				     cvmx_helper_link_info_t link_info)
2385{
2386	int xiface = cvmx_helper_get_interface_num(xipd_port);
2387	int index = cvmx_helper_get_interface_index_num(xipd_port);
2388	cvmx_helper_interface_mode_t mode;
2389
2390	mode = cvmx_helper_bgx_get_mode(xiface, index);
2391	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
2392	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2393		return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
2394	else
2395		return __cvmx_helper_bgx_xaui_link_set(xipd_port, link_info);
2396}
2397
2398int __cvmx_helper_bgx_mixed_configure_loopback(int xipd_port,
2399					       int enable_internal,
2400					       int enable_external)
2401{
2402	int xiface = cvmx_helper_get_interface_num(xipd_port);
2403	int index = cvmx_helper_get_interface_index_num(xipd_port);
2404	cvmx_helper_interface_mode_t mode;
2405
2406	mode = cvmx_helper_bgx_get_mode(xiface, index);
2407	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
2408	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
2409		return __cvmx_helper_bgx_sgmii_configure_loopback(
2410			xipd_port, enable_internal, enable_external);
2411	else
2412		return __cvmx_helper_bgx_xaui_configure_loopback(
2413			xipd_port, enable_internal, enable_external);
2414}
2415
2416/**
2417 * @INTERNAL
2418 * Configure Priority-Based Flow Control (a.k.a. PFC/CBFC)
2419 * on a specific BGX interface/port.
2420 */
2421void __cvmx_helper_bgx_xaui_config_pfc(unsigned int node,
2422				       unsigned int interface,
2423				       unsigned int index, bool pfc_enable)
2424{
2425	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
2426	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2427	cvmx_bgxx_smux_cbfc_ctl_t cbfc_ctl;
2428
2429	if (debug)
2430		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
2431		      xi.interface, index);
2432
2433	cbfc_ctl.u64 =
2434		csr_rd_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface));
2435
2436	/* Enable all PFC controls if requested */
2437	cbfc_ctl.s.rx_en = pfc_enable;
2438	cbfc_ctl.s.tx_en = pfc_enable;
2439	if (debug)
2440		debug("%s: CVMX_BGXX_SMUX_CBFC_CTL(%d,%d)=%#llx\n", __func__,
2441		      index, xi.interface, (unsigned long long)cbfc_ctl.u64);
2442	csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface),
2443		    cbfc_ctl.u64);
2444}
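
/*
 * Example (a sketch; node 0, interface 2, index 1 are hypothetical):
 * enable PFC on one LMAC and later disable it again.
 */
#if 0
static void example_toggle_pfc(void)
{
	__cvmx_helper_bgx_xaui_config_pfc(0, 2, 1, true);
	/* ... */
	__cvmx_helper_bgx_xaui_config_pfc(0, 2, 1, false);
}
#endif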
2445
2446/**
2447 * Control the generation of FCS and padding by the BGX.
2448 */
2450void cvmx_helper_bgx_tx_options(unsigned int node, unsigned int interface,
2451				unsigned int index, bool fcs_enable,
2452				bool pad_enable)
2453{
2454	cvmx_bgxx_cmrx_config_t cmr_config;
2455	cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
2456	cvmx_bgxx_gmp_gmi_txx_min_pkt_t gmp_min_pkt;
2457	cvmx_bgxx_smux_tx_min_pkt_t smu_min_pkt;
2458	cvmx_bgxx_smux_tx_append_t smu_tx_append;
2459	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
2460	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2461
2462	if (!cvmx_helper_is_port_valid(xiface, index))
2463		return;
2464
2465	if (debug)
2466		debug("%s: interface %u:%d/%d, fcs: %s, pad: %s\n", __func__,
2467		      xi.node, xi.interface, index,
2468		      fcs_enable ? "true" : "false",
2469		      pad_enable ? "true" : "false");
2470
2471	cmr_config.u64 =
2472		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2473
2474	(void)cmr_config; /* In case we need LMAC_TYPE later */
2475
2476	/* Setting options for both BGX subsystems, regardless of LMAC type */
2477
2478	/* Set GMP (SGMII) Tx options */
2479	gmp_min_pkt.u64 = 0;
2480	/* per HRM Sec 34.3.4.4 */
2481	gmp_min_pkt.s.min_size = 59;
2482	csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_MIN_PKT(index, xi.interface),
2483		    gmp_min_pkt.u64);
2484	gmp_txx_append.u64 = csr_rd_node(
2485		node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
2486	gmp_txx_append.s.fcs = fcs_enable;
2487	gmp_txx_append.s.pad = pad_enable;
2488	csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface),
2489		    gmp_txx_append.u64);
2490
2491	/* Set SMUX (XAUI/XFI) Tx options */
2492	/* Per HRM Sec 33.3.4.3, the minimum size should be 64 */
2493	smu_min_pkt.u64 = 0;
2494	smu_min_pkt.s.min_size = 0x40;
2495	csr_wr_node(node, CVMX_BGXX_SMUX_TX_MIN_PKT(index, xi.interface),
2496		    smu_min_pkt.u64);
2497	smu_tx_append.u64 = csr_rd_node(
2498		node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
2499	smu_tx_append.s.fcs_d = fcs_enable; /* Set data-packet FCS */
2500	smu_tx_append.s.pad = pad_enable;
2501	csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
2502		    smu_tx_append.u64);
2503}
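
/*
 * Example (a sketch; node 0, interface 2, index 1 are hypothetical):
 * let the BGX append the FCS and pad short frames so software does
 * not have to.
 */
#if 0
static void example_hw_fcs_and_pad(void)
{
	cvmx_helper_bgx_tx_options(0, 2, 1, true, true);
}
#endif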
2504
2505/**
2506 * Set mac for the ipd_port
2507 *
2508 * @param xipd_port ipd_port to set the mac
2509 * @param bcst      If set, accept all broadcast packets
2510 * @param mcst      Multicast mode
2511 *		    0 = Force reject all multicast packets
2512 *		    1 = Force accept all multicast packets
2513 *		    2 = use the address filter CAM.
2514 * @param mac       mac address for the ipd_port, or 0 to disable MAC filtering
2515 */
2516void cvmx_helper_bgx_set_mac(int xipd_port, int bcst, int mcst, u64 mac)
2517{
2518	int xiface = cvmx_helper_get_interface_num(xipd_port);
2519	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2520	int node = xi.node;
2521	int index;
2522	cvmx_bgxx_cmr_rx_adrx_cam_t adr_cam;
2523	cvmx_bgxx_cmrx_rx_adr_ctl_t adr_ctl;
2524	cvmx_bgxx_cmrx_config_t cmr_config;
2525	int saved_state_tx, saved_state_rx;
2526
2527	index = cvmx_helper_get_interface_index_num(xipd_port);
2528
2529	if (!cvmx_helper_is_port_valid(xiface, index))
2530		return;
2531
2532	if (debug)
2533		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
2534		      xi.interface, index);
2535
2536	cmr_config.u64 =
2537		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
2538	saved_state_tx = cmr_config.s.data_pkt_tx_en;
2539	saved_state_rx = cmr_config.s.data_pkt_rx_en;
2540	cmr_config.s.data_pkt_tx_en = 0;
2541	cmr_config.s.data_pkt_rx_en = 0;
2542	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
2543		    cmr_config.u64);
2544
2545	/* Set the mac */
2546	adr_cam.u64 = 0;
2547	adr_cam.s.id = index;
2548
2549	if (mac != 0ull)
2550		adr_cam.s.en = 1;
2551	adr_cam.s.adr = mac;
2552
2553	csr_wr_node(node, CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface),
2554		    adr_cam.u64);
2555
2556	adr_ctl.u64 = csr_rd_node(
2557		node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface));
2558	if (mac != 0ull)
2559		adr_ctl.s.cam_accept =
2560			1; /* Accept the packet on DMAC CAM address */
2561	else
2562		adr_ctl.s.cam_accept = 0; /* No filtering, promiscuous */
2563
2564	adr_ctl.s.mcst_mode = mcst;   /* Multicast mode as requested (0/1/2) */
2565	adr_ctl.s.bcst_accept = bcst; /* Accept broadcast packets when set */
2566	csr_wr_node(node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface),
2567		    adr_ctl.u64);
2568	/* Set SMAC for PAUSE frames */
2569	csr_wr_node(node, CVMX_BGXX_GMP_GMI_SMACX(index, xi.interface), mac);
2570
2571	/* Restore back the interface state */
2572	cmr_config.s.data_pkt_tx_en = saved_state_tx;
2573	cmr_config.s.data_pkt_rx_en = saved_state_rx;
2574	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
2575		    cmr_config.u64);
2576
2577	/* Wait a mode-dependent delay after bringing up the link to give the PHY some time */
2578	if (cmr_config.s.enable) {
2579		cvmx_helper_interface_mode_t mode;
2580
2581		mode = cvmx_helper_bgx_get_mode(xiface, index);
2582		__cvmx_helper_bgx_interface_enable_delay(mode);
2583	}
2584}
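
/*
 * Example (a sketch; the port and MAC address are hypothetical):
 * accept broadcasts, filter multicast through the CAM (mcst = 2) and
 * match a single unicast DMAC.
 */
#if 0
static void example_set_mac(int xipd_port)
{
	cvmx_helper_bgx_set_mac(xipd_port, 1, 2, 0x001122334455ull);
}
#endif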
2585
2586/**
2587 * Disables the sending of flow control (pause) frames on the specified
2588 * BGX port(s).
2589 *
2590 * @param xiface Which xiface
2591 * @param port_mask Mask (4bits) of which ports on the interface to disable
2592 *                  backpressure on.
2593 *                  1 => disable backpressure
2594 *                  0 => enable backpressure
2595 *
2596 * @return 0 on success
2597 *         -1 on error
2598 *
2599 * FIXME: Should change the API to handle a single port in every
2600 * invocation, for consistency with other API calls.
2601 */
2602int cvmx_bgx_set_backpressure_override(int xiface, unsigned int port_mask)
2603{
2604	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
2605	cvmx_bgxx_cmr_rx_ovr_bp_t rx_ovr_bp;
2606	int node = xi.node;
2607
2608	if (xi.interface >= CVMX_HELPER_MAX_GMX)
2609		return 0;
2610
2611	if (debug)
2612		debug("%s: interface %u:%d port_mask=%#x\n", __func__, xi.node,
2613		      xi.interface, port_mask);
2614
2615	/* No argument validation is performed; the mask is applied as-is */
2616	rx_ovr_bp.u64 = 0;
2617	rx_ovr_bp.s.en = port_mask; /* Per-port backpressure override enable */
2618	rx_ovr_bp.s.ign_fifo_bp =
2619		port_mask; /* Ignore RX FIFO full when computing backpressure */
2620
2621	csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), rx_ovr_bp.u64);
2622	return 0;
2623}
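
/*
 * Example (a sketch; the xiface is hypothetical): disable backpressure
 * on ports 0 and 2 of an interface while leaving ports 1 and 3 alone
 * (a set bit disables backpressure).
 */
#if 0
static void example_override_bp(int xiface)
{
	cvmx_bgx_set_backpressure_override(xiface, 0x5);
}
#endif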
2624
2625int cvmx_bgx_set_flowctl_mode(int xipd_port, cvmx_qos_proto_t qos,
2626			      cvmx_qos_pkt_mode_t fc_mode)
2627{
2628	int node, xiface, iface, index, mode;
2629	struct cvmx_xiface xi;
2630	const struct {
2631		int bck;
2632		int drp;
2633	} fcmode[4] = { [CVMX_QOS_PKT_MODE_HWONLY] = { 1, 1 },
2634			[CVMX_QOS_PKT_MODE_SWONLY] = { 0, 0 },
2635			[CVMX_QOS_PKT_MODE_HWSW] = { 1, 0 },
2636			[CVMX_QOS_PKT_MODE_DROP] = { 0, 1 } };
2637
2638	xiface = cvmx_helper_get_interface_num(xipd_port);
2639	xi = cvmx_helper_xiface_to_node_interface(xiface);
2640	node = xi.node;
2641	iface = xi.interface;
2642
2643	if (xi.interface >= CVMX_HELPER_MAX_GMX)
2644		return 0;
2645
2646	index = cvmx_helper_get_interface_index_num(xipd_port);
2647	mode = cvmx_helper_bgx_get_mode(xiface, index);
2648	switch (mode) {
2649	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
2650	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
2651	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
2652	case CVMX_HELPER_INTERFACE_MODE_XFI:
2653	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
2654	case CVMX_HELPER_INTERFACE_MODE_XAUI: {
2655		cvmx_bgxx_smux_tx_ctl_t txctl;
2656		cvmx_bgxx_smux_cbfc_ctl_t cbfc;
2657		cvmx_bgxx_smux_rx_frm_ctl_t frmctl;
2658		cvmx_bgxx_smux_hg2_control_t hg2ctl;
2659
2660		txctl.u64 =
2661			csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface));
2662		cbfc.u64 = csr_rd_node(node,
2663				       CVMX_BGXX_SMUX_CBFC_CTL(index, iface));
2664		frmctl.u64 = csr_rd_node(
2665			node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface));
2666		hg2ctl.u64 = csr_rd_node(
2667			node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface));
2668		switch (qos) {
2669		case CVMX_QOS_PROTO_PAUSE:
2670			cbfc.u64 = 0;
2671			hg2ctl.u64 = 0;
2672			frmctl.s.ctl_bck = fcmode[fc_mode].bck;
2673			frmctl.s.ctl_drp = fcmode[fc_mode].drp;
2674			frmctl.s.ctl_mcst = 1;
2675			txctl.s.l2p_bp_conv = 1;
2676			break;
2677		case CVMX_QOS_PROTO_PFC:
2678			hg2ctl.u64 = 0;
2679			hg2ctl.s.logl_en = 0xff;
2680			frmctl.s.ctl_bck = fcmode[fc_mode].bck;
2681			frmctl.s.ctl_drp = fcmode[fc_mode].drp;
2682			frmctl.s.ctl_mcst = 1;
2683			cbfc.s.bck_en = fcmode[fc_mode].bck;
2684			cbfc.s.drp_en = fcmode[fc_mode].drp;
2685			cbfc.s.phys_en = 0;
2686			cbfc.s.logl_en = 0xff;
2687			cbfc.s.tx_en = 1;
2688			cbfc.s.rx_en = 1;
2689			break;
2690		case CVMX_QOS_PROTO_NONE:
2691			cbfc.u64 = 0;
2692			hg2ctl.u64 = 0;
2693			frmctl.s.ctl_bck = fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
2694			frmctl.s.ctl_drp = fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
2695			txctl.s.l2p_bp_conv = 0;
2696			break;
2697		default:
2698			break;
2699		}
2700		csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, iface),
2701			    cbfc.u64);
2702		csr_wr_node(node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface),
2703			    frmctl.u64);
2704		csr_wr_node(node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface),
2705			    hg2ctl.u64);
2706		csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface),
2707			    txctl.u64);
2708		break;
2709	}
2710	case CVMX_HELPER_INTERFACE_MODE_SGMII:
2711	case CVMX_HELPER_INTERFACE_MODE_RGMII: {
2712		cvmx_bgxx_gmp_gmi_rxx_frm_ctl_t gmi_frmctl;
2713
2714		gmi_frmctl.u64 = csr_rd_node(
2715			node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface));
2716		switch (qos) {
2717		case CVMX_QOS_PROTO_PAUSE:
2718			gmi_frmctl.s.ctl_bck = fcmode[fc_mode].bck;
2719			gmi_frmctl.s.ctl_drp = fcmode[fc_mode].drp;
2720			gmi_frmctl.s.ctl_mcst = 1;
2721			break;
2722		case CVMX_QOS_PROTO_NONE:
2723			gmi_frmctl.s.ctl_bck =
2724				fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
2725			gmi_frmctl.s.ctl_drp =
2726				fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
2727			break;
2728		default:
2729			break;
2730		}
2731		csr_wr_node(node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface),
2732			    gmi_frmctl.u64);
2733	}
2734	} /*switch*/
2735
2736	return 0;
2737}
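
/*
 * Example (a sketch; the port is hypothetical): switch a port to PFC
 * with hardware backpressure and hardware drop (CVMX_QOS_PKT_MODE_HWONLY
 * maps to bck = 1, drp = 1 in the table above).
 */
#if 0
static void example_enable_pfc_flowctl(int xipd_port)
{
	cvmx_bgx_set_flowctl_mode(xipd_port, CVMX_QOS_PROTO_PFC,
				  CVMX_QOS_PKT_MODE_HWONLY);
}
#endif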
2738