/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 238149 2012-07-05 20:51:44Z jfv $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms;
 *  however, the hardware default for these parts is 500us to 1ms, which is
 *  less than the 10ms recommended by the PCIe spec.  To address this we need
 *  to increase the value to either 10ms to 250ms for capability version 1
 *  config, or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
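	/*
	 * A non-zero timeout field here most likely means firmware or a
	 * previous driver load already programmed a value, so it is left
	 * untouched.
	 */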
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size		= 128;
	mac->vft_size		= 128;
	mac->num_rar_entries	= 16;
	mac->rx_pb_size		= 512;
	mac->max_tx_queues	= 32;
	mac->max_rx_queues	= 64;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;

	return ret_val;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
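	/*
	 * Relaxed ordering is left off here by default; it can be re-enabled
	 * later through ixgbe_enable_relaxed_ordering_82598() below when the
	 * platform is known to handle it.
	 */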
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598, having Rx FC on causes resets while doing 1G,
	 * so if it's on, turn it off once we know link_speed. For
	 * more details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
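			/*
			 * The water marks are assumed to be kept in KB;
			 * shifting left by 10 converts them to the byte
			 * counts programmed into FCRTL/FCRTH.
			 */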
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
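	/*
	 * Multiplying by 0x00010001 replicates the 16-bit pause time into
	 * both halves of each 32-bit FCTTV register, covering two traffic
	 * classes per write.
	 */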
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
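	/*
	 * Refreshing at half the pause time should re-send XOFF before the
	 * link partner's pause timer expires.
	 */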
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of the MAC indicating link, the function returns an error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
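		/*
		 * The link status register is read twice on purpose; the
		 * first read is assumed to flush a stale/latched value so
		 * the second read reflects the current link state.
		 */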
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed, bool autoneg,
				      bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
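	/*
	 * Mask the requested speed against what the link supports; if no
	 * common speed remains, the check below reports a link setup error.
	 */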
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on the
		 * new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
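	/*
	 * Example: for VLAN 100, regindex = 3, vftabyte = 0 and the VMD
	 * queue nibble starts at bit 16; the VFTA bit index computed
	 * further below is 100 & 0x1F = 4.
	 */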

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs an 8-bit read of the SFP+ module's EEPROM over the I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
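		/*
		 * The upper byte of sfp_addr carries the I2C device address
		 * of the SFP+ EEPROM and the lower byte carries the byte
		 * offset; IXGBE_I2C_EEPROM_READ_MASK marks it as a read.
		 */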
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
				      sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

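		/* The EEPROM byte is in the upper half of sfp_data. */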
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 *  Re-enables relaxed ordering in the DCA Tx and Rx control registers.
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
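		/*
		 * The weighted split works out to 4 x 80 KB + 4 x 48 KB =
		 * 512 KB, matching the 512 KB rx_pb_size set in
		 * ixgbe_init_ops_82598.
		 */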
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}