/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 247822 2013-03-04 23:07:40Z jfv $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data);
/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms;
 *  however, the hardware default for these parts is 500us to 1ms, which is
 *  less than the 10ms recommended by the PCIe spec.  To address this we need
 *  to increase the value to either 10ms to 250ms for a capability version 1
 *  config, or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if the capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size		= 128;
	mac->vft_size		= 128;
	mac->num_rar_entries	= 16;
	mac->rx_pb_size		= 512;
	mac->max_tx_queues	= 32;
	mac->max_rx_queues	= 64;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
	phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;

	return ret_val;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

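	/*
	 * Relaxed ordering is disabled here as the default; the
	 * enable_relaxed_ordering op further down in this file can turn it
	 * back on if the caller opts in.
	 */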
	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
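		/* LINK_SPEED_UNKNOWN is 0, so supported speeds are OR'ed in */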
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
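			/*
			 * The water marks appear to be kept in KB units;
			 * shifting left by 10 scales them to bytes for the
			 * FCRTL/FCRTH register fields.
			 */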
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
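	/*
	 * Multiplying by 0x00010001 replicates the 16-bit pause time into
	 * both halves of each 32-bit FCTTV word.
	 */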
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when PHY link is available.  If the PHY is not
 *  ready within 5 seconds of the MAC indicating link, an error is returned.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

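	/*
	 * Poll the AN status register; at 100 ms per iteration this covers
	 * roughly the 5 second window described above.
	 */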
	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
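		/*
		 * The link status register is read twice; the first read is
		 * presumably just to clear a stale latched indication.
		 */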
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw This will write the AUTOC register based on the new
		 * stored values
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets it back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

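	/*
	 * Each 32-bit VFTAVIND word holds eight 4-bit VMDq indices, hence
	 * the 4-bit shift above and the 0x0F nibble mask below.
	 */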
	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

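	/*
	 * Writing ATLASCTL kicks off the analog register access; after a
	 * short delay the result is returned in the low byte of the same
	 * register.
	 */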
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @dev_addr: address to read from
 *  @byte_offset: byte offset to read from dev_addr
 *  @eeprom_data: value read
 *
 *  Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
				      sfp_addr);

		/* Poll status */
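		/* Up to 100 iterations at 10 ms each, roughly one second */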
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	return status;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at address 0xA2
 *  @sff8472_data: value read
 *
 *  Performs an 8 bit read operation to SFP module's SFF-8472 data over I2C
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

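	/*
	 * For the fixed fiber and direct-attach device IDs below, the device
	 * ID itself determines the physical layer and overrides what was
	 * derived from AUTOC above.
	 */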
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
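	/*
	 * The 82598 has 512 KB of Rx packet buffer (rx_pb_size above):
	 * either eight equal 64 KB buffers, or four 80 KB plus four 48 KB
	 * buffers with the weighted strategy.
	 */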
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}