/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixgbe/ixgbe_82598.c 251964 2013-06-18 21:28:19Z jfv $*/

#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data);
/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms, which is
 *  less than the 10ms recommended by the pci-e spec.  To address this we
 *  need to increase the value to either 10ms to 250ms for capability
 *  version 1 config, or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if the capabilities version is 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size		= 128;
	mac->vft_size		= 128;
	mac->num_rar_entries	= 16;
	mac->rx_pb_size		= 512;
	mac->max_tx_queues	= 32;
	mac->max_rx_queues	= 64;
	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
	phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;

	mac->ops.get_rtrup2tc = NULL;

	return ret_val;
}
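
/*
 * Usage sketch (illustrative): this routine is not normally called
 * directly by the driver.  The shared-code entry points in ixgbe_api.c
 * dispatch here once the MAC type has been identified from the PCI
 * device ID, roughly:
 *
 *	if (ixgbe_set_mac_type(hw) == IXGBE_SUCCESS &&
 *	    hw->mac.type == ixgbe_mac_82598EB)
 *		ret_val = ixgbe_init_ops_82598(hw);
 *
 * After it returns, the hw->mac.ops and hw->phy.ops pointers assigned
 * above are the ones used for all 82598-specific operations.
 */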

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598, having Rx flow control on causes resets while doing 1G,
	 * so if it's on, turn it off once we know link_speed.  For more
	 * details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
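	/*
	 * DPF below is taken here to be the Discard PAUSE Frames bit: once a
	 * received PAUSE frame has acted on the MAC it is dropped rather
	 * than delivered to the host as a normal packet.
	 */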
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
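			/*
			 * Note (assumes the KB units used elsewhere in this
			 * driver): hw->fc.low_water/high_water are kept in
			 * kilobytes, so the << 10 below scales them to the
			 * byte-granular FCRTL/FCRTH format, e.g. a 32 KB
			 * high water mark becomes 32 << 10 = 0x8000.
			 */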
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
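	/*
	 * Multiplying the 16-bit pause_time by 0x00010001 replicates it into
	 * both halves of the 32-bit FCTTV register, one traffic class per
	 * half.
	 */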
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns an error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

out:
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Sets the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on the
		 * new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets it back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
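	/*
	 * Illustrative example: for vlan = 100, regindex = (100 >> 5) & 0x7F
	 * = 3, vftabyte = (100 >> 3) & 0x03 = 0 and bitindex = (100 & 0x7)
	 * << 2 = 16, so the 4-bit VMDq index lands in bits 19:16 of
	 * VFTAVIND(0, 3).  Further down, bitindex becomes 100 & 0x1F = 4,
	 * i.e. bit 4 of VFTA(3) turns the VLAN itself on or off.
	 */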

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
					0);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @dev_addr: address to read from
 *  @byte_offset: byte offset to read from dev_addr
 *  @eeprom_data: value read
 *
 *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
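		/*
		 * Illustrative values: with the standard SFP EEPROM device
		 * address 0xA0 and byte_offset 0x03, the first assignment
		 * above yields (0xA0 << 8) + 0x03 = 0xA003, and the second
		 * ORs in IXGBE_I2C_EEPROM_READ_MASK to form the read command.
		 */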
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					  sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at address 0xA2
 *  @sff8472_data: value read
 *
 *  Performs 8 bit read operation to SFP module's SFF-8472 data over I2C
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
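		/*
		 * Illustrative arithmetic: with all 8 packet buffers in use,
		 * 4 x 80KB + 4 x 48KB accounts for the full 512KB Rx packet
		 * buffer that ixgbe_init_ops_82598() reports via rx_pb_size.
		 */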
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);

	return;
}