/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 205720 2010-03-27 00:21:40Z jfv $*/

#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed,
                                      bool autoneg,
                                      bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
                                         bool autoneg,
                                         bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
                         u32 vind, bool vlan_on);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms;
 *  however, the hardware default for these parts is 500us to 1ms, which is
 *  less than the 10ms recommended by the pci-e spec.  To address this we
 *  need to raise the value to either 10ms to 250ms for capability version 1
 *  configurations, or 16ms to 55ms for version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if the capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
 *  @hw: pointer to hardware structure
 *
 *  Read PCIe configuration space, and get the MSI-X vector count from
 *  the capabilities table.
 **/
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
{
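	/*
	 * Note: 18 is used as the fallback when the vector count is not
	 * read from PCIe config space below; it is assumed here to be the
	 * 82598 MSI-X maximum.
	 */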
	u32 msix_count = 18;

	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");

	if (hw->mac.msix_vectors_from_pcie) {
		msix_count = IXGBE_READ_PCIE_WORD(hw,
		                                  IXGBE_PCIE_MSIX_82598_CAPS);
		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

		/* MSI-X count is zero-based in HW, so increment to give
		 * proper value */
		msix_count++;
	}
	return msix_count;
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
	                            &ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size       = 128;
	mac->vft_size        = 128;
	mac->num_rar_entries = 16;
	mac->max_tx_queues   = 32;
	mac->max_rx_queues   = 64;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.get_link_capabilities =
	                       &ixgbe_get_link_capabilities_82598;

	return ret_val;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
		                  &ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_generic;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
		                                              &list_offset,
		                                              &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	if (hw->phy.type == ixgbe_phy_cu_unknown ||
	    hw->phy.type == ixgbe_phy_tn ||
	    hw->phy.type == ixgbe_phy_aq) {
		media_type = ixgbe_media_type_copper;
		goto out;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *  @packetbuf_num: packet buffer number (0-7)
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 link_speed = 0;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/*
	 * On 82598, having Rx FC on causes resets while doing 1G,
	 * so if it's on, turn it off once we know link_speed.  For
	 * more details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val)
		goto out;

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/* Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
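	/*
	 * Note: assuming the enum values match the list above, the bitwise
	 * test below is true for both ixgbe_fc_tx_pause (2) and
	 * ixgbe_fc_full (3), i.e. whenever this port may send PAUSE frames.
	 */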
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		if (hw->fc.send_xon) {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
			                hw->fc.low_water);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
	}

	/* Configure pause time (2 TCs per register) */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

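	/*
	 * Refresh XOFF frames at half the pause time so the link partner's
	 * pause timer does not expire while traffic is still being held off
	 * (FCRTV is the flow control refresh timer).
	 */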
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
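			/*
			 * Poll in 100 ms steps, so the wait is bounded by
			 * IXGBE_AUTO_NEG_TIME x 100 ms.
			 */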
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
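		/*
		 * The link status register is read twice; the first read
		 * may return a stale, latched value.
		 */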
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
		                     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
				                     IXGBE_TWINAX_DEV,
				                     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
				                     IXGBE_TWINAX_DEV,
				                     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) &&
			    ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

	/* if link is down, zero out the current_mode */
	if (*link_up == FALSE) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = FALSE;
	}

out:
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Sets the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed, bool autoneg,
                                      bool autoneg_wait_to_complete)
{
	s32              status            = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32              autoc             = curr_autoc;
	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on the
		 * new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
		                                    autoneg_wait_to_complete);
	}

	return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
                                         bool autoneg,
                                         bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	                                      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                              analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto no_phy_reset;

		hw->phy.ops.reset(hw);
	}

no_phy_reset:
	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		usec_delay(1);
		goto mac_reset_top;
	}

	msec_delay(50);

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc)
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;
	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_PARAMETER(vmdq);

	if (rar < rar_entries) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
		if (rar_high & IXGBE_RAH_VIND_MASK) {
			rar_high &= ~IXGBE_RAH_VIND_MASK;
			IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
		}
	} else {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                         bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

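	/*
	 * Also clear the per-VLAN VMDq (queue index) nibbles that
	 * ixgbe_set_vfta_82598() writes into the four VFTAVIND byte arrays.
	 */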
	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
			                0);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
		                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
		                      sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
			                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
			                     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	if (hw->phy.type == ixgbe_phy_tn ||
	    hw->phy.type == ixgbe_phy_cu_unknown) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen, pci_ctrl2;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns an error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

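	/*
	 * Poll in 100 ms steps; IXGBE_VALIDATE_LINK_READY_TIMEOUT iterations
	 * bound the wait to the ~5 second window described above.
	 */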
	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
		                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
}