/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.11 2010/11/26 22:46:32 jfv Exp $*/
/*$NetBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
					bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete);
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                            ixgbe_link_speed speed,
                                            bool autoneg,
                                            bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                               ixgbe_link_speed speed,
                                               bool autoneg,
                                               bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
                         u32 vind, bool vlan_on);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms, which is
 *  less than the 10ms recommended by the pci-e spec.  To address this we
 *  need to increase the value to either the 10ms to 250ms range for
 *  capability version 1 configurations, or the 16ms to 55ms range for
 *  version 2.
 **/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

/**
 *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
 *  @hw: pointer to hardware structure
 *
 *  Read PCIe configuration space, and get the MSI-X vector count from
 *  the capabilities table.
 **/
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
{
	u32 msix_count = 18;
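	/*
	 * Note: 18 is used as the assumed vector count for 82598 parts when
	 * msix_vectors_from_pcie is not set and the MSI-X table size is not
	 * read from the PCIe capability below.
	 */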

	DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");

	if (hw->mac.msix_vectors_from_pcie) {
		msix_count = IXGBE_READ_PCIE_WORD(hw,
		                                  IXGBE_PCIE_MSIX_82598_CAPS);
		msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

		/* MSI-X count is zero-based in HW, so increment to give
		 * proper value */
		msix_count++;
	}
	return msix_count;
}

/**
 *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 *  @hw: pointer to hardware structure
 *
 *  Initialize the function pointers and assign the MAC type for 82598.
 *  Does not touch the hardware.
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82598");

	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
	                            &ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	mac->mcft_size       = 128;
	mac->vft_size        = 128;
	mac->num_rar_entries = 16;
	mac->rx_pb_size      = 512;
	mac->max_tx_queues   = 32;
	mac->max_rx_queues   = 64;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities =
	                       &ixgbe_get_link_capabilities_82598;

	return ret_val;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;

	DEBUGFUNC("ixgbe_init_phy_ops_82598");

	/* Identify the PHY */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
		                  &ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		             &ixgbe_get_phy_firmware_version_generic;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
		                                            &list_offset,
		                                            &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering, then sets the PCIe completion timeout.
 *
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_hw_82598");

	ret_val = ixgbe_start_hw_generic(hw);

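	/*
	 * Relaxed ordering is turned off here by default; it can be turned
	 * back on later through mac->ops.enable_relaxed_ordering
	 * (ixgbe_enable_relaxed_ordering_82598 below).
	 */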
	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface */
	if (ret_val == IXGBE_SUCCESS)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;

	DEBUGFUNC("ixgbe_get_link_capabilities_82598");

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
	enum ixgbe_media_type media_type;

	DEBUGFUNC("ixgbe_get_media_type_82598");

	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}

	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
out:
	return media_type;
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *  @packetbuf_num: packet buffer number (0-7)
 *
 *  Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 rx_pba_size;
	u32 link_speed = 0;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/*
	 * On 82598, having Rx flow control enabled causes resets while
	 * running at 1G, so if it is on, turn it off once link_speed is
	 * known.  For more details see the 82598 Specification Update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
		goto out;

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

		reg = (rx_pba_size - hw->fc.low_water) << 6;
		if (hw->fc.send_xon)
			reg |= IXGBE_FCRTL_XONE;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);

		reg = (rx_pba_size - hw->fc.high_water) << 6;
		reg |= IXGBE_FCRTH_FCEN;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
	}

	/* Configure pause time (2 TCs per register) */
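	/*
	 * Each FCTTV register holds the pause time for two packet buffers:
	 * even-numbered buffers use the low 16 bits and odd-numbered buffers
	 * use the high 16 bits (e.g. packetbuf_num 3 lands in the upper half
	 * of FCTTV(1)).
	 */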
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

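	/*
	 * The refresh threshold is set to half the pause time so that XOFF
	 * frames are refreshed before the pause timer on the link partner
	 * expires.
	 */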
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82598");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noise during initial link setup */
	msec_delay(50);

	return status;
}

/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
		                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msec_delay(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE if link is up, FALSE otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed, bool *link_up,
                                      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  0xC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
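	/*
	 * Note: 0xC79F is read twice below; the first read is assumed to
	 * clear any latched status so that the second read reflects the
	 * current link state.
	 */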
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
		                     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
				                     IXGBE_TWINAX_DEV,
				                     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
				                     IXGBE_TWINAX_DEV,
				                     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}

		if (*link_up == FALSE)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;

	/* if link is down, zero out the current_mode */
	if (*link_up == FALSE) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = FALSE;
	}
out:
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed, bool autoneg,
                                           bool autoneg_wait_to_complete)
{
	s32              status            = IXGBE_SUCCESS;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32              autoc             = curr_autoc;
	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;

	DEBUGFUNC("ixgbe_setup_mac_link_82598");

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;
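	/*
	 * The requested speed is masked against what the current link mode
	 * can support; if no supported speed remains the request cannot be
	 * satisfied.
	 */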

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
	         link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == IXGBE_SUCCESS) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw.  This will write the AUTOC register based on
		 * the new stored values.
		 */
		status = ixgbe_start_mac_link_82598(hw,
		                                    autoneg_wait_to_complete);
	}

	return status;
}


/**
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                               ixgbe_link_speed speed,
                                               bool autoneg,
                                               bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82598");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	                                      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link
 *  (MAC) reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
		                              analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                             &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
		                              analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto no_phy_reset;

		hw->phy.ops.reset(hw);
	}

no_phy_reset:
	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		usec_delay(1);
		goto mac_reset_top;
	}

	msec_delay(50);

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
	                                              bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
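	/*
	 * Example: VLAN 100 -> regindex 3 (100 >> 5), VFTAVIND byte array 0
	 * ((100 >> 3) & 0x3) with nibble offset 16 ((100 & 0x7) << 2), and
	 * VFTA bit 4 (100 & 0x1F) in the on/off step below.
	 */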

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
			                0);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

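	/*
	 * The register address is written to ATLASCTL along with the command
	 * bit to start the access; after a short delay the requested data is
	 * available in the low byte of ATLASCTL.
	 */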
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
	                IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32  atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs an 8-bit read operation to the SFP module's EEPROM over the I2C
 *  interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
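		/*
		 * The address word carries the EEPROM device address in its
		 * upper byte and the byte offset in its lower byte; the read
		 * mask bit requests a read transaction.
		 */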
		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg(hw,
		                      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
		                      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
		                      sfp_addr);

		/* Poll status */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg(hw,
			                     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
			                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
			                     &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
		goto out;
	}

out:
	return status;
}

/**
 *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 *  @hw: pointer to hardware structure
 *
 *  Determines physical layer capabilities of the current configuration.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {

		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {

			bus->func = 0;
		}
	}
}

/**
 *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 *  @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}