1/****************************************************************************** 2 3 Copyright (c) 2001-2010, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32******************************************************************************/
| 1/****************************************************************************** 2 3 Copyright (c) 2001-2010, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32******************************************************************************/
|
33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 215911 2010-11-26 22:46:32Z jfv $*/
| 33/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 217593 2011-01-19 19:36:27Z jfv $*/
|
34 35#include "ixgbe_common.h" 36#include "ixgbe_phy.h" 37#include "ixgbe_api.h" 38 39static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 40static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 41static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 42static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 43static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 44static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 45 u16 count); 46static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 47static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 48static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 49static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 50 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 52static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 53 u16 *san_mac_offset); 54static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); 55static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); 56static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); 57static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); 58static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 59 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); 60 61s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); 62 63/** 64 * ixgbe_init_ops_generic - Inits function ptrs 65 * @hw: pointer to the hardware structure 66 * 67 * Initialize the function pointers. 
68 **/ 69s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) 70{ 71 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 72 struct ixgbe_mac_info *mac = &hw->mac; 73 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 74 75 DEBUGFUNC("ixgbe_init_ops_generic"); 76 77 /* EEPROM */ 78 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; 79 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ 80 if (eec & (1 << 8)) 81 eeprom->ops.read = &ixgbe_read_eerd_generic; 82 else 83 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; 84 eeprom->ops.write = &ixgbe_write_eeprom_generic; 85 eeprom->ops.validate_checksum = 86 &ixgbe_validate_eeprom_checksum_generic; 87 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; 88 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic; 89 90 /* MAC */ 91 mac->ops.init_hw = &ixgbe_init_hw_generic; 92 mac->ops.reset_hw = NULL; 93 mac->ops.start_hw = &ixgbe_start_hw_generic; 94 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; 95 mac->ops.get_media_type = NULL; 96 mac->ops.get_supported_physical_layer = NULL; 97 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic; 98 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; 99 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; 100 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; 101 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie; 102 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync; 103 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync; 104 105 /* LEDs */ 106 mac->ops.led_on = &ixgbe_led_on_generic; 107 mac->ops.led_off = &ixgbe_led_off_generic; 108 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic; 109 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic; 110 111 /* RAR, Multicast, VLAN */ 112 mac->ops.set_rar = &ixgbe_set_rar_generic; 113 mac->ops.clear_rar = &ixgbe_clear_rar_generic; 114 mac->ops.insert_mac_addr = NULL; 115 mac->ops.set_vmdq = NULL; 116 mac->ops.clear_vmdq = NULL; 117 mac->ops.init_rx_addrs = 
&ixgbe_init_rx_addrs_generic; 118 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; 119 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; 120 mac->ops.enable_mc = &ixgbe_enable_mc_generic; 121 mac->ops.disable_mc = &ixgbe_disable_mc_generic; 122 mac->ops.clear_vfta = NULL; 123 mac->ops.set_vfta = NULL; 124 mac->ops.init_uta_tables = NULL; 125 126 /* Flow Control */ 127 mac->ops.fc_enable = &ixgbe_fc_enable_generic; 128 129 /* Link */ 130 mac->ops.get_link_capabilities = NULL; 131 mac->ops.setup_link = NULL; 132 mac->ops.check_link = NULL; 133 134 return IXGBE_SUCCESS; 135} 136 137/** 138 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 139 * @hw: pointer to hardware structure 140 * 141 * Starts the hardware by filling the bus info structure and media type, clears 142 * all on chip counters, initializes receive address registers, multicast 143 * table, VLAN filter table, calls routine to set up link and flow control 144 * settings, and leaves transmit and receive units disabled and uninitialized 145 **/ 146s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 147{ 148 u32 ctrl_ext; 149 150 DEBUGFUNC("ixgbe_start_hw_generic"); 151 152 /* Set the media type */ 153 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 154 155 /* PHY ops initialization must be done in reset_hw() */ 156 157 /* Clear the VLAN filter table */ 158 hw->mac.ops.clear_vfta(hw); 159 160 /* Clear statistics registers */ 161 hw->mac.ops.clear_hw_cntrs(hw); 162 163 /* Set No Snoop Disable */ 164 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 165 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 166 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 167 IXGBE_WRITE_FLUSH(hw); 168 169 /* Setup flow control */ 170 ixgbe_setup_fc(hw, 0); 171 172 /* Clear adapter stopped flag */ 173 hw->adapter_stopped = FALSE; 174 175 return IXGBE_SUCCESS; 176} 177 178/** 179 * ixgbe_start_hw_gen2 - Init sequence for common device family 180 * @hw: pointer to hw structure 181 * 182 * Performs the init 
sequence common to the second generation 183 * of 10 GbE devices. 184 * Devices in the second generation: 185 * 82599
| 34 35#include "ixgbe_common.h" 36#include "ixgbe_phy.h" 37#include "ixgbe_api.h" 38 39static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 40static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 41static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 42static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 43static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 44static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 45 u16 count); 46static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 47static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 48static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 49static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 50 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 52static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 53 u16 *san_mac_offset); 54static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); 55static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); 56static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); 57static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); 58static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 59 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); 60 61s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); 62 63/** 64 * ixgbe_init_ops_generic - Inits function ptrs 65 * @hw: pointer to the hardware structure 66 * 67 * Initialize the function pointers. 
68 **/ 69s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) 70{ 71 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 72 struct ixgbe_mac_info *mac = &hw->mac; 73 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 74 75 DEBUGFUNC("ixgbe_init_ops_generic"); 76 77 /* EEPROM */ 78 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; 79 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ 80 if (eec & (1 << 8)) 81 eeprom->ops.read = &ixgbe_read_eerd_generic; 82 else 83 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; 84 eeprom->ops.write = &ixgbe_write_eeprom_generic; 85 eeprom->ops.validate_checksum = 86 &ixgbe_validate_eeprom_checksum_generic; 87 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; 88 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic; 89 90 /* MAC */ 91 mac->ops.init_hw = &ixgbe_init_hw_generic; 92 mac->ops.reset_hw = NULL; 93 mac->ops.start_hw = &ixgbe_start_hw_generic; 94 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; 95 mac->ops.get_media_type = NULL; 96 mac->ops.get_supported_physical_layer = NULL; 97 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic; 98 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; 99 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; 100 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; 101 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie; 102 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync; 103 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync; 104 105 /* LEDs */ 106 mac->ops.led_on = &ixgbe_led_on_generic; 107 mac->ops.led_off = &ixgbe_led_off_generic; 108 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic; 109 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic; 110 111 /* RAR, Multicast, VLAN */ 112 mac->ops.set_rar = &ixgbe_set_rar_generic; 113 mac->ops.clear_rar = &ixgbe_clear_rar_generic; 114 mac->ops.insert_mac_addr = NULL; 115 mac->ops.set_vmdq = NULL; 116 mac->ops.clear_vmdq = NULL; 117 mac->ops.init_rx_addrs = 
&ixgbe_init_rx_addrs_generic; 118 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; 119 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; 120 mac->ops.enable_mc = &ixgbe_enable_mc_generic; 121 mac->ops.disable_mc = &ixgbe_disable_mc_generic; 122 mac->ops.clear_vfta = NULL; 123 mac->ops.set_vfta = NULL; 124 mac->ops.init_uta_tables = NULL; 125 126 /* Flow Control */ 127 mac->ops.fc_enable = &ixgbe_fc_enable_generic; 128 129 /* Link */ 130 mac->ops.get_link_capabilities = NULL; 131 mac->ops.setup_link = NULL; 132 mac->ops.check_link = NULL; 133 134 return IXGBE_SUCCESS; 135} 136 137/** 138 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 139 * @hw: pointer to hardware structure 140 * 141 * Starts the hardware by filling the bus info structure and media type, clears 142 * all on chip counters, initializes receive address registers, multicast 143 * table, VLAN filter table, calls routine to set up link and flow control 144 * settings, and leaves transmit and receive units disabled and uninitialized 145 **/ 146s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 147{ 148 u32 ctrl_ext; 149 150 DEBUGFUNC("ixgbe_start_hw_generic"); 151 152 /* Set the media type */ 153 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 154 155 /* PHY ops initialization must be done in reset_hw() */ 156 157 /* Clear the VLAN filter table */ 158 hw->mac.ops.clear_vfta(hw); 159 160 /* Clear statistics registers */ 161 hw->mac.ops.clear_hw_cntrs(hw); 162 163 /* Set No Snoop Disable */ 164 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 165 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; 166 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 167 IXGBE_WRITE_FLUSH(hw); 168 169 /* Setup flow control */ 170 ixgbe_setup_fc(hw, 0); 171 172 /* Clear adapter stopped flag */ 173 hw->adapter_stopped = FALSE; 174 175 return IXGBE_SUCCESS; 176} 177 178/** 179 * ixgbe_start_hw_gen2 - Init sequence for common device family 180 * @hw: pointer to hw structure 181 * 182 * Performs the init 
sequence common to the second generation 183 * of 10 GbE devices. 184 * Devices in the second generation: 185 * 82599
|
| 186 * X540
|
186 **/ 187s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) 188{ 189 u32 i; 190 u32 regval; 191 192 /* Clear the rate limiters */ 193 for (i = 0; i < hw->mac.max_tx_queues; i++) { 194 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 195 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 196 } 197 IXGBE_WRITE_FLUSH(hw); 198 199 /* Disable relaxed ordering */ 200 for (i = 0; i < hw->mac.max_tx_queues; i++) { 201 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 202 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 203 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 204 } 205 206 for (i = 0; i < hw->mac.max_rx_queues; i++) { 207 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 208 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 209 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 210 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 211 } 212 213 return IXGBE_SUCCESS; 214} 215 216/** 217 * ixgbe_init_hw_generic - Generic hardware initialization 218 * @hw: pointer to hardware structure 219 * 220 * Initialize the hardware by resetting the hardware, filling the bus info 221 * structure and media type, clears all on chip counters, initializes receive 222 * address registers, multicast table, VLAN filter table, calls routine to set 223 * up link and flow control settings, and leaves transmit and receive units 224 * disabled and uninitialized 225 **/ 226s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 227{ 228 s32 status; 229 230 DEBUGFUNC("ixgbe_init_hw_generic"); 231 232 /* Reset the hardware */ 233 status = hw->mac.ops.reset_hw(hw); 234 235 if (status == IXGBE_SUCCESS) { 236 /* Start the HW */ 237 status = hw->mac.ops.start_hw(hw); 238 } 239 240 return status; 241} 242 243/** 244 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 245 * @hw: pointer to hardware structure 246 * 247 * Clears all hardware statistics counters by reading them from the hardware 248 * Statistics counters are clear on read. 
249 **/ 250s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) 251{ 252 u16 i = 0; 253 254 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); 255 256 IXGBE_READ_REG(hw, IXGBE_CRCERRS); 257 IXGBE_READ_REG(hw, IXGBE_ILLERRC); 258 IXGBE_READ_REG(hw, IXGBE_ERRBC); 259 IXGBE_READ_REG(hw, IXGBE_MSPDC); 260 for (i = 0; i < 8; i++) 261 IXGBE_READ_REG(hw, IXGBE_MPC(i)); 262 263 IXGBE_READ_REG(hw, IXGBE_MLFC); 264 IXGBE_READ_REG(hw, IXGBE_MRFC); 265 IXGBE_READ_REG(hw, IXGBE_RLEC); 266 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 267 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 268 if (hw->mac.type >= ixgbe_mac_82599EB) { 269 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 270 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 271 } else { 272 IXGBE_READ_REG(hw, IXGBE_LXONRXC); 273 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 274 } 275 276 for (i = 0; i < 8; i++) { 277 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 278 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 279 if (hw->mac.type >= ixgbe_mac_82599EB) { 280 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 281 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 282 } else { 283 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 284 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 285 } 286 } 287 if (hw->mac.type >= ixgbe_mac_82599EB) 288 for (i = 0; i < 8; i++) 289 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 290 IXGBE_READ_REG(hw, IXGBE_PRC64); 291 IXGBE_READ_REG(hw, IXGBE_PRC127); 292 IXGBE_READ_REG(hw, IXGBE_PRC255); 293 IXGBE_READ_REG(hw, IXGBE_PRC511); 294 IXGBE_READ_REG(hw, IXGBE_PRC1023); 295 IXGBE_READ_REG(hw, IXGBE_PRC1522); 296 IXGBE_READ_REG(hw, IXGBE_GPRC); 297 IXGBE_READ_REG(hw, IXGBE_BPRC); 298 IXGBE_READ_REG(hw, IXGBE_MPRC); 299 IXGBE_READ_REG(hw, IXGBE_GPTC); 300 IXGBE_READ_REG(hw, IXGBE_GORCL); 301 IXGBE_READ_REG(hw, IXGBE_GORCH); 302 IXGBE_READ_REG(hw, IXGBE_GOTCL); 303 IXGBE_READ_REG(hw, IXGBE_GOTCH); 304 for (i = 0; i < 8; i++) 305 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 306 IXGBE_READ_REG(hw, IXGBE_RUC); 307 IXGBE_READ_REG(hw, IXGBE_RFC); 308 IXGBE_READ_REG(hw, IXGBE_ROC); 309 IXGBE_READ_REG(hw, IXGBE_RJC); 310 
IXGBE_READ_REG(hw, IXGBE_MNGPRC); 311 IXGBE_READ_REG(hw, IXGBE_MNGPDC); 312 IXGBE_READ_REG(hw, IXGBE_MNGPTC); 313 IXGBE_READ_REG(hw, IXGBE_TORL); 314 IXGBE_READ_REG(hw, IXGBE_TORH); 315 IXGBE_READ_REG(hw, IXGBE_TPR); 316 IXGBE_READ_REG(hw, IXGBE_TPT); 317 IXGBE_READ_REG(hw, IXGBE_PTC64); 318 IXGBE_READ_REG(hw, IXGBE_PTC127); 319 IXGBE_READ_REG(hw, IXGBE_PTC255); 320 IXGBE_READ_REG(hw, IXGBE_PTC511); 321 IXGBE_READ_REG(hw, IXGBE_PTC1023); 322 IXGBE_READ_REG(hw, IXGBE_PTC1522); 323 IXGBE_READ_REG(hw, IXGBE_MPTC); 324 IXGBE_READ_REG(hw, IXGBE_BPTC); 325 for (i = 0; i < 16; i++) { 326 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 327 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 328 if (hw->mac.type >= ixgbe_mac_82599EB) { 329 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 330 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); 331 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 332 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); 333 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 334 } else { 335 IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 336 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 337 } 338 } 339 340 return IXGBE_SUCCESS; 341} 342 343/** 344 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM 345 * @hw: pointer to hardware structure 346 * @pba_num: stores the part number string from the EEPROM 347 * @pba_num_size: part number string buffer length 348 * 349 * Reads the part number string from the EEPROM. 
350 **/ 351s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 352 u32 pba_num_size) 353{ 354 s32 ret_val; 355 u16 data; 356 u16 pba_ptr; 357 u16 offset; 358 u16 length; 359 360 DEBUGFUNC("ixgbe_read_pba_string_generic"); 361 362 if (pba_num == NULL) { 363 DEBUGOUT("PBA string buffer was null\n"); 364 return IXGBE_ERR_INVALID_ARGUMENT; 365 } 366 367 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 368 if (ret_val) { 369 DEBUGOUT("NVM Read Error\n"); 370 return ret_val; 371 } 372 373 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); 374 if (ret_val) { 375 DEBUGOUT("NVM Read Error\n"); 376 return ret_val; 377 } 378 379 /* 380 * if data is not ptr guard the PBA must be in legacy format which 381 * means pba_ptr is actually our second data word for the PBA number 382 * and we can decode it into an ascii string 383 */ 384 if (data != IXGBE_PBANUM_PTR_GUARD) { 385 DEBUGOUT("NVM PBA number is not stored as string\n"); 386 387 /* we will need 11 characters to store the PBA */ 388 if (pba_num_size < 11) { 389 DEBUGOUT("PBA string buffer too small\n"); 390 return IXGBE_ERR_NO_SPACE; 391 } 392 393 /* extract hex string from data and pba_ptr */ 394 pba_num[0] = (data >> 12) & 0xF; 395 pba_num[1] = (data >> 8) & 0xF; 396 pba_num[2] = (data >> 4) & 0xF; 397 pba_num[3] = data & 0xF; 398 pba_num[4] = (pba_ptr >> 12) & 0xF; 399 pba_num[5] = (pba_ptr >> 8) & 0xF; 400 pba_num[6] = '-'; 401 pba_num[7] = 0; 402 pba_num[8] = (pba_ptr >> 4) & 0xF; 403 pba_num[9] = pba_ptr & 0xF; 404 405 /* put a null character on the end of our string */ 406 pba_num[10] = '\0'; 407 408 /* switch all the data but the '-' to hex char */ 409 for (offset = 0; offset < 10; offset++) { 410 if (pba_num[offset] < 0xA) 411 pba_num[offset] += '0'; 412 else if (pba_num[offset] < 0x10) 413 pba_num[offset] += 'A' - 0xA; 414 } 415 416 return IXGBE_SUCCESS; 417 } 418 419 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); 420 if (ret_val) { 421 DEBUGOUT("NVM Read Error\n"); 
422 return ret_val; 423 } 424 425 if (length == 0xFFFF || length == 0) { 426 DEBUGOUT("NVM PBA number section invalid length\n"); 427 return IXGBE_ERR_PBA_SECTION; 428 } 429 430 /* check if pba_num buffer is big enough */ 431 if (pba_num_size < (((u32)length * 2) - 1)) { 432 DEBUGOUT("PBA string buffer too small\n"); 433 return IXGBE_ERR_NO_SPACE; 434 } 435 436 /* trim pba length from start of string */ 437 pba_ptr++; 438 length--; 439 440 for (offset = 0; offset < length; offset++) { 441 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); 442 if (ret_val) { 443 DEBUGOUT("NVM Read Error\n"); 444 return ret_val; 445 } 446 pba_num[offset * 2] = (u8)(data >> 8); 447 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); 448 } 449 pba_num[offset * 2] = '\0'; 450 451 return IXGBE_SUCCESS; 452} 453 454/** 455 * ixgbe_read_pba_length_generic - Reads part number length from EEPROM 456 * @hw: pointer to hardware structure 457 * @pba_num_size: part number string buffer length 458 * 459 * Reads the part number length from the EEPROM. 
460 * Returns expected buffer size in pba_num_size 461 **/ 462s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size) 463{ 464 s32 ret_val; 465 u16 data; 466 u16 pba_ptr; 467 u16 length; 468 469 DEBUGFUNC("ixgbe_read_pba_length_generic"); 470 471 if (pba_num_size == NULL) { 472 DEBUGOUT("PBA buffer size was null\n"); 473 return IXGBE_ERR_INVALID_ARGUMENT; 474 } 475 476 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 477 if (ret_val) { 478 DEBUGOUT("NVM Read Error\n"); 479 return ret_val; 480 } 481 482 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); 483 if (ret_val) { 484 DEBUGOUT("NVM Read Error\n"); 485 return ret_val; 486 } 487 488 /* if data is not ptr guard the PBA must be in legacy format */ 489 if (data != IXGBE_PBANUM_PTR_GUARD) { 490 *pba_num_size = 11; 491 return IXGBE_SUCCESS; 492 } 493 494 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); 495 if (ret_val) { 496 DEBUGOUT("NVM Read Error\n"); 497 return ret_val; 498 } 499 500 if (length == 0xFFFF || length == 0) { 501 DEBUGOUT("NVM PBA number section invalid length\n"); 502 return IXGBE_ERR_PBA_SECTION; 503 } 504 505 /* 506 * Convert from length in u16 values to u8 chars, add 1 for NULL, 507 * and subtract 2 because length field is included in length. 508 */ 509 *pba_num_size = ((u32)length * 2) - 1; 510 511 return IXGBE_SUCCESS; 512} 513 514/** 515 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 516 * @hw: pointer to hardware structure 517 * @pba_num: stores the part number from the EEPROM 518 * 519 * Reads the part number from the EEPROM. 
520 **/ 521s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 522{ 523 s32 ret_val; 524 u16 data; 525 526 DEBUGFUNC("ixgbe_read_pba_num_generic"); 527 528 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 529 if (ret_val) { 530 DEBUGOUT("NVM Read Error\n"); 531 return ret_val; 532 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 533 DEBUGOUT("NVM Not supported\n"); 534 return IXGBE_NOT_IMPLEMENTED; 535 } 536 *pba_num = (u32)(data << 16); 537 538 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 539 if (ret_val) { 540 DEBUGOUT("NVM Read Error\n"); 541 return ret_val; 542 } 543 *pba_num |= data; 544 545 return IXGBE_SUCCESS; 546} 547 548/** 549 * ixgbe_get_mac_addr_generic - Generic get MAC address 550 * @hw: pointer to hardware structure 551 * @mac_addr: Adapter MAC address 552 * 553 * Reads the adapter's MAC address from first Receive Address Register (RAR0) 554 * A reset of the adapter must be performed prior to calling this function 555 * in order for the MAC address to have been loaded from the EEPROM into RAR0 556 **/ 557s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) 558{ 559 u32 rar_high; 560 u32 rar_low; 561 u16 i; 562 563 DEBUGFUNC("ixgbe_get_mac_addr_generic"); 564 565 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); 566 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); 567 568 for (i = 0; i < 4; i++) 569 mac_addr[i] = (u8)(rar_low >> (i*8)); 570 571 for (i = 0; i < 2; i++) 572 mac_addr[i+4] = (u8)(rar_high >> (i*8)); 573 574 return IXGBE_SUCCESS; 575} 576 577/** 578 * ixgbe_get_bus_info_generic - Generic set PCI bus info 579 * @hw: pointer to hardware structure 580 * 581 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure 582 **/ 583s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 584{ 585 struct ixgbe_mac_info *mac = &hw->mac; 586 u16 link_status; 587 588 DEBUGFUNC("ixgbe_get_bus_info_generic"); 589 590 hw->bus.type = ixgbe_bus_type_pci_express; 591 592 /* Get the negotiated link width 
and speed from PCI config space */ 593 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); 594 595 switch (link_status & IXGBE_PCI_LINK_WIDTH) { 596 case IXGBE_PCI_LINK_WIDTH_1: 597 hw->bus.width = ixgbe_bus_width_pcie_x1; 598 break; 599 case IXGBE_PCI_LINK_WIDTH_2: 600 hw->bus.width = ixgbe_bus_width_pcie_x2; 601 break; 602 case IXGBE_PCI_LINK_WIDTH_4: 603 hw->bus.width = ixgbe_bus_width_pcie_x4; 604 break; 605 case IXGBE_PCI_LINK_WIDTH_8: 606 hw->bus.width = ixgbe_bus_width_pcie_x8; 607 break; 608 default: 609 hw->bus.width = ixgbe_bus_width_unknown; 610 break; 611 } 612 613 switch (link_status & IXGBE_PCI_LINK_SPEED) { 614 case IXGBE_PCI_LINK_SPEED_2500: 615 hw->bus.speed = ixgbe_bus_speed_2500; 616 break; 617 case IXGBE_PCI_LINK_SPEED_5000: 618 hw->bus.speed = ixgbe_bus_speed_5000; 619 break; 620 default: 621 hw->bus.speed = ixgbe_bus_speed_unknown; 622 break; 623 } 624 625 mac->ops.set_lan_id(hw); 626 627 return IXGBE_SUCCESS; 628} 629 630/** 631 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 632 * @hw: pointer to the HW structure 633 * 634 * Determines the LAN function id by reading memory-mapped registers 635 * and swaps the port value if requested. 636 **/ 637void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 638{ 639 struct ixgbe_bus_info *bus = &hw->bus; 640 u32 reg; 641 642 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); 643 644 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 645 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; 646 bus->lan_id = bus->func; 647 648 /* check for a port swap */ 649 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); 650 if (reg & IXGBE_FACTPS_LFS) 651 bus->func ^= 0x1; 652} 653 654/** 655 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 656 * @hw: pointer to hardware structure 657 * 658 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 659 * disables transmit and receive units. 
The adapter_stopped flag is used by 660 * the shared code and drivers to determine if the adapter is in a stopped 661 * state and should not touch the hardware. 662 **/ 663s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 664{ 665 u32 number_of_queues; 666 u32 reg_val; 667 u16 i; 668 669 DEBUGFUNC("ixgbe_stop_adapter_generic"); 670 671 /* 672 * Set the adapter_stopped flag so other driver functions stop touching 673 * the hardware 674 */ 675 hw->adapter_stopped = TRUE; 676 677 /* Disable the receive unit */ 678 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 679 reg_val &= ~(IXGBE_RXCTRL_RXEN); 680 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 681 IXGBE_WRITE_FLUSH(hw); 682 msec_delay(2); 683 684 /* Clear interrupt mask to stop from interrupts being generated */ 685 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 686 687 /* Clear any pending interrupts */ 688 IXGBE_READ_REG(hw, IXGBE_EICR); 689 690 /* Disable the transmit unit. Each queue must be disabled. */ 691 number_of_queues = hw->mac.max_tx_queues; 692 for (i = 0; i < number_of_queues; i++) { 693 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 694 if (reg_val & IXGBE_TXDCTL_ENABLE) { 695 reg_val &= ~IXGBE_TXDCTL_ENABLE; 696 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); 697 } 698 } 699 700 /* 701 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 702 * access and verify no pending requests 703 */ 704 ixgbe_disable_pcie_master(hw); 705 706 return IXGBE_SUCCESS; 707} 708 709/** 710 * ixgbe_led_on_generic - Turns on the software controllable LEDs. 711 * @hw: pointer to hardware structure 712 * @index: led number to turn on 713 **/ 714s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 715{ 716 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 717 718 DEBUGFUNC("ixgbe_led_on_generic"); 719 720 /* To turn on the LED, set mode to ON. 
*/ 721 led_reg &= ~IXGBE_LED_MODE_MASK(index); 722 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 723 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 724 IXGBE_WRITE_FLUSH(hw); 725 726 return IXGBE_SUCCESS; 727} 728 729/** 730 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 731 * @hw: pointer to hardware structure 732 * @index: led number to turn off 733 **/ 734s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 735{ 736 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 737 738 DEBUGFUNC("ixgbe_led_off_generic"); 739 740 /* To turn off the LED, set mode to OFF. */ 741 led_reg &= ~IXGBE_LED_MODE_MASK(index); 742 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 743 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 744 IXGBE_WRITE_FLUSH(hw); 745 746 return IXGBE_SUCCESS; 747} 748 749/** 750 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 751 * @hw: pointer to hardware structure 752 * 753 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 754 * ixgbe_hw struct in order to set up EEPROM access. 755 **/ 756s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 757{ 758 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 759 u32 eec; 760 u16 eeprom_size; 761 762 DEBUGFUNC("ixgbe_init_eeprom_params_generic"); 763 764 if (eeprom->type == ixgbe_eeprom_uninitialized) { 765 eeprom->type = ixgbe_eeprom_none; 766 /* Set default semaphore delay to 10ms which is a well 767 * tested value */ 768 eeprom->semaphore_delay = 10; 769 770 /* 771 * Check for EEPROM present first. 772 * If not present leave as none 773 */ 774 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 775 if (eec & IXGBE_EEC_PRES) { 776 eeprom->type = ixgbe_eeprom_spi; 777 778 /* 779 * SPI EEPROM is assumed here. This code would need to 780 * change if a future EEPROM is not SPI. 
781 */ 782 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 783 IXGBE_EEC_SIZE_SHIFT); 784 eeprom->word_size = 1 << (eeprom_size + 785 IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT); 786 } 787 788 if (eec & IXGBE_EEC_ADDR_SIZE) 789 eeprom->address_bits = 16; 790 else 791 eeprom->address_bits = 8; 792 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " 793 "%d\n", eeprom->type, eeprom->word_size, 794 eeprom->address_bits); 795 } 796 797 return IXGBE_SUCCESS; 798} 799 800/** 801 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 802 * @hw: pointer to hardware structure 803 * @offset: offset within the EEPROM to be written to 804 * @data: 16 bit word to be written to the EEPROM 805 * 806 * If ixgbe_eeprom_update_checksum is not called after this function, the 807 * EEPROM will most likely contain an invalid checksum. 808 **/ 809s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 810{ 811 s32 status; 812 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 813 814 DEBUGFUNC("ixgbe_write_eeprom_generic"); 815 816 hw->eeprom.ops.init_params(hw); 817 818 if (offset >= hw->eeprom.word_size) { 819 status = IXGBE_ERR_EEPROM; 820 goto out; 821 } 822 823 /* Prepare the EEPROM for writing */ 824 status = ixgbe_acquire_eeprom(hw); 825 826 if (status == IXGBE_SUCCESS) { 827 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 828 ixgbe_release_eeprom(hw); 829 status = IXGBE_ERR_EEPROM; 830 } 831 } 832 833 if (status == IXGBE_SUCCESS) { 834 ixgbe_standby_eeprom(hw); 835 836 /* Send the WRITE ENABLE command (8 bit opcode ) */ 837 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, 838 IXGBE_EEPROM_OPCODE_BITS); 839 840 ixgbe_standby_eeprom(hw); 841 842 /* 843 * Some SPI eeproms use the 8th address bit embedded in the 844 * opcode 845 */ 846 if ((hw->eeprom.address_bits == 8) && (offset >= 128)) 847 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 848 849 /* Send the Write command (8-bit opcode + addr) */ 850 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 851 
IXGBE_EEPROM_OPCODE_BITS); 852 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), 853 hw->eeprom.address_bits); 854 855 /* Send the data */ 856 data = (data >> 8) | (data << 8); 857 ixgbe_shift_out_eeprom_bits(hw, data, 16); 858 ixgbe_standby_eeprom(hw); 859 860 /* Done with writing - release the EEPROM */ 861 ixgbe_release_eeprom(hw); 862 } 863 864out: 865 return status; 866} 867 868/** 869 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 870 * @hw: pointer to hardware structure 871 * @offset: offset within the EEPROM to be read 872 * @data: read 16 bit value from EEPROM 873 * 874 * Reads 16 bit value from EEPROM through bit-bang method 875 **/ 876s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 877 u16 *data) 878{ 879 s32 status; 880 u16 word_in; 881 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; 882 883 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); 884 885 hw->eeprom.ops.init_params(hw); 886 887 if (offset >= hw->eeprom.word_size) { 888 status = IXGBE_ERR_EEPROM; 889 goto out; 890 } 891 892 /* Prepare the EEPROM for reading */ 893 status = ixgbe_acquire_eeprom(hw); 894 895 if (status == IXGBE_SUCCESS) { 896 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { 897 ixgbe_release_eeprom(hw); 898 status = IXGBE_ERR_EEPROM; 899 } 900 } 901 902 if (status == IXGBE_SUCCESS) { 903 ixgbe_standby_eeprom(hw); 904 905 /* 906 * Some SPI eeproms use the 8th address bit embedded in the 907 * opcode 908 */ 909 if ((hw->eeprom.address_bits == 8) && (offset >= 128)) 910 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 911 912 /* Send the READ command (opcode + addr) */ 913 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 914 IXGBE_EEPROM_OPCODE_BITS); 915 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), 916 hw->eeprom.address_bits); 917 918 /* Read the data. 
*/ 919 word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 920 *data = (word_in >> 8) | (word_in << 8); 921 922 /* End this read operation */ 923 ixgbe_release_eeprom(hw); 924 } 925 926out: 927 return status; 928} 929 930/** 931 * ixgbe_read_eerd_generic - Read EEPROM word using EERD 932 * @hw: pointer to hardware structure 933 * @offset: offset of word in the EEPROM to read 934 * @data: word read from the EEPROM 935 * 936 * Reads a 16 bit word from the EEPROM using the EERD register. 937 **/ 938s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 939{ 940 u32 eerd; 941 s32 status; 942 943 DEBUGFUNC("ixgbe_read_eerd_generic"); 944 945 hw->eeprom.ops.init_params(hw); 946 947 if (offset >= hw->eeprom.word_size) { 948 status = IXGBE_ERR_EEPROM; 949 goto out; 950 } 951 952 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) + 953 IXGBE_EEPROM_RW_REG_START; 954 955 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 956 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 957 958 if (status == IXGBE_SUCCESS) 959 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 960 IXGBE_EEPROM_RW_REG_DATA); 961 else 962 DEBUGOUT("Eeprom read timed out\n"); 963 964out: 965 return status; 966} 967 968/** 969 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR 970 * @hw: pointer to hardware structure 971 * @offset: offset of word in the EEPROM to write 972 * @data: word write to the EEPROM 973 * 974 * Write a 16 bit word to the EEPROM using the EEWR register. 
975 **/ 976s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 977{ 978 u32 eewr; 979 s32 status; 980 981 DEBUGFUNC("ixgbe_write_eewr_generic"); 982 983 hw->eeprom.ops.init_params(hw); 984 985 if (offset >= hw->eeprom.word_size) { 986 status = IXGBE_ERR_EEPROM; 987 goto out; 988 } 989 990 eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | 991 (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START; 992 993 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 994 if (status != IXGBE_SUCCESS) { 995 DEBUGOUT("Eeprom write EEWR timed out\n"); 996 goto out; 997 } 998 999 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 1000 1001 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 1002 if (status != IXGBE_SUCCESS) { 1003 DEBUGOUT("Eeprom write EEWR timed out\n"); 1004 goto out; 1005 } 1006 1007out: 1008 return status; 1009} 1010 1011/** 1012 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1013 * @hw: pointer to hardware structure 1014 * @ee_reg: EEPROM flag for polling 1015 * 1016 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1017 * read or write is done respectively. 1018 **/ 1019s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1020{ 1021 u32 i; 1022 u32 reg; 1023 s32 status = IXGBE_ERR_EEPROM; 1024 1025 DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); 1026 1027 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { 1028 if (ee_reg == IXGBE_NVM_POLL_READ) 1029 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 1030 else 1031 reg = IXGBE_READ_REG(hw, IXGBE_EEWR); 1032 1033 if (reg & IXGBE_EEPROM_RW_REG_DONE) { 1034 status = IXGBE_SUCCESS; 1035 break; 1036 } 1037 usec_delay(5); 1038 } 1039 return status; 1040} 1041 1042/** 1043 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 1044 * @hw: pointer to hardware structure 1045 * 1046 * Prepares EEPROM for access using bit-bang method. This function should 1047 * be called before issuing a command to the EEPROM. 
1048 **/ 1049static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 1050{ 1051 s32 status = IXGBE_SUCCESS; 1052 u32 eec; 1053 u32 i; 1054 1055 DEBUGFUNC("ixgbe_acquire_eeprom"); 1056 1057 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS) 1058 status = IXGBE_ERR_SWFW_SYNC; 1059 1060 if (status == IXGBE_SUCCESS) { 1061 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1062 1063 /* Request EEPROM Access */ 1064 eec |= IXGBE_EEC_REQ; 1065 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1066 1067 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 1068 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1069 if (eec & IXGBE_EEC_GNT) 1070 break; 1071 usec_delay(5); 1072 } 1073 1074 /* Release if grant not acquired */ 1075 if (!(eec & IXGBE_EEC_GNT)) { 1076 eec &= ~IXGBE_EEC_REQ; 1077 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1078 DEBUGOUT("Could not acquire EEPROM grant\n"); 1079 1080 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1081 status = IXGBE_ERR_EEPROM; 1082 } 1083 1084 /* Setup EEPROM for Read/Write */ 1085 if (status == IXGBE_SUCCESS) { 1086 /* Clear CS and SK */ 1087 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 1088 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1089 IXGBE_WRITE_FLUSH(hw); 1090 usec_delay(1); 1091 } 1092 } 1093 return status; 1094} 1095 1096/** 1097 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 1098 * @hw: pointer to hardware structure 1099 * 1100 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method 1101 **/ 1102static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 1103{ 1104 s32 status = IXGBE_ERR_EEPROM; 1105 u32 timeout = 2000; 1106 u32 i; 1107 u32 swsm; 1108 1109 DEBUGFUNC("ixgbe_get_eeprom_semaphore"); 1110 1111 1112 /* Get SMBI software semaphore between device drivers first */ 1113 for (i = 0; i < timeout; i++) { 1114 /* 1115 * If the SMBI bit is 0 when we read it, then the bit will be 1116 * set and we have the semaphore 1117 */ 1118 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1119 if (!(swsm & IXGBE_SWSM_SMBI)) { 1120 status = IXGBE_SUCCESS; 
1121 break; 1122 } 1123 usec_delay(50); 1124 } 1125 1126 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1127 if (status == IXGBE_SUCCESS) { 1128 for (i = 0; i < timeout; i++) { 1129 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1130 1131 /* Set the SW EEPROM semaphore bit to request access */ 1132 swsm |= IXGBE_SWSM_SWESMBI; 1133 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 1134 1135 /* 1136 * If we set the bit successfully then we got the 1137 * semaphore. 1138 */ 1139 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1140 if (swsm & IXGBE_SWSM_SWESMBI) 1141 break; 1142 1143 usec_delay(50); 1144 } 1145 1146 /* 1147 * Release semaphores and return error if SW EEPROM semaphore 1148 * was not granted because we don't have access to the EEPROM 1149 */ 1150 if (i >= timeout) { 1151 DEBUGOUT("SWESMBI Software EEPROM semaphore " 1152 "not granted.\n"); 1153 ixgbe_release_eeprom_semaphore(hw); 1154 status = IXGBE_ERR_EEPROM; 1155 } 1156 } else { 1157 DEBUGOUT("Software semaphore SMBI between device drivers " 1158 "not granted.\n"); 1159 } 1160 1161 return status; 1162} 1163 1164/** 1165 * ixgbe_release_eeprom_semaphore - Release hardware semaphore 1166 * @hw: pointer to hardware structure 1167 * 1168 * This function clears hardware semaphore bits. 
1169 **/ 1170static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) 1171{ 1172 u32 swsm; 1173 1174 DEBUGFUNC("ixgbe_release_eeprom_semaphore"); 1175 1176 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1177 1178 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ 1179 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); 1180 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 1181 IXGBE_WRITE_FLUSH(hw); 1182} 1183 1184/** 1185 * ixgbe_ready_eeprom - Polls for EEPROM ready 1186 * @hw: pointer to hardware structure 1187 **/ 1188static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 1189{ 1190 s32 status = IXGBE_SUCCESS; 1191 u16 i; 1192 u8 spi_stat_reg; 1193 1194 DEBUGFUNC("ixgbe_ready_eeprom"); 1195 1196 /* 1197 * Read "Status Register" repeatedly until the LSB is cleared. The 1198 * EEPROM will signal that the command has been completed by clearing 1199 * bit 0 of the internal status register. If it's not cleared within 1200 * 5 milliseconds, then error out. 1201 */ 1202 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1203 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1204 IXGBE_EEPROM_OPCODE_BITS); 1205 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1206 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1207 break; 1208 1209 usec_delay(5); 1210 ixgbe_standby_eeprom(hw); 1211 }; 1212 1213 /* 1214 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 1215 * devices (and only 0-5mSec on 5V devices) 1216 */ 1217 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 1218 DEBUGOUT("SPI EEPROM Status error\n"); 1219 status = IXGBE_ERR_EEPROM; 1220 } 1221 1222 return status; 1223} 1224 1225/** 1226 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 1227 * @hw: pointer to hardware structure 1228 **/ 1229static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 1230{ 1231 u32 eec; 1232 1233 DEBUGFUNC("ixgbe_standby_eeprom"); 1234 1235 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1236 1237 /* Toggle CS to flush commands */ 1238 eec |= IXGBE_EEC_CS; 
1239 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1240 IXGBE_WRITE_FLUSH(hw); 1241 usec_delay(1); 1242 eec &= ~IXGBE_EEC_CS; 1243 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1244 IXGBE_WRITE_FLUSH(hw); 1245 usec_delay(1); 1246} 1247 1248/** 1249 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 1250 * @hw: pointer to hardware structure 1251 * @data: data to send to the EEPROM 1252 * @count: number of bits to shift out 1253 **/ 1254static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 1255 u16 count) 1256{ 1257 u32 eec; 1258 u32 mask; 1259 u32 i; 1260 1261 DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); 1262 1263 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1264 1265 /* 1266 * Mask is used to shift "count" bits of "data" out to the EEPROM 1267 * one bit at a time. Determine the starting bit based on count 1268 */ 1269 mask = 0x01 << (count - 1); 1270 1271 for (i = 0; i < count; i++) { 1272 /* 1273 * A "1" is shifted out to the EEPROM by setting bit "DI" to a 1274 * "1", and then raising and then lowering the clock (the SK 1275 * bit controls the clock input to the EEPROM). A "0" is 1276 * shifted out to the EEPROM by setting "DI" to "0" and then 1277 * raising and then lowering the clock. 1278 */ 1279 if (data & mask) 1280 eec |= IXGBE_EEC_DI; 1281 else 1282 eec &= ~IXGBE_EEC_DI; 1283 1284 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1285 IXGBE_WRITE_FLUSH(hw); 1286 1287 usec_delay(1); 1288 1289 ixgbe_raise_eeprom_clk(hw, &eec); 1290 ixgbe_lower_eeprom_clk(hw, &eec); 1291 1292 /* 1293 * Shift mask to signify next bit of data to shift in to the 1294 * EEPROM 1295 */ 1296 mask = mask >> 1; 1297 }; 1298 1299 /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ 1300 eec &= ~IXGBE_EEC_DI; 1301 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1302 IXGBE_WRITE_FLUSH(hw); 1303} 1304 1305/** 1306 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 1307 * @hw: pointer to hardware structure 1308 **/ 1309static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 1310{ 1311 u32 eec; 1312 u32 i; 1313 u16 data = 0; 1314 1315 DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); 1316 1317 /* 1318 * In order to read a register from the EEPROM, we need to shift 1319 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 1320 * the clock input to the EEPROM (setting the SK bit), and then reading 1321 * the value of the "DO" bit. During this "shifting in" process the 1322 * "DI" bit should always be clear. 1323 */ 1324 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1325 1326 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 1327 1328 for (i = 0; i < count; i++) { 1329 data = data << 1; 1330 ixgbe_raise_eeprom_clk(hw, &eec); 1331 1332 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1333 1334 eec &= ~(IXGBE_EEC_DI); 1335 if (eec & IXGBE_EEC_DO) 1336 data |= 1; 1337 1338 ixgbe_lower_eeprom_clk(hw, &eec); 1339 } 1340 1341 return data; 1342} 1343 1344/** 1345 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 1346 * @hw: pointer to hardware structure 1347 * @eec: EEC register's current value 1348 **/ 1349static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1350{ 1351 DEBUGFUNC("ixgbe_raise_eeprom_clk"); 1352 1353 /* 1354 * Raise the clock input to the EEPROM 1355 * (setting the SK bit), then delay 1356 */ 1357 *eec = *eec | IXGBE_EEC_SK; 1358 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); 1359 IXGBE_WRITE_FLUSH(hw); 1360 usec_delay(1); 1361} 1362 1363/** 1364 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
1365 * @hw: pointer to hardware structure 1366 * @eecd: EECD's current value 1367 **/ 1368static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1369{ 1370 DEBUGFUNC("ixgbe_lower_eeprom_clk"); 1371 1372 /* 1373 * Lower the clock input to the EEPROM (clearing the SK bit), then 1374 * delay 1375 */ 1376 *eec = *eec & ~IXGBE_EEC_SK; 1377 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); 1378 IXGBE_WRITE_FLUSH(hw); 1379 usec_delay(1); 1380} 1381 1382/** 1383 * ixgbe_release_eeprom - Release EEPROM, release semaphores 1384 * @hw: pointer to hardware structure 1385 **/ 1386static void ixgbe_release_eeprom(struct ixgbe_hw *hw) 1387{ 1388 u32 eec; 1389 1390 DEBUGFUNC("ixgbe_release_eeprom"); 1391 1392 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1393 1394 eec |= IXGBE_EEC_CS; /* Pull CS high */ 1395 eec &= ~IXGBE_EEC_SK; /* Lower SCK */ 1396 1397 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1398 IXGBE_WRITE_FLUSH(hw); 1399 1400 usec_delay(1); 1401 1402 /* Stop requesting EEPROM access */ 1403 eec &= ~IXGBE_EEC_REQ; 1404 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1405 1406 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1407 1408 /* Delay before attempt to obtain semaphore again to allow FW access */ 1409 msec_delay(hw->eeprom.semaphore_delay); 1410} 1411 1412/** 1413 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum 1414 * @hw: pointer to hardware structure 1415 **/ 1416u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1417{ 1418 u16 i; 1419 u16 j; 1420 u16 checksum = 0; 1421 u16 length = 0; 1422 u16 pointer = 0; 1423 u16 word = 0; 1424 1425 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); 1426 1427 /* Include 0x0-0x3F in the checksum */ 1428 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 1429 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) { 1430 DEBUGOUT("EEPROM read failed\n"); 1431 break; 1432 } 1433 checksum += word; 1434 } 1435 1436 /* Include all data from pointers except for the fw pointer */ 1437 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; 
i++) { 1438 hw->eeprom.ops.read(hw, i, &pointer); 1439 1440 /* Make sure the pointer seems valid */ 1441 if (pointer != 0xFFFF && pointer != 0) { 1442 hw->eeprom.ops.read(hw, pointer, &length); 1443 1444 if (length != 0xFFFF && length != 0) { 1445 for (j = pointer+1; j <= pointer+length; j++) { 1446 hw->eeprom.ops.read(hw, j, &word); 1447 checksum += word; 1448 } 1449 } 1450 } 1451 } 1452 1453 checksum = (u16)IXGBE_EEPROM_SUM - checksum; 1454 1455 return checksum; 1456} 1457 1458/** 1459 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum 1460 * @hw: pointer to hardware structure 1461 * @checksum_val: calculated checksum 1462 * 1463 * Performs checksum calculation and validates the EEPROM checksum. If the 1464 * caller does not need checksum_val, the value can be NULL. 1465 **/ 1466s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 1467 u16 *checksum_val) 1468{ 1469 s32 status; 1470 u16 checksum; 1471 u16 read_checksum = 0; 1472 1473 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); 1474 1475 /* 1476 * Read the first word from the EEPROM. 
If this times out or fails, do 1477 * not continue or we could be in for a very long wait while every 1478 * EEPROM read fails 1479 */ 1480 status = hw->eeprom.ops.read(hw, 0, &checksum); 1481 1482 if (status == IXGBE_SUCCESS) { 1483 checksum = hw->eeprom.ops.calc_checksum(hw); 1484 1485 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1486 1487 /* 1488 * Verify read checksum from EEPROM is the same as 1489 * calculated checksum 1490 */ 1491 if (read_checksum != checksum) 1492 status = IXGBE_ERR_EEPROM_CHECKSUM; 1493 1494 /* If the user cares, return the calculated checksum */ 1495 if (checksum_val) 1496 *checksum_val = checksum; 1497 } else { 1498 DEBUGOUT("EEPROM read failed\n"); 1499 } 1500 1501 return status; 1502} 1503 1504/** 1505 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum 1506 * @hw: pointer to hardware structure 1507 **/ 1508s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) 1509{ 1510 s32 status; 1511 u16 checksum; 1512 1513 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); 1514 1515 /* 1516 * Read the first word from the EEPROM. If this times out or fails, do 1517 * not continue or we could be in for a very long wait while every 1518 * EEPROM read fails 1519 */ 1520 status = hw->eeprom.ops.read(hw, 0, &checksum); 1521 1522 if (status == IXGBE_SUCCESS) { 1523 checksum = hw->eeprom.ops.calc_checksum(hw); 1524 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1525 checksum); 1526 } else { 1527 DEBUGOUT("EEPROM read failed\n"); 1528 } 1529 1530 return status; 1531} 1532 1533/** 1534 * ixgbe_validate_mac_addr - Validate MAC address 1535 * @mac_addr: pointer to MAC address. 
1536 * 1537 * Tests a MAC address to ensure it is a valid Individual Address 1538 **/ 1539s32 ixgbe_validate_mac_addr(u8 *mac_addr) 1540{ 1541 s32 status = IXGBE_SUCCESS; 1542 1543 DEBUGFUNC("ixgbe_validate_mac_addr"); 1544 1545 /* Make sure it is not a multicast address */ 1546 if (IXGBE_IS_MULTICAST(mac_addr)) { 1547 DEBUGOUT("MAC address is multicast\n"); 1548 status = IXGBE_ERR_INVALID_MAC_ADDR; 1549 /* Not a broadcast address */ 1550 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 1551 DEBUGOUT("MAC address is broadcast\n"); 1552 status = IXGBE_ERR_INVALID_MAC_ADDR; 1553 /* Reject the zero address */ 1554 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 1555 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 1556 DEBUGOUT("MAC address is all zeros\n"); 1557 status = IXGBE_ERR_INVALID_MAC_ADDR; 1558 } 1559 return status; 1560} 1561 1562/** 1563 * ixgbe_set_rar_generic - Set Rx address register 1564 * @hw: pointer to hardware structure 1565 * @index: Receive address register to write 1566 * @addr: Address to put into receive address register 1567 * @vmdq: VMDq "set" or "pool" index 1568 * @enable_addr: set flag that address is active 1569 * 1570 * Puts an ethernet address into a receive address register. 
1571 **/ 1572s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 1573 u32 enable_addr) 1574{ 1575 u32 rar_low, rar_high; 1576 u32 rar_entries = hw->mac.num_rar_entries; 1577 1578 DEBUGFUNC("ixgbe_set_rar_generic"); 1579 1580 /* Make sure we are using a valid rar index range */ 1581 if (index >= rar_entries) { 1582 DEBUGOUT1("RAR index %d is out of range.\n", index); 1583 return IXGBE_ERR_INVALID_ARGUMENT; 1584 } 1585 1586 /* setup VMDq pool selection before this RAR gets enabled */ 1587 hw->mac.ops.set_vmdq(hw, index, vmdq); 1588 1589 /* 1590 * HW expects these in little endian so we reverse the byte 1591 * order from network order (big endian) to little endian 1592 */ 1593 rar_low = ((u32)addr[0] | 1594 ((u32)addr[1] << 8) | 1595 ((u32)addr[2] << 16) | 1596 ((u32)addr[3] << 24)); 1597 /* 1598 * Some parts put the VMDq setting in the extra RAH bits, 1599 * so save everything except the lower 16 bits that hold part 1600 * of the address and the address valid bit. 1601 */ 1602 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1603 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1604 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); 1605 1606 if (enable_addr != 0) 1607 rar_high |= IXGBE_RAH_AV; 1608 1609 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1610 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1611 1612 return IXGBE_SUCCESS; 1613} 1614 1615/** 1616 * ixgbe_clear_rar_generic - Remove Rx address register 1617 * @hw: pointer to hardware structure 1618 * @index: Receive address register to write 1619 * 1620 * Clears an ethernet address from a receive address register. 
1621 **/ 1622s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) 1623{ 1624 u32 rar_high; 1625 u32 rar_entries = hw->mac.num_rar_entries; 1626 1627 DEBUGFUNC("ixgbe_clear_rar_generic"); 1628 1629 /* Make sure we are using a valid rar index range */ 1630 if (index >= rar_entries) { 1631 DEBUGOUT1("RAR index %d is out of range.\n", index); 1632 return IXGBE_ERR_INVALID_ARGUMENT; 1633 } 1634 1635 /* 1636 * Some parts put the VMDq setting in the extra RAH bits, 1637 * so save everything except the lower 16 bits that hold part 1638 * of the address and the address valid bit. 1639 */ 1640 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1641 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1642 1643 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 1644 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1645 1646 /* clear VMDq pool/queue selection for this RAR */ 1647 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1648 1649 return IXGBE_SUCCESS; 1650} 1651 1652/** 1653 * ixgbe_init_rx_addrs_generic - Initializes receive address filters. 1654 * @hw: pointer to hardware structure 1655 * 1656 * Places the MAC address in receive address register 0 and clears the rest 1657 * of the receive address registers. Clears the multicast table. Assumes 1658 * the receiver is in reset when the routine is called. 1659 **/ 1660s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) 1661{ 1662 u32 i; 1663 u32 rar_entries = hw->mac.num_rar_entries; 1664 1665 DEBUGFUNC("ixgbe_init_rx_addrs_generic"); 1666 1667 /* 1668 * If the current mac address is valid, assume it is a software override 1669 * to the permanent address. 1670 * Otherwise, use the permanent address from the eeprom. 
1671 */ 1672 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1673 IXGBE_ERR_INVALID_MAC_ADDR) { 1674 /* Get the MAC address from the RAR0 for later reference */ 1675 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1676 1677 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1678 hw->mac.addr[0], hw->mac.addr[1], 1679 hw->mac.addr[2]); 1680 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], 1681 hw->mac.addr[4], hw->mac.addr[5]); 1682 } else { 1683 /* Setup the receive address. */ 1684 DEBUGOUT("Overriding MAC Address in RAR[0]\n"); 1685 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", 1686 hw->mac.addr[0], hw->mac.addr[1], 1687 hw->mac.addr[2]); 1688 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], 1689 hw->mac.addr[4], hw->mac.addr[5]); 1690 1691 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
| 187 **/ 188s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) 189{ 190 u32 i; 191 u32 regval; 192 193 /* Clear the rate limiters */ 194 for (i = 0; i < hw->mac.max_tx_queues; i++) { 195 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 196 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 197 } 198 IXGBE_WRITE_FLUSH(hw); 199 200 /* Disable relaxed ordering */ 201 for (i = 0; i < hw->mac.max_tx_queues; i++) { 202 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 203 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 204 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 205 } 206 207 for (i = 0; i < hw->mac.max_rx_queues; i++) { 208 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 209 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 210 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 211 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 212 } 213 214 return IXGBE_SUCCESS; 215} 216 217/** 218 * ixgbe_init_hw_generic - Generic hardware initialization 219 * @hw: pointer to hardware structure 220 * 221 * Initialize the hardware by resetting the hardware, filling the bus info 222 * structure and media type, clears all on chip counters, initializes receive 223 * address registers, multicast table, VLAN filter table, calls routine to set 224 * up link and flow control settings, and leaves transmit and receive units 225 * disabled and uninitialized 226 **/ 227s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 228{ 229 s32 status; 230 231 DEBUGFUNC("ixgbe_init_hw_generic"); 232 233 /* Reset the hardware */ 234 status = hw->mac.ops.reset_hw(hw); 235 236 if (status == IXGBE_SUCCESS) { 237 /* Start the HW */ 238 status = hw->mac.ops.start_hw(hw); 239 } 240 241 return status; 242} 243 244/** 245 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 246 * @hw: pointer to hardware structure 247 * 248 * Clears all hardware statistics counters by reading them from the hardware 249 * Statistics counters are clear on read. 
250 **/ 251s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) 252{ 253 u16 i = 0; 254 255 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); 256 257 IXGBE_READ_REG(hw, IXGBE_CRCERRS); 258 IXGBE_READ_REG(hw, IXGBE_ILLERRC); 259 IXGBE_READ_REG(hw, IXGBE_ERRBC); 260 IXGBE_READ_REG(hw, IXGBE_MSPDC); 261 for (i = 0; i < 8; i++) 262 IXGBE_READ_REG(hw, IXGBE_MPC(i)); 263 264 IXGBE_READ_REG(hw, IXGBE_MLFC); 265 IXGBE_READ_REG(hw, IXGBE_MRFC); 266 IXGBE_READ_REG(hw, IXGBE_RLEC); 267 IXGBE_READ_REG(hw, IXGBE_LXONTXC); 268 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 269 if (hw->mac.type >= ixgbe_mac_82599EB) { 270 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 271 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 272 } else { 273 IXGBE_READ_REG(hw, IXGBE_LXONRXC); 274 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 275 } 276 277 for (i = 0; i < 8; i++) { 278 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 279 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 280 if (hw->mac.type >= ixgbe_mac_82599EB) { 281 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 282 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 283 } else { 284 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 285 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 286 } 287 } 288 if (hw->mac.type >= ixgbe_mac_82599EB) 289 for (i = 0; i < 8; i++) 290 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 291 IXGBE_READ_REG(hw, IXGBE_PRC64); 292 IXGBE_READ_REG(hw, IXGBE_PRC127); 293 IXGBE_READ_REG(hw, IXGBE_PRC255); 294 IXGBE_READ_REG(hw, IXGBE_PRC511); 295 IXGBE_READ_REG(hw, IXGBE_PRC1023); 296 IXGBE_READ_REG(hw, IXGBE_PRC1522); 297 IXGBE_READ_REG(hw, IXGBE_GPRC); 298 IXGBE_READ_REG(hw, IXGBE_BPRC); 299 IXGBE_READ_REG(hw, IXGBE_MPRC); 300 IXGBE_READ_REG(hw, IXGBE_GPTC); 301 IXGBE_READ_REG(hw, IXGBE_GORCL); 302 IXGBE_READ_REG(hw, IXGBE_GORCH); 303 IXGBE_READ_REG(hw, IXGBE_GOTCL); 304 IXGBE_READ_REG(hw, IXGBE_GOTCH); 305 for (i = 0; i < 8; i++) 306 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 307 IXGBE_READ_REG(hw, IXGBE_RUC); 308 IXGBE_READ_REG(hw, IXGBE_RFC); 309 IXGBE_READ_REG(hw, IXGBE_ROC); 310 IXGBE_READ_REG(hw, IXGBE_RJC); 311 
IXGBE_READ_REG(hw, IXGBE_MNGPRC); 312 IXGBE_READ_REG(hw, IXGBE_MNGPDC); 313 IXGBE_READ_REG(hw, IXGBE_MNGPTC); 314 IXGBE_READ_REG(hw, IXGBE_TORL); 315 IXGBE_READ_REG(hw, IXGBE_TORH); 316 IXGBE_READ_REG(hw, IXGBE_TPR); 317 IXGBE_READ_REG(hw, IXGBE_TPT); 318 IXGBE_READ_REG(hw, IXGBE_PTC64); 319 IXGBE_READ_REG(hw, IXGBE_PTC127); 320 IXGBE_READ_REG(hw, IXGBE_PTC255); 321 IXGBE_READ_REG(hw, IXGBE_PTC511); 322 IXGBE_READ_REG(hw, IXGBE_PTC1023); 323 IXGBE_READ_REG(hw, IXGBE_PTC1522); 324 IXGBE_READ_REG(hw, IXGBE_MPTC); 325 IXGBE_READ_REG(hw, IXGBE_BPTC); 326 for (i = 0; i < 16; i++) { 327 IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 328 IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 329 if (hw->mac.type >= ixgbe_mac_82599EB) { 330 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 331 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); 332 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 333 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); 334 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 335 } else { 336 IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 337 IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 338 } 339 } 340 341 return IXGBE_SUCCESS; 342} 343 344/** 345 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM 346 * @hw: pointer to hardware structure 347 * @pba_num: stores the part number string from the EEPROM 348 * @pba_num_size: part number string buffer length 349 * 350 * Reads the part number string from the EEPROM. 
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
                                  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* First PBA word: either the first half of a legacy hex PBA or the
	 * IXGBE_PBANUM_PTR_GUARD marker indicating string format. */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/* Second PBA word: either the second legacy data word or a pointer
	 * to the length-prefixed PBA string block. */
	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA:
		 * 8 hex digits + '-' + embedded '0' + terminating NUL */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;	/* nibble 0: converted to '0' below */
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; the '-' at
		 * index 6 is > 0x10 so both branches skip it */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* String format: first word of the pointed-to block is its length
	 * in words, including the length word itself */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough:
	 * (length - 1) data words * 2 chars each + 1 for NUL */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two ASCII characters, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number length from the EEPROM.
 * Returns expected buffer size in pba_num_size
 **/
s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_length_generic");

	if (pba_num_size == NULL) {
		DEBUGOUT("PBA buffer size was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/* if data is not ptr guard the PBA must be in legacy format,
	 * which always decodes to an 11-character string (see
	 * ixgbe_read_pba_string_generic) */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		*pba_num_size = 11;
		return IXGBE_SUCCESS;
	}

	/* first word of the string block is its length in words */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/*
	 * Convert from length in u16 values to u8 chars, add 1 for NULL,
	 * and subtract 2 because the length field is included in length:
	 * (length - 1) * 2 + 1 == length * 2 - 1
	 */
	*pba_num_size = ((u32)length * 2) - 1;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number from the EEPROM
 *
 * Reads the part number from the EEPROM.
521 **/ 522s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 523{ 524 s32 ret_val; 525 u16 data; 526 527 DEBUGFUNC("ixgbe_read_pba_num_generic"); 528 529 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 530 if (ret_val) { 531 DEBUGOUT("NVM Read Error\n"); 532 return ret_val; 533 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 534 DEBUGOUT("NVM Not supported\n"); 535 return IXGBE_NOT_IMPLEMENTED; 536 } 537 *pba_num = (u32)(data << 16); 538 539 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 540 if (ret_val) { 541 DEBUGOUT("NVM Read Error\n"); 542 return ret_val; 543 } 544 *pba_num |= data; 545 546 return IXGBE_SUCCESS; 547} 548 549/** 550 * ixgbe_get_mac_addr_generic - Generic get MAC address 551 * @hw: pointer to hardware structure 552 * @mac_addr: Adapter MAC address 553 * 554 * Reads the adapter's MAC address from first Receive Address Register (RAR0) 555 * A reset of the adapter must be performed prior to calling this function 556 * in order for the MAC address to have been loaded from the EEPROM into RAR0 557 **/ 558s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) 559{ 560 u32 rar_high; 561 u32 rar_low; 562 u16 i; 563 564 DEBUGFUNC("ixgbe_get_mac_addr_generic"); 565 566 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); 567 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); 568 569 for (i = 0; i < 4; i++) 570 mac_addr[i] = (u8)(rar_low >> (i*8)); 571 572 for (i = 0; i < 2; i++) 573 mac_addr[i+4] = (u8)(rar_high >> (i*8)); 574 575 return IXGBE_SUCCESS; 576} 577 578/** 579 * ixgbe_get_bus_info_generic - Generic set PCI bus info 580 * @hw: pointer to hardware structure 581 * 582 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure 583 **/ 584s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 585{ 586 struct ixgbe_mac_info *mac = &hw->mac; 587 u16 link_status; 588 589 DEBUGFUNC("ixgbe_get_bus_info_generic"); 590 591 hw->bus.type = ixgbe_bus_type_pci_express; 592 593 /* Get the negotiated link width 
and speed from PCI config space */ 594 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); 595 596 switch (link_status & IXGBE_PCI_LINK_WIDTH) { 597 case IXGBE_PCI_LINK_WIDTH_1: 598 hw->bus.width = ixgbe_bus_width_pcie_x1; 599 break; 600 case IXGBE_PCI_LINK_WIDTH_2: 601 hw->bus.width = ixgbe_bus_width_pcie_x2; 602 break; 603 case IXGBE_PCI_LINK_WIDTH_4: 604 hw->bus.width = ixgbe_bus_width_pcie_x4; 605 break; 606 case IXGBE_PCI_LINK_WIDTH_8: 607 hw->bus.width = ixgbe_bus_width_pcie_x8; 608 break; 609 default: 610 hw->bus.width = ixgbe_bus_width_unknown; 611 break; 612 } 613 614 switch (link_status & IXGBE_PCI_LINK_SPEED) { 615 case IXGBE_PCI_LINK_SPEED_2500: 616 hw->bus.speed = ixgbe_bus_speed_2500; 617 break; 618 case IXGBE_PCI_LINK_SPEED_5000: 619 hw->bus.speed = ixgbe_bus_speed_5000; 620 break; 621 default: 622 hw->bus.speed = ixgbe_bus_speed_unknown; 623 break; 624 } 625 626 mac->ops.set_lan_id(hw); 627 628 return IXGBE_SUCCESS; 629} 630 631/** 632 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices 633 * @hw: pointer to the HW structure 634 * 635 * Determines the LAN function id by reading memory-mapped registers 636 * and swaps the port value if requested. 637 **/ 638void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) 639{ 640 struct ixgbe_bus_info *bus = &hw->bus; 641 u32 reg; 642 643 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); 644 645 reg = IXGBE_READ_REG(hw, IXGBE_STATUS); 646 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; 647 bus->lan_id = bus->func; 648 649 /* check for a port swap */ 650 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); 651 if (reg & IXGBE_FACTPS_LFS) 652 bus->func ^= 0x1; 653} 654 655/** 656 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 657 * @hw: pointer to hardware structure 658 * 659 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 660 * disables transmit and receive units. 
The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_val &= ~(IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
	IXGBE_WRITE_FLUSH(hw);
	/* allow in-flight receive DMA to drain */
	msec_delay(2);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (EICR is read-to-clear) */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
		}
	}

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	ixgbe_disable_pcie_master(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn on
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_on_generic");

	/* To turn on the LED, set mode to ON. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	DEBUGFUNC("ixgbe_led_off_generic");

	/* To turn off the LED, set mode to OFF. */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* only initialize once; type != uninitialized means done already */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			/* word size is 2^(encoded size + base shift) */
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
		}

		/* 8- vs 16-bit addressing, as reported by hardware */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	/* NOTE(review): init_params return value is ignored here and in the
	 * read paths below - presumably it cannot fail after first call;
	 * confirm against the ops implementation */
	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/* Send the WRITE ENABLE command (8 bit opcode ) */
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);

		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr); address is
		 * in bytes, hence offset*2 */
		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
					    hw->eeprom.address_bits);

		/* Send the data, byte-swapped to EEPROM (big-endian) order */
		data = (data >> 8) | (data << 8);
		ixgbe_shift_out_eeprom_bits(hw, data, 16);
		ixgbe_standby_eeprom(hw);

		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;

	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		ixgbe_standby_eeprom(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((hw->eeprom.address_bits == 8) && (offset >= 128))
			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

		/* Send the READ command (opcode + addr); address is in
		 * bytes, hence offset*2 */
		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
					    IXGBE_EEPROM_OPCODE_BITS);
		ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
					    hw->eeprom.address_bits);

		/* Read the data and byte-swap from EEPROM order */
		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
		*data = (word_in >> 8) | (word_in << 8);

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

out:
	return status;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	s32 status;

	DEBUGFUNC("ixgbe_read_eerd_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* build EERD command: address field plus the START bit */
	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
	       IXGBE_EEPROM_RW_REG_START;

	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

	if (status == IXGBE_SUCCESS)
		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
			 IXGBE_EEPROM_RW_REG_DATA);
	else
		DEBUGOUT("Eeprom read timed out\n");

out:
	return status;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	u32 eewr;
	s32 status;

	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/* build EEWR command: address + data fields plus the START bit */
	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
	       (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;

	/* wait until any previous EEWR operation has completed before
	 * issuing the new one */
	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
	if (status != IXGBE_SUCCESS) {
		DEBUGOUT("Eeprom write EEWR timed out\n");
		goto out;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

	/* wait for this write to complete */
	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
	if (status != IXGBE_SUCCESS) {
		DEBUGOUT("Eeprom write EEWR timed out\n");
		goto out;
	}

out:
	return status;
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	/* default to timeout; overwritten on DONE */
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(5);
	}
	return status;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* first take the software/firmware semaphore for the EEPROM */
	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* poll for the hardware grant */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK so the device starts in a known
			 * bus state */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware can clear it to deny access.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
1170 **/ 1171static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) 1172{ 1173 u32 swsm; 1174 1175 DEBUGFUNC("ixgbe_release_eeprom_semaphore"); 1176 1177 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); 1178 1179 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ 1180 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); 1181 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); 1182 IXGBE_WRITE_FLUSH(hw); 1183} 1184 1185/** 1186 * ixgbe_ready_eeprom - Polls for EEPROM ready 1187 * @hw: pointer to hardware structure 1188 **/ 1189static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 1190{ 1191 s32 status = IXGBE_SUCCESS; 1192 u16 i; 1193 u8 spi_stat_reg; 1194 1195 DEBUGFUNC("ixgbe_ready_eeprom"); 1196 1197 /* 1198 * Read "Status Register" repeatedly until the LSB is cleared. The 1199 * EEPROM will signal that the command has been completed by clearing 1200 * bit 0 of the internal status register. If it's not cleared within 1201 * 5 milliseconds, then error out. 1202 */ 1203 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1204 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1205 IXGBE_EEPROM_OPCODE_BITS); 1206 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1207 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1208 break; 1209 1210 usec_delay(5); 1211 ixgbe_standby_eeprom(hw); 1212 }; 1213 1214 /* 1215 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 1216 * devices (and only 0-5mSec on 5V devices) 1217 */ 1218 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 1219 DEBUGOUT("SPI EEPROM Status error\n"); 1220 status = IXGBE_ERR_EEPROM; 1221 } 1222 1223 return status; 1224} 1225 1226/** 1227 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 1228 * @hw: pointer to hardware structure 1229 **/ 1230static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 1231{ 1232 u32 eec; 1233 1234 DEBUGFUNC("ixgbe_standby_eeprom"); 1235 1236 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1237 1238 /* Toggle CS to flush commands */ 1239 eec |= IXGBE_EEC_CS; 
1240 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1241 IXGBE_WRITE_FLUSH(hw); 1242 usec_delay(1); 1243 eec &= ~IXGBE_EEC_CS; 1244 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1245 IXGBE_WRITE_FLUSH(hw); 1246 usec_delay(1); 1247} 1248 1249/** 1250 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 1251 * @hw: pointer to hardware structure 1252 * @data: data to send to the EEPROM 1253 * @count: number of bits to shift out 1254 **/ 1255static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 1256 u16 count) 1257{ 1258 u32 eec; 1259 u32 mask; 1260 u32 i; 1261 1262 DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); 1263 1264 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1265 1266 /* 1267 * Mask is used to shift "count" bits of "data" out to the EEPROM 1268 * one bit at a time. Determine the starting bit based on count 1269 */ 1270 mask = 0x01 << (count - 1); 1271 1272 for (i = 0; i < count; i++) { 1273 /* 1274 * A "1" is shifted out to the EEPROM by setting bit "DI" to a 1275 * "1", and then raising and then lowering the clock (the SK 1276 * bit controls the clock input to the EEPROM). A "0" is 1277 * shifted out to the EEPROM by setting "DI" to "0" and then 1278 * raising and then lowering the clock. 1279 */ 1280 if (data & mask) 1281 eec |= IXGBE_EEC_DI; 1282 else 1283 eec &= ~IXGBE_EEC_DI; 1284 1285 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1286 IXGBE_WRITE_FLUSH(hw); 1287 1288 usec_delay(1); 1289 1290 ixgbe_raise_eeprom_clk(hw, &eec); 1291 ixgbe_lower_eeprom_clk(hw, &eec); 1292 1293 /* 1294 * Shift mask to signify next bit of data to shift in to the 1295 * EEPROM 1296 */ 1297 mask = mask >> 1; 1298 }; 1299 1300 /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ 1301 eec &= ~IXGBE_EEC_DI; 1302 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1303 IXGBE_WRITE_FLUSH(hw); 1304} 1305 1306/** 1307 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 1308 * @hw: pointer to hardware structure 1309 **/ 1310static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 1311{ 1312 u32 eec; 1313 u32 i; 1314 u16 data = 0; 1315 1316 DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); 1317 1318 /* 1319 * In order to read a register from the EEPROM, we need to shift 1320 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 1321 * the clock input to the EEPROM (setting the SK bit), and then reading 1322 * the value of the "DO" bit. During this "shifting in" process the 1323 * "DI" bit should always be clear. 1324 */ 1325 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1326 1327 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 1328 1329 for (i = 0; i < count; i++) { 1330 data = data << 1; 1331 ixgbe_raise_eeprom_clk(hw, &eec); 1332 1333 eec = IXGBE_READ_REG(hw, IXGBE_EEC); 1334 1335 eec &= ~(IXGBE_EEC_DI); 1336 if (eec & IXGBE_EEC_DO) 1337 data |= 1; 1338 1339 ixgbe_lower_eeprom_clk(hw, &eec); 1340 } 1341 1342 return data; 1343} 1344 1345/** 1346 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 1347 * @hw: pointer to hardware structure 1348 * @eec: EEC register's current value 1349 **/ 1350static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 1351{ 1352 DEBUGFUNC("ixgbe_raise_eeprom_clk"); 1353 1354 /* 1355 * Raise the clock input to the EEPROM 1356 * (setting the SK bit), then delay 1357 */ 1358 *eec = *eec | IXGBE_EEC_SK; 1359 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); 1360 IXGBE_WRITE_FLUSH(hw); 1361 usec_delay(1); 1362} 1363 1364/** 1365 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec |= IXGBE_EEC_CS;  /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3F plus all pointed-to sections (except the FW section)
 * and returns the value that makes the overall sum equal IXGBE_EEPROM_SUM.
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
			DEBUGOUT("EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	/* NOTE(review): read return values are ignored in this loop, so a
	 * failed read folds a stale 'pointer'/'word' into the checksum -
	 * presumably acceptable because validation fails anyway; confirm */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR;
	     i++) {
		hw->eeprom.ops.read(hw, i, &pointer);

		/* Make sure the pointer seems valid */
		if (pointer != 0xFFFF && pointer != 0) {
			hw->eeprom.ops.read(hw, pointer, &length);

			if (length != 0xFFFF && length != 0) {
				for (j = pointer+1; j <= pointer+length; j++) {
					hw->eeprom.ops.read(hw, j, &word);
					checksum += word;
				}
			}
		}
	}

	/* the stored checksum word must bring the total to IXGBE_EEPROM_SUM */
	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum.  If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		/* compare against the checksum word stored in the EEPROM */
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recalculates the checksum and writes it to the checksum word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status == IXGBE_SUCCESS) {
		checksum = hw->eeprom.ops.calc_checksum(hw);
		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
					      checksum);
	} else {
		DEBUGOUT("EEPROM read failed\n");
	}

	return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
1537 * 1538 * Tests a MAC address to ensure it is a valid Individual Address 1539 **/ 1540s32 ixgbe_validate_mac_addr(u8 *mac_addr) 1541{ 1542 s32 status = IXGBE_SUCCESS; 1543 1544 DEBUGFUNC("ixgbe_validate_mac_addr"); 1545 1546 /* Make sure it is not a multicast address */ 1547 if (IXGBE_IS_MULTICAST(mac_addr)) { 1548 DEBUGOUT("MAC address is multicast\n"); 1549 status = IXGBE_ERR_INVALID_MAC_ADDR; 1550 /* Not a broadcast address */ 1551 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 1552 DEBUGOUT("MAC address is broadcast\n"); 1553 status = IXGBE_ERR_INVALID_MAC_ADDR; 1554 /* Reject the zero address */ 1555 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 1556 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 1557 DEBUGOUT("MAC address is all zeros\n"); 1558 status = IXGBE_ERR_INVALID_MAC_ADDR; 1559 } 1560 return status; 1561} 1562 1563/** 1564 * ixgbe_set_rar_generic - Set Rx address register 1565 * @hw: pointer to hardware structure 1566 * @index: Receive address register to write 1567 * @addr: Address to put into receive address register 1568 * @vmdq: VMDq "set" or "pool" index 1569 * @enable_addr: set flag that address is active 1570 * 1571 * Puts an ethernet address into a receive address register. 
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                          u32 enable_addr)
{
	u32 rar_low, rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_rar_generic");

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
	           ((u32)addr[1] << 8) |
	           ((u32)addr[2] << 16) |
	           ((u32)addr[3] << 24));
	/*
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	/* Top two address bytes live in the low 16 bits of RAH */
	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

	/* Only mark the entry valid when the caller requests it */
	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
1622 **/ 1623s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) 1624{ 1625 u32 rar_high; 1626 u32 rar_entries = hw->mac.num_rar_entries; 1627 1628 DEBUGFUNC("ixgbe_clear_rar_generic"); 1629 1630 /* Make sure we are using a valid rar index range */ 1631 if (index >= rar_entries) { 1632 DEBUGOUT1("RAR index %d is out of range.\n", index); 1633 return IXGBE_ERR_INVALID_ARGUMENT; 1634 } 1635 1636 /* 1637 * Some parts put the VMDq setting in the extra RAH bits, 1638 * so save everything except the lower 16 bits that hold part 1639 * of the address and the address valid bit. 1640 */ 1641 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1642 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1643 1644 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 1645 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1646 1647 /* clear VMDq pool/queue selection for this RAR */ 1648 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1649 1650 return IXGBE_SUCCESS; 1651} 1652 1653/** 1654 * ixgbe_init_rx_addrs_generic - Initializes receive address filters. 1655 * @hw: pointer to hardware structure 1656 * 1657 * Places the MAC address in receive address register 0 and clears the rest 1658 * of the receive address registers. Clears the multicast table. Assumes 1659 * the receiver is in reset when the routine is called. 1660 **/ 1661s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) 1662{ 1663 u32 i; 1664 u32 rar_entries = hw->mac.num_rar_entries; 1665 1666 DEBUGFUNC("ixgbe_init_rx_addrs_generic"); 1667 1668 /* 1669 * If the current mac address is valid, assume it is a software override 1670 * to the permanent address. 1671 * Otherwise, use the permanent address from the eeprom. 
1672 */ 1673 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1674 IXGBE_ERR_INVALID_MAC_ADDR) { 1675 /* Get the MAC address from the RAR0 for later reference */ 1676 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1677 1678 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1679 hw->mac.addr[0], hw->mac.addr[1], 1680 hw->mac.addr[2]); 1681 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], 1682 hw->mac.addr[4], hw->mac.addr[5]); 1683 } else { 1684 /* Setup the receive address. */ 1685 DEBUGOUT("Overriding MAC Address in RAR[0]\n"); 1686 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", 1687 hw->mac.addr[0], hw->mac.addr[1], 1688 hw->mac.addr[2]); 1689 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], 1690 hw->mac.addr[4], hw->mac.addr[5]); 1691 1692 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
|
| 1693 1694 /* clear VMDq pool/queue selection for RAR 0 */ 1695 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
|
1692 } 1693 hw->addr_ctrl.overflow_promisc = 0; 1694 1695 hw->addr_ctrl.rar_used_count = 1; 1696 1697 /* Zero out the other receive addresses. */ 1698 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); 1699 for (i = 1; i < rar_entries; i++) { 1700 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1701 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1702 } 1703 1704 /* Clear the MTA */ 1705 hw->addr_ctrl.mta_in_use = 0; 1706 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1707 1708 DEBUGOUT(" Clearing MTA\n"); 1709 for (i = 0; i < hw->mac.mcft_size; i++) 1710 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1711 1712 ixgbe_init_uta_tables(hw); 1713 1714 return IXGBE_SUCCESS; 1715} 1716 1717/** 1718 * ixgbe_add_uc_addr - Adds a secondary unicast address. 1719 * @hw: pointer to hardware structure 1720 * @addr: new address 1721 * 1722 * Adds it to unused receive address register or goes into promiscuous mode. 1723 **/ 1724void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 1725{ 1726 u32 rar_entries = hw->mac.num_rar_entries; 1727 u32 rar; 1728 1729 DEBUGFUNC("ixgbe_add_uc_addr"); 1730 1731 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", 1732 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 1733 1734 /* 1735 * Place this address in the RAR if there is room, 1736 * else put the controller into promiscuous mode 1737 */ 1738 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1739 rar = hw->addr_ctrl.rar_used_count; 1740 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 1741 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); 1742 hw->addr_ctrl.rar_used_count++; 1743 } else { 1744 hw->addr_ctrl.overflow_promisc++; 1745 } 1746 1747 DEBUGOUT("ixgbe_add_uc_addr Complete\n"); 1748} 1749 1750/** 1751 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1752 * @hw: pointer to hardware structure 1753 * @addr_list: the list of new addresses 1754 * @addr_count: number of addresses 1755 * @next: iterator function to walk the address list 
1756 * 1757 * The given list replaces any existing list. Clears the secondary addrs from 1758 * receive address registers. Uses unused receive address registers for the 1759 * first secondary addresses, and falls back to promiscuous mode as needed. 1760 * 1761 * Drivers using secondary unicast addresses must set user_set_promisc when 1762 * manually putting the device into promiscuous mode. 1763 **/ 1764s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 1765 u32 addr_count, ixgbe_mc_addr_itr next) 1766{ 1767 u8 *addr; 1768 u32 i; 1769 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1770 u32 uc_addr_in_use; 1771 u32 fctrl; 1772 u32 vmdq; 1773 1774 DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); 1775 1776 /* 1777 * Clear accounting of old secondary address list, 1778 * don't count RAR[0] 1779 */ 1780 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; 1781 hw->addr_ctrl.rar_used_count -= uc_addr_in_use; 1782 hw->addr_ctrl.overflow_promisc = 0; 1783 1784 /* Zero out the other receive addresses */ 1785 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); 1786 for (i = 0; i < uc_addr_in_use; i++) { 1787 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); 1788 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); 1789 } 1790 1791 /* Add the new addresses */ 1792 for (i = 0; i < addr_count; i++) { 1793 DEBUGOUT(" Adding the secondary addresses:\n"); 1794 addr = next(hw, &addr_list, &vmdq); 1795 ixgbe_add_uc_addr(hw, addr, vmdq); 1796 } 1797 1798 if (hw->addr_ctrl.overflow_promisc) { 1799 /* enable promisc if not already in overflow or set by user */ 1800 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 1801 DEBUGOUT(" Entering address overflow promisc mode\n"); 1802 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1803 fctrl |= IXGBE_FCTRL_UPE; 1804 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1805 } 1806 } else { 1807 /* only disable if set by overflow, not by user */ 1808 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 1809 DEBUGOUT(" Leaving 
address overflow promisc mode\n"); 1810 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1811 fctrl &= ~IXGBE_FCTRL_UPE; 1812 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1813 } 1814 } 1815 1816 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); 1817 return IXGBE_SUCCESS; 1818} 1819 1820/** 1821 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1822 * @hw: pointer to hardware structure 1823 * @mc_addr: the multicast address 1824 * 1825 * Extracts the 12 bits, from a multicast address, to determine which 1826 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1827 * incoming rx multicast addresses, to determine the bit-vector to check in 1828 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1829 * by the MO field of the MCSTCTRL. The MO field is set during initialization 1830 * to mc_filter_type. 1831 **/ 1832static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1833{ 1834 u32 vector = 0; 1835 1836 DEBUGFUNC("ixgbe_mta_vector"); 1837 1838 switch (hw->mac.mc_filter_type) { 1839 case 0: /* use bits [47:36] of the address */ 1840 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1841 break; 1842 case 1: /* use bits [46:35] of the address */ 1843 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1844 break; 1845 case 2: /* use bits [45:34] of the address */ 1846 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1847 break; 1848 case 3: /* use bits [43:32] of the address */ 1849 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1850 break; 1851 default: /* Invalid mc_filter_type */ 1852 DEBUGOUT("MC filter type param set incorrectly\n"); 1853 ASSERT(0); 1854 break; 1855 } 1856 1857 /* vector can only be 12-bits or boundary will be exceeded */ 1858 vector &= 0xFFF; 1859 return vector; 1860} 1861 1862/** 1863 * ixgbe_set_mta - Set bit-vector in multicast table 1864 * @hw: pointer to hardware structure 1865 * @hash_value: Multicast address hash value 1866 * 1867 * Sets the 
bit-vector in the multicast table.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	/* Track how many MTA entries are in use so MFE can be gated later */
	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	/* Only the shadow copy is updated here; HW is written by the caller */
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                      u32 mc_addr_count, ixgbe_mc_addr_itr next)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	DEBUGOUT(" Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Update mta_shadow */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: push the whole shadow array to hardware */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
		                      hw->mac.mta_shadow[i]);

	/* Turn on the multicast filter only when at least one entry is set */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
		                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_enable_mc_generic");

	/* Re-enable MFE only if the MTA actually has entries */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
		                hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

	DEBUGFUNC("ixgbe_disable_mc_generic");

	/* Writing MCSTCTRL without MFE turns the multicast filter off */
	if (a->mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (0-7)
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 rx_pba_size;
	u32 fcrtl, fcrth;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
		goto out;

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Packet buffer size, in KB, used to scale the watermarks */
	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

	/* Thresholds measured down from the top of the packet buffer */
	fcrth = (rx_pba_size - hw->fc.high_water) << 10;
	fcrtl = (rx_pba_size - hw->fc.low_water) << 10;

	/* ixgbe_fc_tx_pause (2) is a bit within ixgbe_fc_full (3) */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		fcrth |= IXGBE_FCRTH_FCEN;
		if (hw->fc.send_xon)
			fcrtl |= IXGBE_FCRTL_XONE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);

	/* Configure pause time (2 TCs per register) */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	/* Refresh threshold: half the pause time */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	if (hw->fc.disable_fc_autoneg)
		goto out;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be up.
	 * So use link_up_wait_to_complete=FALSE.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up) {
		ret_val = IXGBE_ERR_FLOW_CONTROL;
		goto out;
	}

	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = TRUE;
	} else {
		/* Negotiation failed: fall back to the requested mode */
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
2165 **/ 2166static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2167{ 2168 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2169 s32 ret_val; 2170 2171 /* 2172 * On multispeed fiber at 1g, bail out if 2173 * - link is up but AN did not complete, or if 2174 * - link is up and AN completed but timed out 2175 */ 2176 2177 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2178 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2179 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2180 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2181 goto out; 2182 } 2183 2184 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2185 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2186 2187 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2188 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2189 IXGBE_PCS1GANA_ASM_PAUSE, 2190 IXGBE_PCS1GANA_SYM_PAUSE, 2191 IXGBE_PCS1GANA_ASM_PAUSE); 2192 2193out: 2194 return ret_val; 2195} 2196 2197/** 2198 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2199 * @hw: pointer to hardware structure 2200 * 2201 * Enable flow control according to IEEE clause 37. 
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		hw->fc.fc_was_autonegged = FALSE;
		hw->fc.current_mode = hw->fc.requested_mode;
		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
		goto out;
	}

	/* 82599-only: partner must also support autoneg */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			hw->fc.fc_was_autonegged = FALSE;
			hw->fc.current_mode = hw->fc.requested_mode;
			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
	                             anlp1_reg, IXGBE_AUTOC_SYM_PAUSE,
	                             IXGBE_AUTOC_ASM_PAUSE,
	                             IXGBE_ANLP1_SYM_PAUSE,
	                             IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	/* Local advertisement and link partner ability over MDIO */
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
	                     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
	                     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
	                          (u32)lp_technology_ability_reg,
	                          IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
	                          IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                              u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	/* Nothing advertised on either side means nothing was negotiated */
	if ((!(adv_reg)) || (!(lp_reg)))
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			DEBUGOUT("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
		}
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	           (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* We asked for asymmetric only; partner can honor it */
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	           !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* Partner asked for asymmetric only; we can honor it */
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
	} else {
		hw->fc.current_mode = ixgbe_fc_none;
		DEBUGOUT("Flow Control = NONE.\n");
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_setup_fc - Set up flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (0-7)
 *
 * Called at init time to set up flow control.
 **/
s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;

	DEBUGFUNC("ixgbe_setup_fc");

	/* Validate the packetbuf configuration */
	if (packetbuf_num < 0 || packetbuf_num > 7) {
		DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
		          " 0-7\n", packetbuf_num);
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * Validate the water mark configuration.  Zero water marks are invalid
	 * because it causes the controller to just blast out fc packets.
	 */
	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
		DEBUGOUT("Invalid water mark configuration\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2353 */ 2354 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 2355 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 2356 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2357 goto out; 2358 } 2359 2360 /* 2361 * 10gig parts do not have a word in the EEPROM to determine the 2362 * default flow control setting, so we explicitly set it to full. 2363 */ 2364 if (hw->fc.requested_mode == ixgbe_fc_default) 2365 hw->fc.requested_mode = ixgbe_fc_full; 2366 2367 /* 2368 * Set up the 1G and 10G flow control advertisement registers so the 2369 * HW will be able to do fc autoneg once the cable is plugged in. If 2370 * we link at 10G, the 1G advertisement is harmless and vice versa. 2371 */ 2372 2373 switch (hw->phy.media_type) { 2374 case ixgbe_media_type_fiber: 2375 case ixgbe_media_type_backplane: 2376 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2377 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2378 break; 2379 2380 case ixgbe_media_type_copper: 2381 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2382 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); 2383 break; 2384 2385 default: 2386 ; 2387 } 2388 2389 /* 2390 * The possible values of fc.requested_mode are: 2391 * 0: Flow control is completely disabled 2392 * 1: Rx flow control is enabled (we can receive pause frames, 2393 * but not send pause frames). 2394 * 2: Tx flow control is enabled (we can send pause frames but 2395 * we do not support receiving pause frames). 2396 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2397 * other: Invalid. 2398 */ 2399 switch (hw->fc.requested_mode) { 2400 case ixgbe_fc_none: 2401 /* Flow control completely disabled by software override. 
*/ 2402 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2403 if (hw->phy.media_type == ixgbe_media_type_backplane) 2404 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 2405 IXGBE_AUTOC_ASM_PAUSE); 2406 else if (hw->phy.media_type == ixgbe_media_type_copper) 2407 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2408 break; 2409 case ixgbe_fc_rx_pause: 2410 /* 2411 * Rx Flow control is enabled and Tx Flow control is 2412 * disabled by software override. Since there really 2413 * isn't a way to advertise that we are capable of RX 2414 * Pause ONLY, we will advertise that we support both 2415 * symmetric and asymmetric Rx PAUSE. Later, we will 2416 * disable the adapter's ability to send PAUSE frames. 2417 */ 2418 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2419 if (hw->phy.media_type == ixgbe_media_type_backplane) 2420 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2421 IXGBE_AUTOC_ASM_PAUSE); 2422 else if (hw->phy.media_type == ixgbe_media_type_copper) 2423 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2424 break; 2425 case ixgbe_fc_tx_pause: 2426 /* 2427 * Tx Flow control is enabled, and Rx Flow control is 2428 * disabled by software override. 2429 */ 2430 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2431 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2432 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2433 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); 2434 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); 2435 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 2436 reg_cu |= (IXGBE_TAF_ASM_PAUSE); 2437 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); 2438 } 2439 break; 2440 case ixgbe_fc_full: 2441 /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ 2442 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2443 if (hw->phy.media_type == ixgbe_media_type_backplane) 2444 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2445 IXGBE_AUTOC_ASM_PAUSE); 2446 else if (hw->phy.media_type == ixgbe_media_type_copper) 2447 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2448 break; 2449 default: 2450 DEBUGOUT("Flow control param set incorrectly\n"); 2451 ret_val = IXGBE_ERR_CONFIG; 2452 goto out; 2453 break; 2454 } 2455 2456 /* 2457 * Enable auto-negotiation between the MAC & PHY; 2458 * the MAC will advertise clause 37 flow control. 2459 */ 2460 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2461 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2462 2463 /* Disable AN timeout */ 2464 if (hw->fc.strict_ieee) 2465 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2466 2467 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2468 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2469 2470 /* 2471 * AUTOC restart handles negotiation of 1G and 10G on backplane 2472 * and copper. There is no need to set the PCS1GCTL register. 2473 * 2474 */ 2475 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2476 reg_bp |= IXGBE_AUTOC_AN_RESTART; 2477 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 2478 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 2479 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) { 2480 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2481 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); 2482 } 2483 2484 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 2485out: 2486 return ret_val; 2487} 2488 2489/** 2490 * ixgbe_disable_pcie_master - Disable PCI-express master access 2491 * @hw: pointer to hardware structure 2492 * 2493 * Disables PCI-Express master access and verifies there are no pending 2494 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2495 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 2496 * is returned signifying master requests disabled. 
2497 **/ 2498s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2499{ 2500 u32 i; 2501 u32 reg_val; 2502 u32 number_of_queues; 2503 s32 status = IXGBE_SUCCESS; 2504 2505 DEBUGFUNC("ixgbe_disable_pcie_master"); 2506 2507 /* Just jump out if bus mastering is already disabled */ 2508 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2509 goto out; 2510 2511 /* Disable the receive unit by stopping each queue */ 2512 number_of_queues = hw->mac.max_rx_queues; 2513 for (i = 0; i < number_of_queues; i++) { 2514 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 2515 if (reg_val & IXGBE_RXDCTL_ENABLE) { 2516 reg_val &= ~IXGBE_RXDCTL_ENABLE; 2517 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 2518 } 2519 } 2520 2521 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); 2522 reg_val |= IXGBE_CTRL_GIO_DIS; 2523 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2524 2525 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2526 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
| 1696 } 1697 hw->addr_ctrl.overflow_promisc = 0; 1698 1699 hw->addr_ctrl.rar_used_count = 1; 1700 1701 /* Zero out the other receive addresses. */ 1702 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); 1703 for (i = 1; i < rar_entries; i++) { 1704 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1705 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1706 } 1707 1708 /* Clear the MTA */ 1709 hw->addr_ctrl.mta_in_use = 0; 1710 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1711 1712 DEBUGOUT(" Clearing MTA\n"); 1713 for (i = 0; i < hw->mac.mcft_size; i++) 1714 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1715 1716 ixgbe_init_uta_tables(hw); 1717 1718 return IXGBE_SUCCESS; 1719} 1720 1721/** 1722 * ixgbe_add_uc_addr - Adds a secondary unicast address. 1723 * @hw: pointer to hardware structure 1724 * @addr: new address 1725 * 1726 * Adds it to unused receive address register or goes into promiscuous mode. 1727 **/ 1728void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 1729{ 1730 u32 rar_entries = hw->mac.num_rar_entries; 1731 u32 rar; 1732 1733 DEBUGFUNC("ixgbe_add_uc_addr"); 1734 1735 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", 1736 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 1737 1738 /* 1739 * Place this address in the RAR if there is room, 1740 * else put the controller into promiscuous mode 1741 */ 1742 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1743 rar = hw->addr_ctrl.rar_used_count; 1744 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 1745 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); 1746 hw->addr_ctrl.rar_used_count++; 1747 } else { 1748 hw->addr_ctrl.overflow_promisc++; 1749 } 1750 1751 DEBUGOUT("ixgbe_add_uc_addr Complete\n"); 1752} 1753 1754/** 1755 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1756 * @hw: pointer to hardware structure 1757 * @addr_list: the list of new addresses 1758 * @addr_count: number of addresses 1759 * @next: iterator function to walk the address list 
1760 * 1761 * The given list replaces any existing list. Clears the secondary addrs from 1762 * receive address registers. Uses unused receive address registers for the 1763 * first secondary addresses, and falls back to promiscuous mode as needed. 1764 * 1765 * Drivers using secondary unicast addresses must set user_set_promisc when 1766 * manually putting the device into promiscuous mode. 1767 **/ 1768s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 1769 u32 addr_count, ixgbe_mc_addr_itr next) 1770{ 1771 u8 *addr; 1772 u32 i; 1773 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1774 u32 uc_addr_in_use; 1775 u32 fctrl; 1776 u32 vmdq; 1777 1778 DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); 1779 1780 /* 1781 * Clear accounting of old secondary address list, 1782 * don't count RAR[0] 1783 */ 1784 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; 1785 hw->addr_ctrl.rar_used_count -= uc_addr_in_use; 1786 hw->addr_ctrl.overflow_promisc = 0; 1787 1788 /* Zero out the other receive addresses */ 1789 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); 1790 for (i = 0; i < uc_addr_in_use; i++) { 1791 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); 1792 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); 1793 } 1794 1795 /* Add the new addresses */ 1796 for (i = 0; i < addr_count; i++) { 1797 DEBUGOUT(" Adding the secondary addresses:\n"); 1798 addr = next(hw, &addr_list, &vmdq); 1799 ixgbe_add_uc_addr(hw, addr, vmdq); 1800 } 1801 1802 if (hw->addr_ctrl.overflow_promisc) { 1803 /* enable promisc if not already in overflow or set by user */ 1804 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 1805 DEBUGOUT(" Entering address overflow promisc mode\n"); 1806 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1807 fctrl |= IXGBE_FCTRL_UPE; 1808 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1809 } 1810 } else { 1811 /* only disable if set by overflow, not by user */ 1812 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 1813 DEBUGOUT(" Leaving 
address overflow promisc mode\n"); 1814 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1815 fctrl &= ~IXGBE_FCTRL_UPE; 1816 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1817 } 1818 } 1819 1820 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); 1821 return IXGBE_SUCCESS; 1822} 1823 1824/** 1825 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1826 * @hw: pointer to hardware structure 1827 * @mc_addr: the multicast address 1828 * 1829 * Extracts the 12 bits, from a multicast address, to determine which 1830 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1831 * incoming rx multicast addresses, to determine the bit-vector to check in 1832 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1833 * by the MO field of the MCSTCTRL. The MO field is set during initialization 1834 * to mc_filter_type. 1835 **/ 1836static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1837{ 1838 u32 vector = 0; 1839 1840 DEBUGFUNC("ixgbe_mta_vector"); 1841 1842 switch (hw->mac.mc_filter_type) { 1843 case 0: /* use bits [47:36] of the address */ 1844 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1845 break; 1846 case 1: /* use bits [46:35] of the address */ 1847 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1848 break; 1849 case 2: /* use bits [45:34] of the address */ 1850 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1851 break; 1852 case 3: /* use bits [43:32] of the address */ 1853 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1854 break; 1855 default: /* Invalid mc_filter_type */ 1856 DEBUGOUT("MC filter type param set incorrectly\n"); 1857 ASSERT(0); 1858 break; 1859 } 1860 1861 /* vector can only be 12-bits or boundary will be exceeded */ 1862 vector &= 0xFFF; 1863 return vector; 1864} 1865 1866/** 1867 * ixgbe_set_mta - Set bit-vector in multicast table 1868 * @hw: pointer to hardware structure 1869 * @hash_value: Multicast address hash value 1870 * 1871 * Sets the 
bit-vector in the multicast table. 1872 **/ 1873void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) 1874{ 1875 u32 vector; 1876 u32 vector_bit; 1877 u32 vector_reg; 1878 1879 DEBUGFUNC("ixgbe_set_mta"); 1880 1881 hw->addr_ctrl.mta_in_use++; 1882 1883 vector = ixgbe_mta_vector(hw, mc_addr); 1884 DEBUGOUT1(" bit-vector = 0x%03X\n", vector); 1885 1886 /* 1887 * The MTA is a register array of 128 32-bit registers. It is treated 1888 * like an array of 4096 bits. We want to set bit 1889 * BitArray[vector_value]. So we figure out what register the bit is 1890 * in, read it, OR in the new bit, then write back the new value. The 1891 * register is determined by the upper 7 bits of the vector value and 1892 * the bit within that register are determined by the lower 5 bits of 1893 * the value. 1894 */ 1895 vector_reg = (vector >> 5) & 0x7F; 1896 vector_bit = vector & 0x1F; 1897 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); 1898} 1899 1900/** 1901 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 1902 * @hw: pointer to hardware structure 1903 * @mc_addr_list: the list of new multicast addresses 1904 * @mc_addr_count: number of addresses 1905 * @next: iterator function to walk the multicast address list 1906 * 1907 * The given list replaces any existing list. Clears the MC addrs from receive 1908 * address registers and the multicast table. Uses unused receive address 1909 * registers for the first multicast addresses, and hashes the rest into the 1910 * multicast table. 1911 **/ 1912s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 1913 u32 mc_addr_count, ixgbe_mc_addr_itr next) 1914{ 1915 u32 i; 1916 u32 vmdq; 1917 1918 DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); 1919 1920 /* 1921 * Set the new number of MC addresses that we are being requested to 1922 * use. 
1923 */ 1924 hw->addr_ctrl.num_mc_addrs = mc_addr_count; 1925 hw->addr_ctrl.mta_in_use = 0; 1926 1927 /* Clear mta_shadow */ 1928 DEBUGOUT(" Clearing MTA\n"); 1929 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); 1930 1931 /* Update mta_shadow */ 1932 for (i = 0; i < mc_addr_count; i++) { 1933 DEBUGOUT(" Adding the multicast addresses:\n"); 1934 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); 1935 } 1936 1937 /* Enable mta */ 1938 for (i = 0; i < hw->mac.mcft_size; i++) 1939 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, 1940 hw->mac.mta_shadow[i]); 1941 1942 if (hw->addr_ctrl.mta_in_use > 0) 1943 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1944 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1945 1946 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); 1947 return IXGBE_SUCCESS; 1948} 1949 1950/** 1951 * ixgbe_enable_mc_generic - Enable multicast address in RAR 1952 * @hw: pointer to hardware structure 1953 * 1954 * Enables multicast address in RAR and the use of the multicast hash table. 1955 **/ 1956s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1957{ 1958 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1959 1960 DEBUGFUNC("ixgbe_enable_mc_generic"); 1961 1962 if (a->mta_in_use > 0) 1963 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1964 hw->mac.mc_filter_type); 1965 1966 return IXGBE_SUCCESS; 1967} 1968 1969/** 1970 * ixgbe_disable_mc_generic - Disable multicast address in RAR 1971 * @hw: pointer to hardware structure 1972 * 1973 * Disables multicast address in RAR and the use of the multicast hash table. 
1974 **/ 1975s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1976{ 1977 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1978 1979 DEBUGFUNC("ixgbe_disable_mc_generic"); 1980 1981 if (a->mta_in_use > 0) 1982 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1983 1984 return IXGBE_SUCCESS; 1985} 1986 1987/** 1988 * ixgbe_fc_enable_generic - Enable flow control 1989 * @hw: pointer to hardware structure 1990 * @packetbuf_num: packet buffer number (0-7) 1991 * 1992 * Enable flow control according to the current settings. 1993 **/ 1994s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 1995{ 1996 s32 ret_val = IXGBE_SUCCESS; 1997 u32 mflcn_reg, fccfg_reg; 1998 u32 reg; 1999 u32 rx_pba_size; 2000 u32 fcrtl, fcrth; 2001 2002 DEBUGFUNC("ixgbe_fc_enable_generic"); 2003 2004 /* Negotiate the fc mode to use */ 2005 ret_val = ixgbe_fc_autoneg(hw); 2006 if (ret_val == IXGBE_ERR_FLOW_CONTROL) 2007 goto out; 2008 2009 /* Disable any previous flow control settings */ 2010 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2011 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 2012 2013 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2014 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2015 2016 /* 2017 * The possible values of fc.current_mode are: 2018 * 0: Flow control is completely disabled 2019 * 1: Rx flow control is enabled (we can receive pause frames, 2020 * but not send pause frames). 2021 * 2: Tx flow control is enabled (we can send pause frames but 2022 * we do not support receiving pause frames). 2023 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2024 * other: Invalid. 2025 */ 2026 switch (hw->fc.current_mode) { 2027 case ixgbe_fc_none: 2028 /* 2029 * Flow control is disabled by software override or autoneg. 2030 * The code below will actually disable it in the HW. 
2031 */ 2032 break; 2033 case ixgbe_fc_rx_pause: 2034 /* 2035 * Rx Flow control is enabled and Tx Flow control is 2036 * disabled by software override. Since there really 2037 * isn't a way to advertise that we are capable of RX 2038 * Pause ONLY, we will advertise that we support both 2039 * symmetric and asymmetric Rx PAUSE. Later, we will 2040 * disable the adapter's ability to send PAUSE frames. 2041 */ 2042 mflcn_reg |= IXGBE_MFLCN_RFCE; 2043 break; 2044 case ixgbe_fc_tx_pause: 2045 /* 2046 * Tx Flow control is enabled, and Rx Flow control is 2047 * disabled by software override. 2048 */ 2049 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2050 break; 2051 case ixgbe_fc_full: 2052 /* Flow control (both Rx and Tx) is enabled by SW override. */ 2053 mflcn_reg |= IXGBE_MFLCN_RFCE; 2054 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2055 break; 2056 default: 2057 DEBUGOUT("Flow control param set incorrectly\n"); 2058 ret_val = IXGBE_ERR_CONFIG; 2059 goto out; 2060 break; 2061 } 2062 2063 /* Set 802.3x based flow control settings. 
*/ 2064 mflcn_reg |= IXGBE_MFLCN_DPF; 2065 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2066 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2067 2068 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); 2069 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 2070 2071 fcrth = (rx_pba_size - hw->fc.high_water) << 10; 2072 fcrtl = (rx_pba_size - hw->fc.low_water) << 10; 2073 2074 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 2075 fcrth |= IXGBE_FCRTH_FCEN; 2076 if (hw->fc.send_xon) 2077 fcrtl |= IXGBE_FCRTL_XONE; 2078 } 2079 2080 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); 2081 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); 2082 2083 /* Configure pause time (2 TCs per register) */ 2084 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 2085 if ((packetbuf_num & 1) == 0) 2086 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 2087 else 2088 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); 2089 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); 2090 2091 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 2092 2093out: 2094 return ret_val; 2095} 2096 2097/** 2098 * ixgbe_fc_autoneg - Configure flow control 2099 * @hw: pointer to hardware structure 2100 * 2101 * Compares our advertised flow control capabilities to those advertised by 2102 * our link partner, and determines the proper flow control mode to use. 2103 **/ 2104s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2105{ 2106 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2107 ixgbe_link_speed speed; 2108 bool link_up; 2109 2110 DEBUGFUNC("ixgbe_fc_autoneg"); 2111 2112 if (hw->fc.disable_fc_autoneg) 2113 goto out; 2114 2115 /* 2116 * AN should have completed when the cable was plugged in. 2117 * Look for reasons to bail out. Bail out if: 2118 * - FC autoneg is disabled, or if 2119 * - link is not up. 2120 * 2121 * Since we're being called from an LSC, link is already known to be up. 2122 * So use link_up_wait_to_complete=FALSE. 
2123 */ 2124 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 2125 if (!link_up) { 2126 ret_val = IXGBE_ERR_FLOW_CONTROL; 2127 goto out; 2128 } 2129 2130 switch (hw->phy.media_type) { 2131 /* Autoneg flow control on fiber adapters */ 2132 case ixgbe_media_type_fiber: 2133 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2134 ret_val = ixgbe_fc_autoneg_fiber(hw); 2135 break; 2136 2137 /* Autoneg flow control on backplane adapters */ 2138 case ixgbe_media_type_backplane: 2139 ret_val = ixgbe_fc_autoneg_backplane(hw); 2140 break; 2141 2142 /* Autoneg flow control on copper adapters */ 2143 case ixgbe_media_type_copper: 2144 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS) 2145 ret_val = ixgbe_fc_autoneg_copper(hw); 2146 break; 2147 2148 default: 2149 break; 2150 } 2151 2152out: 2153 if (ret_val == IXGBE_SUCCESS) { 2154 hw->fc.fc_was_autonegged = TRUE; 2155 } else { 2156 hw->fc.fc_was_autonegged = FALSE; 2157 hw->fc.current_mode = hw->fc.requested_mode; 2158 } 2159 return ret_val; 2160} 2161 2162/** 2163 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber 2164 * @hw: pointer to hardware structure 2165 * @speed: 2166 * @link_up 2167 * 2168 * Enable flow control according on 1 gig fiber. 
2169 **/ 2170static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2171{ 2172 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2173 s32 ret_val; 2174 2175 /* 2176 * On multispeed fiber at 1g, bail out if 2177 * - link is up but AN did not complete, or if 2178 * - link is up and AN completed but timed out 2179 */ 2180 2181 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2182 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2183 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2184 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2185 goto out; 2186 } 2187 2188 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2189 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2190 2191 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, 2192 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, 2193 IXGBE_PCS1GANA_ASM_PAUSE, 2194 IXGBE_PCS1GANA_SYM_PAUSE, 2195 IXGBE_PCS1GANA_ASM_PAUSE); 2196 2197out: 2198 return ret_val; 2199} 2200 2201/** 2202 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 2203 * @hw: pointer to hardware structure 2204 * 2205 * Enable flow control according to IEEE clause 37. 
2206 **/ 2207static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2208{ 2209 u32 links2, anlp1_reg, autoc_reg, links; 2210 s32 ret_val; 2211 2212 /* 2213 * On backplane, bail out if 2214 * - backplane autoneg was not completed, or if 2215 * - we are 82599 and link partner is not AN enabled 2216 */ 2217 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2218 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { 2219 hw->fc.fc_was_autonegged = FALSE; 2220 hw->fc.current_mode = hw->fc.requested_mode; 2221 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2222 goto out; 2223 } 2224 2225 if (hw->mac.type == ixgbe_mac_82599EB) { 2226 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2227 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2228 hw->fc.fc_was_autonegged = FALSE; 2229 hw->fc.current_mode = hw->fc.requested_mode; 2230 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2231 goto out; 2232 } 2233 } 2234 /* 2235 * Read the 10g AN autoc and LP ability registers and resolve 2236 * local flow control settings accordingly 2237 */ 2238 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2239 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2240 2241 ret_val = ixgbe_negotiate_fc(hw, autoc_reg, 2242 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, 2243 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); 2244 2245out: 2246 return ret_val; 2247} 2248 2249/** 2250 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 2251 * @hw: pointer to hardware structure 2252 * 2253 * Enable flow control according to IEEE clause 37. 
2254 **/ 2255static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) 2256{ 2257 u16 technology_ability_reg = 0; 2258 u16 lp_technology_ability_reg = 0; 2259 2260 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2261 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 2262 &technology_ability_reg); 2263 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, 2264 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 2265 &lp_technology_ability_reg); 2266 2267 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, 2268 (u32)lp_technology_ability_reg, 2269 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, 2270 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); 2271} 2272 2273/** 2274 * ixgbe_negotiate_fc - Negotiate flow control 2275 * @hw: pointer to hardware structure 2276 * @adv_reg: flow control advertised settings 2277 * @lp_reg: link partner's flow control settings 2278 * @adv_sym: symmetric pause bit in advertisement 2279 * @adv_asm: asymmetric pause bit in advertisement 2280 * @lp_sym: symmetric pause bit in link partner advertisement 2281 * @lp_asm: asymmetric pause bit in link partner advertisement 2282 * 2283 * Find the intersection between advertised settings and link partner's 2284 * advertised settings 2285 **/ 2286static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 2287 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) 2288{ 2289 if ((!(adv_reg)) || (!(lp_reg))) 2290 return IXGBE_ERR_FC_NOT_NEGOTIATED; 2291 2292 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { 2293 /* 2294 * Now we need to check if the user selected Rx ONLY 2295 * of pause frames. In this case, we had to advertise 2296 * FULL flow control because we could not advertise RX 2297 * ONLY. Hence, we must now check to see if we need to 2298 * turn OFF the TRANSMISSION of PAUSE frames. 
2299 */ 2300 if (hw->fc.requested_mode == ixgbe_fc_full) { 2301 hw->fc.current_mode = ixgbe_fc_full; 2302 DEBUGOUT("Flow Control = FULL.\n"); 2303 } else { 2304 hw->fc.current_mode = ixgbe_fc_rx_pause; 2305 DEBUGOUT("Flow Control=RX PAUSE frames only\n"); 2306 } 2307 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && 2308 (lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2309 hw->fc.current_mode = ixgbe_fc_tx_pause; 2310 DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); 2311 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && 2312 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { 2313 hw->fc.current_mode = ixgbe_fc_rx_pause; 2314 DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); 2315 } else { 2316 hw->fc.current_mode = ixgbe_fc_none; 2317 DEBUGOUT("Flow Control = NONE.\n"); 2318 } 2319 return IXGBE_SUCCESS; 2320} 2321 2322/** 2323 * ixgbe_setup_fc - Set up flow control 2324 * @hw: pointer to hardware structure 2325 * 2326 * Called at init time to set up flow control. 2327 **/ 2328s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 2329{ 2330 s32 ret_val = IXGBE_SUCCESS; 2331 u32 reg = 0, reg_bp = 0; 2332 u16 reg_cu = 0; 2333 2334 DEBUGFUNC("ixgbe_setup_fc"); 2335 2336 /* Validate the packetbuf configuration */ 2337 if (packetbuf_num < 0 || packetbuf_num > 7) { 2338 DEBUGOUT1("Invalid packet buffer number [%d], expected range is" 2339 " 0-7\n", packetbuf_num); 2340 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2341 goto out; 2342 } 2343 2344 /* 2345 * Validate the water mark configuration. Zero water marks are invalid 2346 * because it causes the controller to just blast out fc packets. 2347 */ 2348 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 2349 DEBUGOUT("Invalid water mark configuration\n"); 2350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2351 goto out; 2352 } 2353 2354 /* 2355 * Validate the requested mode. Strict IEEE mode does not allow 2356 * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
2357 */ 2358 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 2359 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); 2360 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 2361 goto out; 2362 } 2363 2364 /* 2365 * 10gig parts do not have a word in the EEPROM to determine the 2366 * default flow control setting, so we explicitly set it to full. 2367 */ 2368 if (hw->fc.requested_mode == ixgbe_fc_default) 2369 hw->fc.requested_mode = ixgbe_fc_full; 2370 2371 /* 2372 * Set up the 1G and 10G flow control advertisement registers so the 2373 * HW will be able to do fc autoneg once the cable is plugged in. If 2374 * we link at 10G, the 1G advertisement is harmless and vice versa. 2375 */ 2376 2377 switch (hw->phy.media_type) { 2378 case ixgbe_media_type_fiber: 2379 case ixgbe_media_type_backplane: 2380 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2381 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2382 break; 2383 2384 case ixgbe_media_type_copper: 2385 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2386 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); 2387 break; 2388 2389 default: 2390 ; 2391 } 2392 2393 /* 2394 * The possible values of fc.requested_mode are: 2395 * 0: Flow control is completely disabled 2396 * 1: Rx flow control is enabled (we can receive pause frames, 2397 * but not send pause frames). 2398 * 2: Tx flow control is enabled (we can send pause frames but 2399 * we do not support receiving pause frames). 2400 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2401 * other: Invalid. 2402 */ 2403 switch (hw->fc.requested_mode) { 2404 case ixgbe_fc_none: 2405 /* Flow control completely disabled by software override. 
*/ 2406 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2407 if (hw->phy.media_type == ixgbe_media_type_backplane) 2408 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | 2409 IXGBE_AUTOC_ASM_PAUSE); 2410 else if (hw->phy.media_type == ixgbe_media_type_copper) 2411 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2412 break; 2413 case ixgbe_fc_rx_pause: 2414 /* 2415 * Rx Flow control is enabled and Tx Flow control is 2416 * disabled by software override. Since there really 2417 * isn't a way to advertise that we are capable of RX 2418 * Pause ONLY, we will advertise that we support both 2419 * symmetric and asymmetric Rx PAUSE. Later, we will 2420 * disable the adapter's ability to send PAUSE frames. 2421 */ 2422 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2423 if (hw->phy.media_type == ixgbe_media_type_backplane) 2424 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2425 IXGBE_AUTOC_ASM_PAUSE); 2426 else if (hw->phy.media_type == ixgbe_media_type_copper) 2427 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2428 break; 2429 case ixgbe_fc_tx_pause: 2430 /* 2431 * Tx Flow control is enabled, and Rx Flow control is 2432 * disabled by software override. 2433 */ 2434 reg |= (IXGBE_PCS1GANA_ASM_PAUSE); 2435 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); 2436 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2437 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); 2438 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); 2439 } else if (hw->phy.media_type == ixgbe_media_type_copper) { 2440 reg_cu |= (IXGBE_TAF_ASM_PAUSE); 2441 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); 2442 } 2443 break; 2444 case ixgbe_fc_full: 2445 /* Flow control (both Rx and Tx) is enabled by SW override. 
*/ 2446 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); 2447 if (hw->phy.media_type == ixgbe_media_type_backplane) 2448 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | 2449 IXGBE_AUTOC_ASM_PAUSE); 2450 else if (hw->phy.media_type == ixgbe_media_type_copper) 2451 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); 2452 break; 2453 default: 2454 DEBUGOUT("Flow control param set incorrectly\n"); 2455 ret_val = IXGBE_ERR_CONFIG; 2456 goto out; 2457 break; 2458 } 2459 2460 /* 2461 * Enable auto-negotiation between the MAC & PHY; 2462 * the MAC will advertise clause 37 flow control. 2463 */ 2464 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 2465 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 2466 2467 /* Disable AN timeout */ 2468 if (hw->fc.strict_ieee) 2469 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 2470 2471 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 2472 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); 2473 2474 /* 2475 * AUTOC restart handles negotiation of 1G and 10G on backplane 2476 * and copper. There is no need to set the PCS1GCTL register. 2477 * 2478 */ 2479 if (hw->phy.media_type == ixgbe_media_type_backplane) { 2480 reg_bp |= IXGBE_AUTOC_AN_RESTART; 2481 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 2482 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 2483 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) { 2484 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, 2485 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); 2486 } 2487 2488 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); 2489out: 2490 return ret_val; 2491} 2492 2493/** 2494 * ixgbe_disable_pcie_master - Disable PCI-express master access 2495 * @hw: pointer to hardware structure 2496 * 2497 * Disables PCI-Express master access and verifies there are no pending 2498 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 2499 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 2500 * is returned signifying master requests disabled. 
2501 **/ 2502s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2503{ 2504 u32 i; 2505 u32 reg_val; 2506 u32 number_of_queues; 2507 s32 status = IXGBE_SUCCESS; 2508 2509 DEBUGFUNC("ixgbe_disable_pcie_master"); 2510 2511 /* Just jump out if bus mastering is already disabled */ 2512 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2513 goto out; 2514 2515 /* Disable the receive unit by stopping each queue */ 2516 number_of_queues = hw->mac.max_rx_queues; 2517 for (i = 0; i < number_of_queues; i++) { 2518 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 2519 if (reg_val & IXGBE_RXDCTL_ENABLE) { 2520 reg_val &= ~IXGBE_RXDCTL_ENABLE; 2521 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 2522 } 2523 } 2524 2525 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); 2526 reg_val |= IXGBE_CTRL_GIO_DIS; 2527 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2528 2529 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2530 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
|
2527 goto out;
| 2531 goto check_device_status;
|
2528 usec_delay(100); 2529 } 2530 2531 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); 2532 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2533 2534 /*
| 2532 usec_delay(100); 2533 } 2534 2535 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); 2536 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 2537 2538 /*
|
2535 * The GIO Master Disable bit didn't clear. There are multiple reasons 2536 * for this listed in the datasheet 5.2.5.3.2 Master Disable, and they 2537 * all require a double reset to recover from. Before proceeding, we 2538 * first wait a little more to try to ensure that, at a minimum, the 2539 * PCIe block has no transactions pending.
| 2539 * Before proceeding, make sure that the PCIe block does not have 2540 * transactions pending.
|
2540 */
| 2541 */
|
| 2542check_device_status:
|
2541 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2542 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & 2543 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2544 break; 2545 usec_delay(100); 2546 } 2547 2548 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) 2549 DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
| 2543 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2544 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & 2545 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2546 break; 2547 usec_delay(100); 2548 } 2549 2550 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) 2551 DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
|
| 2552 else 2553 goto out;
|
2550 2551 /* 2552 * Two consecutive resets are required via CTRL.RST per datasheet 2553 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2554 * of this need. The first reset prevents new master requests from 2555 * being issued by our device. We then must wait 1usec for any 2556 * remaining completions from the PCIe bus to trickle in, and then reset 2557 * again to clear out any effects they may have had on our device. 2558 */ 2559 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2560 2561out: 2562 return status; 2563} 2564 2565 2566/** 2567 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 2568 * @hw: pointer to hardware structure 2569 * @mask: Mask to specify which semaphore to acquire 2570 * 2571 * Acquires the SWFW semaphore thought the GSSR register for the specified 2572 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2573 **/ 2574s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2575{ 2576 u32 gssr; 2577 u32 swmask = mask; 2578 u32 fwmask = mask << 5; 2579 s32 timeout = 200; 2580 2581 DEBUGFUNC("ixgbe_acquire_swfw_sync"); 2582 2583 while (timeout) { 2584 /* 2585 * SW EEPROM semaphore bit is used for access to all 2586 * SW_FW_SYNC/GSSR bits (not just EEPROM) 2587 */ 2588 if (ixgbe_get_eeprom_semaphore(hw)) 2589 return IXGBE_ERR_SWFW_SYNC; 2590 2591 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2592 if (!(gssr & (fwmask | swmask))) 2593 break; 2594 2595 /* 2596 * Firmware currently using resource (fwmask) or other software 2597 * thread currently using resource (swmask) 2598 */ 2599 ixgbe_release_eeprom_semaphore(hw); 2600 msec_delay(5); 2601 timeout--; 2602 } 2603 2604 if (!timeout) { 2605 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); 2606 return IXGBE_ERR_SWFW_SYNC; 2607 } 2608 2609 gssr |= swmask; 2610 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2611 2612 ixgbe_release_eeprom_semaphore(hw); 2613 return IXGBE_SUCCESS; 2614} 2615 2616/** 2617 * ixgbe_release_swfw_sync - Release SWFW semaphore 2618 * @hw: pointer to 
hardware structure 2619 * @mask: Mask to specify which semaphore to release 2620 * 2621 * Releases the SWFW semaphore thought the GSSR register for the specified 2622 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2623 **/ 2624void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2625{ 2626 u32 gssr; 2627 u32 swmask = mask; 2628 2629 DEBUGFUNC("ixgbe_release_swfw_sync"); 2630 2631 ixgbe_get_eeprom_semaphore(hw); 2632 2633 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2634 gssr &= ~swmask; 2635 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2636 2637 ixgbe_release_eeprom_semaphore(hw); 2638} 2639 2640/** 2641 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2642 * @hw: pointer to hardware structure 2643 * @regval: register value to write to RXCTRL 2644 * 2645 * Enables the Rx DMA unit 2646 **/ 2647s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2648{ 2649 DEBUGFUNC("ixgbe_enable_rx_dma_generic"); 2650 2651 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2652 2653 return IXGBE_SUCCESS; 2654} 2655 2656/** 2657 * ixgbe_blink_led_start_generic - Blink LED based on index. 2658 * @hw: pointer to hardware structure 2659 * @index: led number to blink 2660 **/ 2661s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2662{ 2663 ixgbe_link_speed speed = 0; 2664 bool link_up = 0; 2665 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2666 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2667 2668 DEBUGFUNC("ixgbe_blink_led_start_generic"); 2669 2670 /* 2671 * Link must be up to auto-blink the LEDs; 2672 * Force it if link is down. 
2673 */ 2674 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 2675 2676 if (!link_up) { 2677 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2678 autoc_reg |= IXGBE_AUTOC_FLU; 2679 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2680 msec_delay(10); 2681 } 2682 2683 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2684 led_reg |= IXGBE_LED_BLINK(index); 2685 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2686 IXGBE_WRITE_FLUSH(hw); 2687 2688 return IXGBE_SUCCESS; 2689} 2690 2691/** 2692 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 2693 * @hw: pointer to hardware structure 2694 * @index: led number to stop blinking 2695 **/ 2696s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2697{ 2698 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2699 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2700 2701 DEBUGFUNC("ixgbe_blink_led_stop_generic"); 2702 2703 2704 autoc_reg &= ~IXGBE_AUTOC_FLU; 2705 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2706 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2707 2708 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2709 led_reg &= ~IXGBE_LED_BLINK(index); 2710 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2711 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2712 IXGBE_WRITE_FLUSH(hw); 2713 2714 return IXGBE_SUCCESS; 2715} 2716 2717/** 2718 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2719 * @hw: pointer to hardware structure 2720 * @san_mac_offset: SAN MAC address offset 2721 * 2722 * This function will read the EEPROM location for the SAN MAC address 2723 * pointer, and returns the value at that location. This is used in both 2724 * get and set mac_addr routines. 2725 **/ 2726static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2727 u16 *san_mac_offset) 2728{ 2729 DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); 2730 2731 /* 2732 * First read the EEPROM pointer to see if the MAC addresses are 2733 * available. 
2734 */ 2735 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); 2736 2737 return IXGBE_SUCCESS; 2738} 2739 2740/** 2741 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2742 * @hw: pointer to hardware structure 2743 * @san_mac_addr: SAN MAC address 2744 * 2745 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2746 * per-port, so set_lan_id() must be called before reading the addresses. 2747 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2748 * upon for non-SFP connections, so we must call it here. 2749 **/ 2750s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2751{ 2752 u16 san_mac_data, san_mac_offset; 2753 u8 i; 2754 2755 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 2756 2757 /* 2758 * First read the EEPROM pointer to see if the MAC addresses are 2759 * available. If they're not, no point in calling set_lan_id() here. 2760 */ 2761 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2762 2763 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2764 /* 2765 * No addresses available in this EEPROM. It's not an 2766 * error though, so just wipe the local address and return. 2767 */ 2768 for (i = 0; i < 6; i++) 2769 san_mac_addr[i] = 0xFF; 2770 2771 goto san_mac_addr_out; 2772 } 2773 2774 /* make sure we know which port we need to program */ 2775 hw->mac.ops.set_lan_id(hw); 2776 /* apply the port offset to the address offset */ 2777 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2778 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2779 for (i = 0; i < 3; i++) { 2780 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2781 san_mac_addr[i * 2] = (u8)(san_mac_data); 2782 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2783 san_mac_offset++; 2784 } 2785 2786san_mac_addr_out: 2787 return IXGBE_SUCCESS; 2788} 2789 2790/** 2791 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 2792 * @hw: pointer to hardware structure 2793 * @san_mac_addr: SAN MAC address 2794 * 2795 * Write a SAN MAC address to the EEPROM. 2796 **/ 2797s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2798{ 2799 s32 status = IXGBE_SUCCESS; 2800 u16 san_mac_data, san_mac_offset; 2801 u8 i; 2802 2803 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 2804 2805 /* Look for SAN mac address pointer. If not defined, return */ 2806 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2807 2808 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2809 status = IXGBE_ERR_NO_SAN_ADDR_PTR; 2810 goto san_mac_addr_out; 2811 } 2812 2813 /* Make sure we know which port we need to write */ 2814 hw->mac.ops.set_lan_id(hw); 2815 /* Apply the port offset to the address offset */ 2816 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2817 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2818 2819 for (i = 0; i < 3; i++) { 2820 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 2821 san_mac_data |= (u16)(san_mac_addr[i * 2]); 2822 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 2823 san_mac_offset++; 2824 } 2825 2826san_mac_addr_out: 2827 return status; 2828} 2829 2830/** 2831 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2832 * @hw: pointer to hardware structure 2833 * 2834 * Read PCIe configuration space, and get the MSI-X vector count from 2835 * the capabilities table. 
2836 **/ 2837u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2838{ 2839 u32 msix_count = 64; 2840 2841 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 2842 if (hw->mac.msix_vectors_from_pcie) { 2843 msix_count = IXGBE_READ_PCIE_WORD(hw, 2844 IXGBE_PCIE_MSIX_82599_CAPS); 2845 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2846 2847 /* MSI-X count is zero-based in HW, so increment to give 2848 * proper value */ 2849 msix_count++; 2850 } 2851 2852 return msix_count; 2853} 2854 2855/** 2856 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 2857 * @hw: pointer to hardware structure 2858 * @addr: Address to put into receive address register 2859 * @vmdq: VMDq pool to assign 2860 * 2861 * Puts an ethernet address into a receive address register, or 2862 * finds the rar that it is aleady in; adds to the pool list 2863 **/ 2864s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2865{ 2866 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 2867 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 2868 u32 rar; 2869 u32 rar_low, rar_high; 2870 u32 addr_low, addr_high; 2871 2872 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 2873 2874 /* swap bytes for HW little endian */ 2875 addr_low = addr[0] | (addr[1] << 8) 2876 | (addr[2] << 16) 2877 | (addr[3] << 24); 2878 addr_high = addr[4] | (addr[5] << 8); 2879 2880 /* 2881 * Either find the mac_id in rar or find the first empty space. 2882 * rar_highwater points to just after the highest currently used 2883 * rar in order to shorten the search. It grows when we add a new 2884 * rar to the top. 
2885 */ 2886 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 2887 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 2888 2889 if (((IXGBE_RAH_AV & rar_high) == 0) 2890 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 2891 first_empty_rar = rar; 2892 } else if ((rar_high & 0xFFFF) == addr_high) { 2893 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 2894 if (rar_low == addr_low) 2895 break; /* found it already in the rars */ 2896 } 2897 } 2898 2899 if (rar < hw->mac.rar_highwater) { 2900 /* already there so just add to the pool bits */ 2901 ixgbe_set_vmdq(hw, rar, vmdq); 2902 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 2903 /* stick it into first empty RAR slot we found */ 2904 rar = first_empty_rar; 2905 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2906 } else if (rar == hw->mac.rar_highwater) { 2907 /* add it to the top of the list and inc the highwater mark */ 2908 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2909 hw->mac.rar_highwater++; 2910 } else if (rar >= hw->mac.num_rar_entries) { 2911 return IXGBE_ERR_INVALID_MAC_ADDR; 2912 } 2913 2914 /* 2915 * If we found rar[0], make sure the default pool bit (we use pool 0) 2916 * remains cleared to be sure default pool packets will get delivered 2917 */ 2918 if (rar == 0) 2919 ixgbe_clear_vmdq(hw, rar, 0); 2920 2921 return rar; 2922} 2923 2924/** 2925 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2926 * @hw: pointer to hardware struct 2927 * @rar: receive address register index to disassociate 2928 * @vmdq: VMDq pool index to remove from the rar 2929 **/ 2930s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2931{ 2932 u32 mpsar_lo, mpsar_hi; 2933 u32 rar_entries = hw->mac.num_rar_entries; 2934 2935 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 2936 2937 /* Make sure we are using a valid rar index range */ 2938 if (rar >= rar_entries) { 2939 DEBUGOUT1("RAR index %d is out of range.\n", rar); 2940 return IXGBE_ERR_INVALID_ARGUMENT; 2941 } 2942 2943 mpsar_lo = 
IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2944 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2945 2946 if (!mpsar_lo && !mpsar_hi) 2947 goto done; 2948 2949 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2950 if (mpsar_lo) { 2951 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2952 mpsar_lo = 0; 2953 } 2954 if (mpsar_hi) { 2955 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2956 mpsar_hi = 0; 2957 } 2958 } else if (vmdq < 32) { 2959 mpsar_lo &= ~(1 << vmdq); 2960 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 2961 } else { 2962 mpsar_hi &= ~(1 << (vmdq - 32)); 2963 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2964 } 2965 2966 /* was that the last pool using this rar? */ 2967 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2968 hw->mac.ops.clear_rar(hw, rar); 2969done: 2970 return IXGBE_SUCCESS; 2971} 2972 2973/** 2974 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 2975 * @hw: pointer to hardware struct 2976 * @rar: receive address register index to associate with a VMDq index 2977 * @vmdq: VMDq pool index 2978 **/ 2979s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2980{ 2981 u32 mpsar; 2982 u32 rar_entries = hw->mac.num_rar_entries; 2983 2984 DEBUGFUNC("ixgbe_set_vmdq_generic"); 2985 2986 /* Make sure we are using a valid rar index range */ 2987 if (rar >= rar_entries) { 2988 DEBUGOUT1("RAR index %d is out of range.\n", rar); 2989 return IXGBE_ERR_INVALID_ARGUMENT; 2990 } 2991 2992 if (vmdq < 32) { 2993 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2994 mpsar |= 1 << vmdq; 2995 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 2996 } else { 2997 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2998 mpsar |= 1 << (vmdq - 32); 2999 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3000 } 3001 return IXGBE_SUCCESS; 3002} 3003 3004/** 3005 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3006 * @hw: pointer to hardware structure 3007 **/ 3008s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3009{ 
3010 int i; 3011 3012 DEBUGFUNC("ixgbe_init_uta_tables_generic"); 3013 DEBUGOUT(" Clearing UTA\n"); 3014 3015 for (i = 0; i < 128; i++) 3016 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3017 3018 return IXGBE_SUCCESS; 3019} 3020 3021/** 3022 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3023 * @hw: pointer to hardware structure 3024 * @vlan: VLAN id to write to VLAN filter 3025 * 3026 * return the VLVF index where this VLAN id should be placed 3027 * 3028 **/ 3029s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 3030{ 3031 u32 bits = 0; 3032 u32 first_empty_slot = 0; 3033 s32 regindex; 3034 3035 /* short cut the special case */ 3036 if (vlan == 0) 3037 return 0; 3038 3039 /* 3040 * Search for the vlan id in the VLVF entries. Save off the first empty 3041 * slot found along the way 3042 */ 3043 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { 3044 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3045 if (!bits && !(first_empty_slot)) 3046 first_empty_slot = regindex; 3047 else if ((bits & 0x0FFF) == vlan) 3048 break; 3049 } 3050 3051 /* 3052 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan 3053 * in the VLVF. Else use the first empty VLVF register for this 3054 * vlan id. 3055 */ 3056 if (regindex >= IXGBE_VLVF_ENTRIES) { 3057 if (first_empty_slot) 3058 regindex = first_empty_slot; 3059 else { 3060 DEBUGOUT("No space in VLVF.\n"); 3061 regindex = IXGBE_ERR_NO_SPACE; 3062 } 3063 } 3064 3065 return regindex; 3066} 3067 3068/** 3069 * ixgbe_set_vfta_generic - Set VLAN filter table 3070 * @hw: pointer to hardware structure 3071 * @vlan: VLAN id to write to VLAN filter 3072 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3073 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3074 * 3075 * Turn on/off specified VLAN in the VLAN filter table. 
3076 **/ 3077s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3078 bool vlan_on) 3079{ 3080 s32 regindex; 3081 u32 bitindex; 3082 u32 vfta; 3083 u32 bits; 3084 u32 vt; 3085 u32 targetbit; 3086 bool vfta_changed = FALSE; 3087 3088 DEBUGFUNC("ixgbe_set_vfta_generic"); 3089 3090 if (vlan > 4095) 3091 return IXGBE_ERR_PARAM; 3092 3093 /* 3094 * this is a 2 part operation - first the VFTA, then the 3095 * VLVF and VLVFB if VT Mode is set 3096 * We don't write the VFTA until we know the VLVF part succeeded. 3097 */ 3098 3099 /* Part 1 3100 * The VFTA is a bitstring made up of 128 32-bit registers 3101 * that enable the particular VLAN id, much like the MTA: 3102 * bits[11-5]: which register 3103 * bits[4-0]: which bit in the register 3104 */ 3105 regindex = (vlan >> 5) & 0x7F; 3106 bitindex = vlan & 0x1F; 3107 targetbit = (1 << bitindex); 3108 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 3109 3110 if (vlan_on) { 3111 if (!(vfta & targetbit)) { 3112 vfta |= targetbit; 3113 vfta_changed = TRUE; 3114 } 3115 } else { 3116 if ((vfta & targetbit)) { 3117 vfta &= ~targetbit; 3118 vfta_changed = TRUE; 3119 } 3120 } 3121 3122 /* Part 2 3123 * If VT Mode is set 3124 * Either vlan_on 3125 * make sure the vlan is in VLVF 3126 * set the vind bit in the matching VLVFB 3127 * Or !vlan_on 3128 * clear the pool bit and possibly the vind 3129 */ 3130 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3131 if (vt & IXGBE_VT_CTL_VT_ENABLE) { 3132 s32 vlvf_index; 3133 3134 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); 3135 if (vlvf_index < 0) 3136 return vlvf_index; 3137 3138 if (vlan_on) { 3139 /* set the pool bit */ 3140 if (vind < 32) { 3141 bits = IXGBE_READ_REG(hw, 3142 IXGBE_VLVFB(vlvf_index*2)); 3143 bits |= (1 << vind); 3144 IXGBE_WRITE_REG(hw, 3145 IXGBE_VLVFB(vlvf_index*2), 3146 bits); 3147 } else { 3148 bits = IXGBE_READ_REG(hw, 3149 IXGBE_VLVFB((vlvf_index*2)+1)); 3150 bits |= (1 << (vind-32)); 3151 IXGBE_WRITE_REG(hw, 3152 IXGBE_VLVFB((vlvf_index*2)+1), 3153 bits); 
3154 } 3155 } else { 3156 /* clear the pool bit */ 3157 if (vind < 32) { 3158 bits = IXGBE_READ_REG(hw, 3159 IXGBE_VLVFB(vlvf_index*2)); 3160 bits &= ~(1 << vind); 3161 IXGBE_WRITE_REG(hw, 3162 IXGBE_VLVFB(vlvf_index*2), 3163 bits); 3164 bits |= IXGBE_READ_REG(hw, 3165 IXGBE_VLVFB((vlvf_index*2)+1)); 3166 } else { 3167 bits = IXGBE_READ_REG(hw, 3168 IXGBE_VLVFB((vlvf_index*2)+1)); 3169 bits &= ~(1 << (vind-32)); 3170 IXGBE_WRITE_REG(hw, 3171 IXGBE_VLVFB((vlvf_index*2)+1), 3172 bits); 3173 bits |= IXGBE_READ_REG(hw, 3174 IXGBE_VLVFB(vlvf_index*2)); 3175 } 3176 } 3177 3178 /* 3179 * If there are still bits set in the VLVFB registers 3180 * for the VLAN ID indicated we need to see if the 3181 * caller is requesting that we clear the VFTA entry bit. 3182 * If the caller has requested that we clear the VFTA 3183 * entry bit but there are still pools/VFs using this VLAN 3184 * ID entry then ignore the request. We're not worried 3185 * about the case where we're turning the VFTA VLAN ID 3186 * entry bit on, only when requested to turn it off as 3187 * there may be multiple pools and/or VFs using the 3188 * VLAN ID entry. In that case we cannot clear the 3189 * VFTA bit until all pools/VFs using that VLAN ID have also 3190 * been cleared. This will be indicated by "bits" being 3191 * zero. 3192 */ 3193 if (bits) { 3194 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 3195 (IXGBE_VLVF_VIEN | vlan)); 3196 if (!vlan_on) { 3197 /* someone wants to clear the vfta entry 3198 * but some pools/VFs are still using it. 3199 * Ignore it. 
*/ 3200 vfta_changed = FALSE; 3201 } 3202 } 3203 else 3204 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3205 } 3206 3207 if (vfta_changed) 3208 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); 3209 3210 return IXGBE_SUCCESS; 3211} 3212 3213/** 3214 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3215 * @hw: pointer to hardware structure 3216 * 3217 * Clears the VLAN filer table, and the VMDq index associated with the filter 3218 **/ 3219s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3220{ 3221 u32 offset; 3222 3223 DEBUGFUNC("ixgbe_clear_vfta_generic"); 3224 3225 for (offset = 0; offset < hw->mac.vft_size; offset++) 3226 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3227 3228 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3229 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3230 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); 3231 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); 3232 } 3233 3234 return IXGBE_SUCCESS; 3235} 3236 3237/** 3238 * ixgbe_check_mac_link_generic - Determine link and speed status 3239 * @hw: pointer to hardware structure 3240 * @speed: pointer to link speed 3241 * @link_up: TRUE when link is up 3242 * @link_up_wait_to_complete: bool used to wait for link up or not 3243 * 3244 * Reads the links register to determine if link is up and the current speed 3245 **/ 3246s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3247 bool *link_up, bool link_up_wait_to_complete) 3248{ 3249 u32 links_reg, links_orig; 3250 u32 i; 3251 3252 DEBUGFUNC("ixgbe_check_mac_link_generic"); 3253 3254 /* clear the old state */ 3255 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3256 3257 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3258 3259 if (links_orig != links_reg) { 3260 DEBUGOUT2("LINKS changed from %08X to %08X\n", 3261 links_orig, links_reg); 3262 } 3263 3264 if (link_up_wait_to_complete) { 3265 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3266 if (links_reg & IXGBE_LINKS_UP) { 3267 *link_up = TRUE; 3268 break; 3269 } 
else { 3270 *link_up = FALSE; 3271 } 3272 msec_delay(100); 3273 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3274 } 3275 } else { 3276 if (links_reg & IXGBE_LINKS_UP) 3277 *link_up = TRUE; 3278 else 3279 *link_up = FALSE; 3280 } 3281 3282 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3283 IXGBE_LINKS_SPEED_10G_82599) 3284 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3285 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3286 IXGBE_LINKS_SPEED_1G_82599) 3287 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3288 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3289 IXGBE_LINKS_SPEED_100_82599) 3290 *speed = IXGBE_LINK_SPEED_100_FULL; 3291 else 3292 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3293 3294 /* if link is down, zero out the current_mode */ 3295 if (*link_up == FALSE) { 3296 hw->fc.current_mode = ixgbe_fc_none; 3297 hw->fc.fc_was_autonegged = FALSE; 3298 } 3299 3300 return IXGBE_SUCCESS; 3301} 3302 3303/** 3304 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3305 * the EEPROM 3306 * @hw: pointer to hardware structure 3307 * @wwnn_prefix: the alternative WWNN prefix 3308 * @wwpn_prefix: the alternative WWPN prefix 3309 * 3310 * This function will read the EEPROM from the alternative SAN MAC address 3311 * block to check the support for the alternative WWNN/WWPN prefix support. 
3312 **/ 3313s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3314 u16 *wwpn_prefix) 3315{ 3316 u16 offset, caps; 3317 u16 alt_san_mac_blk_offset; 3318 3319 DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); 3320 3321 /* clear output first */ 3322 *wwnn_prefix = 0xFFFF; 3323 *wwpn_prefix = 0xFFFF; 3324 3325 /* check if alternative SAN MAC is supported */ 3326 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 3327 &alt_san_mac_blk_offset); 3328 3329 if ((alt_san_mac_blk_offset == 0) || 3330 (alt_san_mac_blk_offset == 0xFFFF)) 3331 goto wwn_prefix_out; 3332 3333 /* check capability in alternative san mac address block */ 3334 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3335 hw->eeprom.ops.read(hw, offset, &caps); 3336 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3337 goto wwn_prefix_out; 3338 3339 /* get the corresponding prefix for WWNN/WWPN */ 3340 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3341 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 3342 3343 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3344 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 3345 3346wwn_prefix_out: 3347 return IXGBE_SUCCESS; 3348} 3349 3350/** 3351 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM 3352 * @hw: pointer to hardware structure 3353 * @bs: the fcoe boot status 3354 * 3355 * This function will read the FCOE boot status from the iSCSI FCOE block 3356 **/ 3357s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) 3358{ 3359 u16 offset, caps, flags; 3360 s32 status; 3361 3362 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); 3363 3364 /* clear output first */ 3365 *bs = ixgbe_fcoe_bootstatus_unavailable; 3366 3367 /* check if FCOE IBA block is present */ 3368 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; 3369 status = hw->eeprom.ops.read(hw, offset, &caps); 3370 if (status != IXGBE_SUCCESS) 3371 goto out; 3372 3373 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) 3374 
goto out; 3375 3376 /* check if iSCSI FCOE block is populated */ 3377 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); 3378 if (status != IXGBE_SUCCESS) 3379 goto out; 3380 3381 if ((offset == 0) || (offset == 0xFFFF)) 3382 goto out; 3383 3384 /* read fcoe flags in iSCSI FCOE block */ 3385 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; 3386 status = hw->eeprom.ops.read(hw, offset, &flags); 3387 if (status != IXGBE_SUCCESS) 3388 goto out; 3389 3390 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) 3391 *bs = ixgbe_fcoe_bootstatus_enabled; 3392 else 3393 *bs = ixgbe_fcoe_bootstatus_disabled; 3394 3395out: 3396 return status; 3397} 3398 3399/** 3400 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 3401 * control 3402 * @hw: pointer to hardware structure 3403 * 3404 * There are several phys that do not support autoneg flow control. This 3405 * function check the device id to see if the associated phy supports 3406 * autoneg flow control. 3407 **/ 3408static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 3409{ 3410 3411 DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); 3412 3413 switch (hw->device_id) { 3414 case IXGBE_DEV_ID_82599_T3_LOM: 3415 return IXGBE_SUCCESS; 3416 default: 3417 return IXGBE_ERR_FC_NOT_SUPPORTED; 3418 } 3419} 3420 3421/** 3422 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3423 * @hw: pointer to hardware structure 3424 * @enable: enable or disable switch for anti-spoofing 3425 * @pf: Physical Function pool - do not enable anti-spoofing for the PF 3426 * 3427 **/ 3428void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) 3429{ 3430 int j; 3431 int pf_target_reg = pf >> 3; 3432 int pf_target_shift = pf % 8; 3433 u32 pfvfspoof = 0; 3434 3435 if (hw->mac.type == ixgbe_mac_82598EB) 3436 return; 3437 3438 if (enable) 3439 pfvfspoof = IXGBE_SPOOF_MACAS_MASK; 3440 3441 /* 3442 * PFVFSPOOF register array is size 8 with 8 bits assigned to 3443 * MAC anti-spoof enables in 
each register array element. 3444 */ 3445 for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) 3446 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); 3447 3448 /* If not enabling anti-spoofing then done */ 3449 if (!enable) 3450 return; 3451 3452 /* 3453 * The PF should be allowed to spoof so that it can support 3454 * emulation mode NICs. Reset the bit assigned to the PF 3455 */ 3456 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); 3457 pfvfspoof ^= (1 << pf_target_shift); 3458 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); 3459} 3460 3461/** 3462 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 3463 * @hw: pointer to hardware structure 3464 * @enable: enable or disable switch for VLAN anti-spoofing 3465 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 3466 * 3467 **/ 3468void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3469{ 3470 int vf_target_reg = vf >> 3; 3471 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 3472 u32 pfvfspoof; 3473 3474 if (hw->mac.type == ixgbe_mac_82598EB) 3475 return; 3476 3477 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3478 if (enable) 3479 pfvfspoof |= (1 << vf_target_shift); 3480 else 3481 pfvfspoof &= ~(1 << vf_target_shift); 3482 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3483}
| 2554 2555 /* 2556 * Two consecutive resets are required via CTRL.RST per datasheet 2557 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine 2558 * of this need. The first reset prevents new master requests from 2559 * being issued by our device. We then must wait 1usec for any 2560 * remaining completions from the PCIe bus to trickle in, and then reset 2561 * again to clear out any effects they may have had on our device. 2562 */ 2563 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 2564 2565out: 2566 return status; 2567} 2568 2569 2570/** 2571 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 2572 * @hw: pointer to hardware structure 2573 * @mask: Mask to specify which semaphore to acquire 2574 * 2575 * Acquires the SWFW semaphore thought the GSSR register for the specified 2576 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2577 **/ 2578s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2579{ 2580 u32 gssr; 2581 u32 swmask = mask; 2582 u32 fwmask = mask << 5; 2583 s32 timeout = 200; 2584 2585 DEBUGFUNC("ixgbe_acquire_swfw_sync"); 2586 2587 while (timeout) { 2588 /* 2589 * SW EEPROM semaphore bit is used for access to all 2590 * SW_FW_SYNC/GSSR bits (not just EEPROM) 2591 */ 2592 if (ixgbe_get_eeprom_semaphore(hw)) 2593 return IXGBE_ERR_SWFW_SYNC; 2594 2595 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2596 if (!(gssr & (fwmask | swmask))) 2597 break; 2598 2599 /* 2600 * Firmware currently using resource (fwmask) or other software 2601 * thread currently using resource (swmask) 2602 */ 2603 ixgbe_release_eeprom_semaphore(hw); 2604 msec_delay(5); 2605 timeout--; 2606 } 2607 2608 if (!timeout) { 2609 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); 2610 return IXGBE_ERR_SWFW_SYNC; 2611 } 2612 2613 gssr |= swmask; 2614 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2615 2616 ixgbe_release_eeprom_semaphore(hw); 2617 return IXGBE_SUCCESS; 2618} 2619 2620/** 2621 * ixgbe_release_swfw_sync - Release SWFW semaphore 2622 * @hw: pointer to 
hardware structure 2623 * @mask: Mask to specify which semaphore to release 2624 * 2625 * Releases the SWFW semaphore thought the GSSR register for the specified 2626 * function (CSR, PHY0, PHY1, EEPROM, Flash) 2627 **/ 2628void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 2629{ 2630 u32 gssr; 2631 u32 swmask = mask; 2632 2633 DEBUGFUNC("ixgbe_release_swfw_sync"); 2634 2635 ixgbe_get_eeprom_semaphore(hw); 2636 2637 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2638 gssr &= ~swmask; 2639 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 2640 2641 ixgbe_release_eeprom_semaphore(hw); 2642} 2643 2644/** 2645 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit 2646 * @hw: pointer to hardware structure 2647 * @regval: register value to write to RXCTRL 2648 * 2649 * Enables the Rx DMA unit 2650 **/ 2651s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) 2652{ 2653 DEBUGFUNC("ixgbe_enable_rx_dma_generic"); 2654 2655 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2656 2657 return IXGBE_SUCCESS; 2658} 2659 2660/** 2661 * ixgbe_blink_led_start_generic - Blink LED based on index. 2662 * @hw: pointer to hardware structure 2663 * @index: led number to blink 2664 **/ 2665s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) 2666{ 2667 ixgbe_link_speed speed = 0; 2668 bool link_up = 0; 2669 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2670 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2671 2672 DEBUGFUNC("ixgbe_blink_led_start_generic"); 2673 2674 /* 2675 * Link must be up to auto-blink the LEDs; 2676 * Force it if link is down. 
2677 */ 2678 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 2679 2680 if (!link_up) { 2681 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2682 autoc_reg |= IXGBE_AUTOC_FLU; 2683 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2684 msec_delay(10); 2685 } 2686 2687 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2688 led_reg |= IXGBE_LED_BLINK(index); 2689 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2690 IXGBE_WRITE_FLUSH(hw); 2691 2692 return IXGBE_SUCCESS; 2693} 2694 2695/** 2696 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 2697 * @hw: pointer to hardware structure 2698 * @index: led number to stop blinking 2699 **/ 2700s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2701{ 2702 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2703 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2704 2705 DEBUGFUNC("ixgbe_blink_led_stop_generic"); 2706 2707 2708 autoc_reg &= ~IXGBE_AUTOC_FLU; 2709 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2710 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2711 2712 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2713 led_reg &= ~IXGBE_LED_BLINK(index); 2714 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2715 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2716 IXGBE_WRITE_FLUSH(hw); 2717 2718 return IXGBE_SUCCESS; 2719} 2720 2721/** 2722 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM 2723 * @hw: pointer to hardware structure 2724 * @san_mac_offset: SAN MAC address offset 2725 * 2726 * This function will read the EEPROM location for the SAN MAC address 2727 * pointer, and returns the value at that location. This is used in both 2728 * get and set mac_addr routines. 2729 **/ 2730static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 2731 u16 *san_mac_offset) 2732{ 2733 DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); 2734 2735 /* 2736 * First read the EEPROM pointer to see if the MAC addresses are 2737 * available. 
2738 */ 2739 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); 2740 2741 return IXGBE_SUCCESS; 2742} 2743 2744/** 2745 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 2746 * @hw: pointer to hardware structure 2747 * @san_mac_addr: SAN MAC address 2748 * 2749 * Reads the SAN MAC address from the EEPROM, if it's available. This is 2750 * per-port, so set_lan_id() must be called before reading the addresses. 2751 * set_lan_id() is called by identify_sfp(), but this cannot be relied 2752 * upon for non-SFP connections, so we must call it here. 2753 **/ 2754s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2755{ 2756 u16 san_mac_data, san_mac_offset; 2757 u8 i; 2758 2759 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 2760 2761 /* 2762 * First read the EEPROM pointer to see if the MAC addresses are 2763 * available. If they're not, no point in calling set_lan_id() here. 2764 */ 2765 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2766 2767 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2768 /* 2769 * No addresses available in this EEPROM. It's not an 2770 * error though, so just wipe the local address and return. 2771 */ 2772 for (i = 0; i < 6; i++) 2773 san_mac_addr[i] = 0xFF; 2774 2775 goto san_mac_addr_out; 2776 } 2777 2778 /* make sure we know which port we need to program */ 2779 hw->mac.ops.set_lan_id(hw); 2780 /* apply the port offset to the address offset */ 2781 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2782 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2783 for (i = 0; i < 3; i++) { 2784 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); 2785 san_mac_addr[i * 2] = (u8)(san_mac_data); 2786 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 2787 san_mac_offset++; 2788 } 2789 2790san_mac_addr_out: 2791 return IXGBE_SUCCESS; 2792} 2793 2794/** 2795 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 2796 * @hw: pointer to hardware structure 2797 * @san_mac_addr: SAN MAC address 2798 * 2799 * Write a SAN MAC address to the EEPROM. 2800 **/ 2801s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 2802{ 2803 s32 status = IXGBE_SUCCESS; 2804 u16 san_mac_data, san_mac_offset; 2805 u8 i; 2806 2807 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 2808 2809 /* Look for SAN mac address pointer. If not defined, return */ 2810 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 2811 2812 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { 2813 status = IXGBE_ERR_NO_SAN_ADDR_PTR; 2814 goto san_mac_addr_out; 2815 } 2816 2817 /* Make sure we know which port we need to write */ 2818 hw->mac.ops.set_lan_id(hw); 2819 /* Apply the port offset to the address offset */ 2820 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 2821 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 2822 2823 for (i = 0; i < 3; i++) { 2824 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 2825 san_mac_data |= (u16)(san_mac_addr[i * 2]); 2826 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 2827 san_mac_offset++; 2828 } 2829 2830san_mac_addr_out: 2831 return status; 2832} 2833 2834/** 2835 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 2836 * @hw: pointer to hardware structure 2837 * 2838 * Read PCIe configuration space, and get the MSI-X vector count from 2839 * the capabilities table. 
2840 **/ 2841u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2842{ 2843 u32 msix_count = 64; 2844 2845 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 2846 if (hw->mac.msix_vectors_from_pcie) { 2847 msix_count = IXGBE_READ_PCIE_WORD(hw, 2848 IXGBE_PCIE_MSIX_82599_CAPS); 2849 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2850 2851 /* MSI-X count is zero-based in HW, so increment to give 2852 * proper value */ 2853 msix_count++; 2854 } 2855 2856 return msix_count; 2857} 2858 2859/** 2860 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 2861 * @hw: pointer to hardware structure 2862 * @addr: Address to put into receive address register 2863 * @vmdq: VMDq pool to assign 2864 * 2865 * Puts an ethernet address into a receive address register, or 2866 * finds the rar that it is aleady in; adds to the pool list 2867 **/ 2868s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2869{ 2870 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; 2871 u32 first_empty_rar = NO_EMPTY_RAR_FOUND; 2872 u32 rar; 2873 u32 rar_low, rar_high; 2874 u32 addr_low, addr_high; 2875 2876 DEBUGFUNC("ixgbe_insert_mac_addr_generic"); 2877 2878 /* swap bytes for HW little endian */ 2879 addr_low = addr[0] | (addr[1] << 8) 2880 | (addr[2] << 16) 2881 | (addr[3] << 24); 2882 addr_high = addr[4] | (addr[5] << 8); 2883 2884 /* 2885 * Either find the mac_id in rar or find the first empty space. 2886 * rar_highwater points to just after the highest currently used 2887 * rar in order to shorten the search. It grows when we add a new 2888 * rar to the top. 
2889 */ 2890 for (rar = 0; rar < hw->mac.rar_highwater; rar++) { 2891 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 2892 2893 if (((IXGBE_RAH_AV & rar_high) == 0) 2894 && first_empty_rar == NO_EMPTY_RAR_FOUND) { 2895 first_empty_rar = rar; 2896 } else if ((rar_high & 0xFFFF) == addr_high) { 2897 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); 2898 if (rar_low == addr_low) 2899 break; /* found it already in the rars */ 2900 } 2901 } 2902 2903 if (rar < hw->mac.rar_highwater) { 2904 /* already there so just add to the pool bits */ 2905 ixgbe_set_vmdq(hw, rar, vmdq); 2906 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { 2907 /* stick it into first empty RAR slot we found */ 2908 rar = first_empty_rar; 2909 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2910 } else if (rar == hw->mac.rar_highwater) { 2911 /* add it to the top of the list and inc the highwater mark */ 2912 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2913 hw->mac.rar_highwater++; 2914 } else if (rar >= hw->mac.num_rar_entries) { 2915 return IXGBE_ERR_INVALID_MAC_ADDR; 2916 } 2917 2918 /* 2919 * If we found rar[0], make sure the default pool bit (we use pool 0) 2920 * remains cleared to be sure default pool packets will get delivered 2921 */ 2922 if (rar == 0) 2923 ixgbe_clear_vmdq(hw, rar, 0); 2924 2925 return rar; 2926} 2927 2928/** 2929 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address 2930 * @hw: pointer to hardware struct 2931 * @rar: receive address register index to disassociate 2932 * @vmdq: VMDq pool index to remove from the rar 2933 **/ 2934s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2935{ 2936 u32 mpsar_lo, mpsar_hi; 2937 u32 rar_entries = hw->mac.num_rar_entries; 2938 2939 DEBUGFUNC("ixgbe_clear_vmdq_generic"); 2940 2941 /* Make sure we are using a valid rar index range */ 2942 if (rar >= rar_entries) { 2943 DEBUGOUT1("RAR index %d is out of range.\n", rar); 2944 return IXGBE_ERR_INVALID_ARGUMENT; 2945 } 2946 2947 mpsar_lo = 
IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2948 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2949 2950 if (!mpsar_lo && !mpsar_hi) 2951 goto done; 2952 2953 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { 2954 if (mpsar_lo) { 2955 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); 2956 mpsar_lo = 0; 2957 } 2958 if (mpsar_hi) { 2959 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); 2960 mpsar_hi = 0; 2961 } 2962 } else if (vmdq < 32) { 2963 mpsar_lo &= ~(1 << vmdq); 2964 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); 2965 } else { 2966 mpsar_hi &= ~(1 << (vmdq - 32)); 2967 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); 2968 } 2969 2970 /* was that the last pool using this rar? */ 2971 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2972 hw->mac.ops.clear_rar(hw, rar); 2973done: 2974 return IXGBE_SUCCESS; 2975} 2976 2977/** 2978 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address 2979 * @hw: pointer to hardware struct 2980 * @rar: receive address register index to associate with a VMDq index 2981 * @vmdq: VMDq pool index 2982 **/ 2983s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 2984{ 2985 u32 mpsar; 2986 u32 rar_entries = hw->mac.num_rar_entries; 2987 2988 DEBUGFUNC("ixgbe_set_vmdq_generic"); 2989 2990 /* Make sure we are using a valid rar index range */ 2991 if (rar >= rar_entries) { 2992 DEBUGOUT1("RAR index %d is out of range.\n", rar); 2993 return IXGBE_ERR_INVALID_ARGUMENT; 2994 } 2995 2996 if (vmdq < 32) { 2997 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2998 mpsar |= 1 << vmdq; 2999 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); 3000 } else { 3001 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 3002 mpsar |= 1 << (vmdq - 32); 3003 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); 3004 } 3005 return IXGBE_SUCCESS; 3006} 3007 3008/** 3009 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array 3010 * @hw: pointer to hardware structure 3011 **/ 3012s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) 3013{ 
3014 int i; 3015 3016 DEBUGFUNC("ixgbe_init_uta_tables_generic"); 3017 DEBUGOUT(" Clearing UTA\n"); 3018 3019 for (i = 0; i < 128; i++) 3020 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 3021 3022 return IXGBE_SUCCESS; 3023} 3024 3025/** 3026 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot 3027 * @hw: pointer to hardware structure 3028 * @vlan: VLAN id to write to VLAN filter 3029 * 3030 * return the VLVF index where this VLAN id should be placed 3031 * 3032 **/ 3033s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 3034{ 3035 u32 bits = 0; 3036 u32 first_empty_slot = 0; 3037 s32 regindex; 3038 3039 /* short cut the special case */ 3040 if (vlan == 0) 3041 return 0; 3042 3043 /* 3044 * Search for the vlan id in the VLVF entries. Save off the first empty 3045 * slot found along the way 3046 */ 3047 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { 3048 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); 3049 if (!bits && !(first_empty_slot)) 3050 first_empty_slot = regindex; 3051 else if ((bits & 0x0FFF) == vlan) 3052 break; 3053 } 3054 3055 /* 3056 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan 3057 * in the VLVF. Else use the first empty VLVF register for this 3058 * vlan id. 3059 */ 3060 if (regindex >= IXGBE_VLVF_ENTRIES) { 3061 if (first_empty_slot) 3062 regindex = first_empty_slot; 3063 else { 3064 DEBUGOUT("No space in VLVF.\n"); 3065 regindex = IXGBE_ERR_NO_SPACE; 3066 } 3067 } 3068 3069 return regindex; 3070} 3071 3072/** 3073 * ixgbe_set_vfta_generic - Set VLAN filter table 3074 * @hw: pointer to hardware structure 3075 * @vlan: VLAN id to write to VLAN filter 3076 * @vind: VMDq output index that maps queue to VLAN id in VFVFB 3077 * @vlan_on: boolean flag to turn on/off VLAN in VFVF 3078 * 3079 * Turn on/off specified VLAN in the VLAN filter table. 
3080 **/ 3081s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, 3082 bool vlan_on) 3083{ 3084 s32 regindex; 3085 u32 bitindex; 3086 u32 vfta; 3087 u32 bits; 3088 u32 vt; 3089 u32 targetbit; 3090 bool vfta_changed = FALSE; 3091 3092 DEBUGFUNC("ixgbe_set_vfta_generic"); 3093 3094 if (vlan > 4095) 3095 return IXGBE_ERR_PARAM; 3096 3097 /* 3098 * this is a 2 part operation - first the VFTA, then the 3099 * VLVF and VLVFB if VT Mode is set 3100 * We don't write the VFTA until we know the VLVF part succeeded. 3101 */ 3102 3103 /* Part 1 3104 * The VFTA is a bitstring made up of 128 32-bit registers 3105 * that enable the particular VLAN id, much like the MTA: 3106 * bits[11-5]: which register 3107 * bits[4-0]: which bit in the register 3108 */ 3109 regindex = (vlan >> 5) & 0x7F; 3110 bitindex = vlan & 0x1F; 3111 targetbit = (1 << bitindex); 3112 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 3113 3114 if (vlan_on) { 3115 if (!(vfta & targetbit)) { 3116 vfta |= targetbit; 3117 vfta_changed = TRUE; 3118 } 3119 } else { 3120 if ((vfta & targetbit)) { 3121 vfta &= ~targetbit; 3122 vfta_changed = TRUE; 3123 } 3124 } 3125 3126 /* Part 2 3127 * If VT Mode is set 3128 * Either vlan_on 3129 * make sure the vlan is in VLVF 3130 * set the vind bit in the matching VLVFB 3131 * Or !vlan_on 3132 * clear the pool bit and possibly the vind 3133 */ 3134 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3135 if (vt & IXGBE_VT_CTL_VT_ENABLE) { 3136 s32 vlvf_index; 3137 3138 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); 3139 if (vlvf_index < 0) 3140 return vlvf_index; 3141 3142 if (vlan_on) { 3143 /* set the pool bit */ 3144 if (vind < 32) { 3145 bits = IXGBE_READ_REG(hw, 3146 IXGBE_VLVFB(vlvf_index*2)); 3147 bits |= (1 << vind); 3148 IXGBE_WRITE_REG(hw, 3149 IXGBE_VLVFB(vlvf_index*2), 3150 bits); 3151 } else { 3152 bits = IXGBE_READ_REG(hw, 3153 IXGBE_VLVFB((vlvf_index*2)+1)); 3154 bits |= (1 << (vind-32)); 3155 IXGBE_WRITE_REG(hw, 3156 IXGBE_VLVFB((vlvf_index*2)+1), 3157 bits); 
3158 } 3159 } else { 3160 /* clear the pool bit */ 3161 if (vind < 32) { 3162 bits = IXGBE_READ_REG(hw, 3163 IXGBE_VLVFB(vlvf_index*2)); 3164 bits &= ~(1 << vind); 3165 IXGBE_WRITE_REG(hw, 3166 IXGBE_VLVFB(vlvf_index*2), 3167 bits); 3168 bits |= IXGBE_READ_REG(hw, 3169 IXGBE_VLVFB((vlvf_index*2)+1)); 3170 } else { 3171 bits = IXGBE_READ_REG(hw, 3172 IXGBE_VLVFB((vlvf_index*2)+1)); 3173 bits &= ~(1 << (vind-32)); 3174 IXGBE_WRITE_REG(hw, 3175 IXGBE_VLVFB((vlvf_index*2)+1), 3176 bits); 3177 bits |= IXGBE_READ_REG(hw, 3178 IXGBE_VLVFB(vlvf_index*2)); 3179 } 3180 } 3181 3182 /* 3183 * If there are still bits set in the VLVFB registers 3184 * for the VLAN ID indicated we need to see if the 3185 * caller is requesting that we clear the VFTA entry bit. 3186 * If the caller has requested that we clear the VFTA 3187 * entry bit but there are still pools/VFs using this VLAN 3188 * ID entry then ignore the request. We're not worried 3189 * about the case where we're turning the VFTA VLAN ID 3190 * entry bit on, only when requested to turn it off as 3191 * there may be multiple pools and/or VFs using the 3192 * VLAN ID entry. In that case we cannot clear the 3193 * VFTA bit until all pools/VFs using that VLAN ID have also 3194 * been cleared. This will be indicated by "bits" being 3195 * zero. 3196 */ 3197 if (bits) { 3198 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 3199 (IXGBE_VLVF_VIEN | vlan)); 3200 if (!vlan_on) { 3201 /* someone wants to clear the vfta entry 3202 * but some pools/VFs are still using it. 3203 * Ignore it. 
*/ 3204 vfta_changed = FALSE; 3205 } 3206 } 3207 else 3208 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); 3209 } 3210 3211 if (vfta_changed) 3212 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); 3213 3214 return IXGBE_SUCCESS; 3215} 3216 3217/** 3218 * ixgbe_clear_vfta_generic - Clear VLAN filter table 3219 * @hw: pointer to hardware structure 3220 * 3221 * Clears the VLAN filer table, and the VMDq index associated with the filter 3222 **/ 3223s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) 3224{ 3225 u32 offset; 3226 3227 DEBUGFUNC("ixgbe_clear_vfta_generic"); 3228 3229 for (offset = 0; offset < hw->mac.vft_size; offset++) 3230 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 3231 3232 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { 3233 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); 3234 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); 3235 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); 3236 } 3237 3238 return IXGBE_SUCCESS; 3239} 3240 3241/** 3242 * ixgbe_check_mac_link_generic - Determine link and speed status 3243 * @hw: pointer to hardware structure 3244 * @speed: pointer to link speed 3245 * @link_up: TRUE when link is up 3246 * @link_up_wait_to_complete: bool used to wait for link up or not 3247 * 3248 * Reads the links register to determine if link is up and the current speed 3249 **/ 3250s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3251 bool *link_up, bool link_up_wait_to_complete) 3252{ 3253 u32 links_reg, links_orig; 3254 u32 i; 3255 3256 DEBUGFUNC("ixgbe_check_mac_link_generic"); 3257 3258 /* clear the old state */ 3259 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); 3260 3261 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3262 3263 if (links_orig != links_reg) { 3264 DEBUGOUT2("LINKS changed from %08X to %08X\n", 3265 links_orig, links_reg); 3266 } 3267 3268 if (link_up_wait_to_complete) { 3269 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 3270 if (links_reg & IXGBE_LINKS_UP) { 3271 *link_up = TRUE; 3272 break; 3273 } 
else { 3274 *link_up = FALSE; 3275 } 3276 msec_delay(100); 3277 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 3278 } 3279 } else { 3280 if (links_reg & IXGBE_LINKS_UP) 3281 *link_up = TRUE; 3282 else 3283 *link_up = FALSE; 3284 } 3285 3286 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3287 IXGBE_LINKS_SPEED_10G_82599) 3288 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3289 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3290 IXGBE_LINKS_SPEED_1G_82599) 3291 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3292 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 3293 IXGBE_LINKS_SPEED_100_82599) 3294 *speed = IXGBE_LINK_SPEED_100_FULL; 3295 else 3296 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3297 3298 /* if link is down, zero out the current_mode */ 3299 if (*link_up == FALSE) { 3300 hw->fc.current_mode = ixgbe_fc_none; 3301 hw->fc.fc_was_autonegged = FALSE; 3302 } 3303 3304 return IXGBE_SUCCESS; 3305} 3306 3307/** 3308 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from 3309 * the EEPROM 3310 * @hw: pointer to hardware structure 3311 * @wwnn_prefix: the alternative WWNN prefix 3312 * @wwpn_prefix: the alternative WWPN prefix 3313 * 3314 * This function will read the EEPROM from the alternative SAN MAC address 3315 * block to check the support for the alternative WWNN/WWPN prefix support. 
3316 **/ 3317s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 3318 u16 *wwpn_prefix) 3319{ 3320 u16 offset, caps; 3321 u16 alt_san_mac_blk_offset; 3322 3323 DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); 3324 3325 /* clear output first */ 3326 *wwnn_prefix = 0xFFFF; 3327 *wwpn_prefix = 0xFFFF; 3328 3329 /* check if alternative SAN MAC is supported */ 3330 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, 3331 &alt_san_mac_blk_offset); 3332 3333 if ((alt_san_mac_blk_offset == 0) || 3334 (alt_san_mac_blk_offset == 0xFFFF)) 3335 goto wwn_prefix_out; 3336 3337 /* check capability in alternative san mac address block */ 3338 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; 3339 hw->eeprom.ops.read(hw, offset, &caps); 3340 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) 3341 goto wwn_prefix_out; 3342 3343 /* get the corresponding prefix for WWNN/WWPN */ 3344 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; 3345 hw->eeprom.ops.read(hw, offset, wwnn_prefix); 3346 3347 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; 3348 hw->eeprom.ops.read(hw, offset, wwpn_prefix); 3349 3350wwn_prefix_out: 3351 return IXGBE_SUCCESS; 3352} 3353 3354/** 3355 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM 3356 * @hw: pointer to hardware structure 3357 * @bs: the fcoe boot status 3358 * 3359 * This function will read the FCOE boot status from the iSCSI FCOE block 3360 **/ 3361s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) 3362{ 3363 u16 offset, caps, flags; 3364 s32 status; 3365 3366 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); 3367 3368 /* clear output first */ 3369 *bs = ixgbe_fcoe_bootstatus_unavailable; 3370 3371 /* check if FCOE IBA block is present */ 3372 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; 3373 status = hw->eeprom.ops.read(hw, offset, &caps); 3374 if (status != IXGBE_SUCCESS) 3375 goto out; 3376 3377 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) 3378 
goto out; 3379 3380 /* check if iSCSI FCOE block is populated */ 3381 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); 3382 if (status != IXGBE_SUCCESS) 3383 goto out; 3384 3385 if ((offset == 0) || (offset == 0xFFFF)) 3386 goto out; 3387 3388 /* read fcoe flags in iSCSI FCOE block */ 3389 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; 3390 status = hw->eeprom.ops.read(hw, offset, &flags); 3391 if (status != IXGBE_SUCCESS) 3392 goto out; 3393 3394 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) 3395 *bs = ixgbe_fcoe_bootstatus_enabled; 3396 else 3397 *bs = ixgbe_fcoe_bootstatus_disabled; 3398 3399out: 3400 return status; 3401} 3402 3403/** 3404 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow 3405 * control 3406 * @hw: pointer to hardware structure 3407 * 3408 * There are several phys that do not support autoneg flow control. This 3409 * function check the device id to see if the associated phy supports 3410 * autoneg flow control. 3411 **/ 3412static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 3413{ 3414 3415 DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); 3416 3417 switch (hw->device_id) { 3418 case IXGBE_DEV_ID_82599_T3_LOM: 3419 return IXGBE_SUCCESS; 3420 default: 3421 return IXGBE_ERR_FC_NOT_SUPPORTED; 3422 } 3423} 3424 3425/** 3426 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3427 * @hw: pointer to hardware structure 3428 * @enable: enable or disable switch for anti-spoofing 3429 * @pf: Physical Function pool - do not enable anti-spoofing for the PF 3430 * 3431 **/ 3432void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) 3433{ 3434 int j; 3435 int pf_target_reg = pf >> 3; 3436 int pf_target_shift = pf % 8; 3437 u32 pfvfspoof = 0; 3438 3439 if (hw->mac.type == ixgbe_mac_82598EB) 3440 return; 3441 3442 if (enable) 3443 pfvfspoof = IXGBE_SPOOF_MACAS_MASK; 3444 3445 /* 3446 * PFVFSPOOF register array is size 8 with 8 bits assigned to 3447 * MAC anti-spoof enables in 
each register array element. 3448 */ 3449 for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) 3450 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); 3451 3452 /* If not enabling anti-spoofing then done */ 3453 if (!enable) 3454 return; 3455 3456 /* 3457 * The PF should be allowed to spoof so that it can support 3458 * emulation mode NICs. Reset the bit assigned to the PF 3459 */ 3460 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); 3461 pfvfspoof ^= (1 << pf_target_shift); 3462 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); 3463} 3464 3465/** 3466 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing 3467 * @hw: pointer to hardware structure 3468 * @enable: enable or disable switch for VLAN anti-spoofing 3469 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing 3470 * 3471 **/ 3472void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) 3473{ 3474 int vf_target_reg = vf >> 3; 3475 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; 3476 u32 pfvfspoof; 3477 3478 if (hw->mac.type == ixgbe_mac_82598EB) 3479 return; 3480 3481 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 3482 if (enable) 3483 pfvfspoof |= (1 << vf_target_shift); 3484 else 3485 pfvfspoof &= ~(1 << vf_target_shift); 3486 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3487}
|
| 3488 3489/** 3490 * ixgbe_get_device_caps_generic - Get additional device capabilities 3491 * @hw: pointer to hardware structure 3492 * @device_caps: the EEPROM word with the extra device capabilities 3493 * 3494 * This function will read the EEPROM location for the device capabilities, 3495 * and return the word through device_caps. 3496 **/ 3497s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) 3498{ 3499 DEBUGFUNC("ixgbe_get_device_caps_generic"); 3500 3501 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 3502 3503 return IXGBE_SUCCESS; 3504} 3505 3506/** 3507 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering 3508 * @hw: pointer to hardware structure 3509 * 3510 **/ 3511void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) 3512{ 3513 u32 regval; 3514 u32 i; 3515 3516 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); 3517 3518 /* Enable relaxed ordering */ 3519 for (i = 0; i < hw->mac.max_tx_queues; i++) { 3520 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 3521 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 3522 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 3523 } 3524 3525 for (i = 0; i < hw->mac.max_rx_queues; i++) { 3526 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 3527 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | 3528 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 3529 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 3530 } 3531 3532}
|
| |