3c3
< Copyright (c) 2001-2015, Intel Corporation
---
> Copyright (c) 2001-2017, Intel Corporation
5,6c5,6
<
< Redistribution and use in source and binary forms, with or without
---
>
> Redistribution and use in source and binary forms, with or without
8,9c8,9
<
< 1. Redistributions of source code must retain the above copyright notice,
---
>
> 1. Redistributions of source code must retain the above copyright notice,
11,13c11,13
<
< 2. Redistributions in binary form must reproduce the above copyright
< notice, this list of conditions and the following disclaimer in the
---
>
> 2. Redistributions in binary form must reproduce the above copyright
> notice, this list of conditions and the following disclaimer in the
15,17c15,17
<
< 3. Neither the name of the Intel Corporation nor the names of its
< contributors may be used to endorse or promote products derived from
---
>
> 3. Neither the name of the Intel Corporation nor the names of its
> contributors may be used to endorse or promote products derived from
19c19
<
---
>
21,28c21,28
< AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
< IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
< ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
< LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
< CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
< SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
< INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
< CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
---
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33c33
< /*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe_common.c 299200 2016-05-06 22:54:56Z pfg $*/
---
> /*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe_common.c 320897 2017-07-11 21:25:07Z erj $*/
116a117
> mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
138a140
> mac->ops.fc_autoneg = ixgbe_fc_autoneg;
172,175c174,186
< hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
< /* if link is down, assume supported */
< if (link_up)
< supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
---
> /* flow control autoneg black list */
> switch (hw->device_id) {
> case IXGBE_DEV_ID_X550EM_A_SFP:
> case IXGBE_DEV_ID_X550EM_A_SFP_N:
> case IXGBE_DEV_ID_X550EM_A_QSFP:
> case IXGBE_DEV_ID_X550EM_A_QSFP_N:
> supported = FALSE;
> break;
> default:
> hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
> /* if link is down, assume supported */
> if (link_up)
> supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
176a188,195
> else
> supported = TRUE;
> }
>
> break;
> case ixgbe_media_type_backplane:
> if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
> supported = FALSE;
180,182d198
< case ixgbe_media_type_backplane:
< supported = TRUE;
< break;
192a209,211
> case IXGBE_DEV_ID_X550EM_A_10G_T:
> case IXGBE_DEV_ID_X550EM_A_1G_T:
> case IXGBE_DEV_ID_X550EM_A_1G_T_L:
202c221
< if (!supported) {
---
> if (!supported)
204,207c223,224
< "Device %x does not support flow control autoneg",
< hw->device_id);
< }
<
---
> "Device %x does not support flow control autoneg",
> hw->device_id);
253c270
< /* only backplane uses autoc so fall though */
---
> /* fall through - only backplane uses autoc */
379a397
> u16 device_caps;
402,403c420,423
< if (ret_val != IXGBE_SUCCESS)
< goto out;
---
> if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
> DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
> return ret_val;
> }
404a425,440
> /* Cache bit indicating need for crosstalk fix */
> switch (hw->mac.type) {
> case ixgbe_mac_82599EB:
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> hw->mac.ops.get_device_caps(hw, &device_caps);
> if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
> hw->need_crosstalk_fix = FALSE;
> else
> hw->need_crosstalk_fix = TRUE;
> break;
> default:
> hw->need_crosstalk_fix = FALSE;
> break;
> }
>
408,409c444
< out:
< return ret_val;
---
> return IXGBE_SUCCESS;
470c505
< if (status == IXGBE_SUCCESS) {
---
> if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
474a510,516
> /* Initialize the LED link active for LED blink support */
> if (hw->mac.ops.init_led_link_act)
> hw->mac.ops.init_led_link_act(hw);
>
> if (status != IXGBE_SUCCESS)
> DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
>
1030,1031c1072,1074
< * Determines the LAN function id by reading memory-mapped registers
< * and swaps the port value if requested.
---
> * Determines the LAN function id by reading memory-mapped registers and swaps
> * the port value if requested, and set MAC instance for devices that share
> * CS4227.
1036a1080
> u16 ee_ctrl_4;
1042c1086
< bus->lan_id = bus->func;
---
> bus->lan_id = (u8)bus->func;
1047a1092,1098
>
> /* Get MAC instance from EEPROM for configuring CS4227 */
> if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
> hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
> bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
> IXGBE_EE_CTRL_4_INST_ID_SHIFT;
> }
1104a1156,1196
> * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
> * @hw: pointer to hardware structure
> *
> * Store the index for the link active LED. This will be used to support
> * blinking the LED.
> **/
> s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
> {
> struct ixgbe_mac_info *mac = &hw->mac;
> u32 led_reg, led_mode;
> u8 i;
>
> led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
>
> /* Get LED link active from the LEDCTL register */
> for (i = 0; i < 4; i++) {
> led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
>
> if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
> IXGBE_LED_LINK_ACTIVE) {
> mac->led_link_act = i;
> return IXGBE_SUCCESS;
> }
> }
>
> /*
> * If LEDCTL register does not have the LED link active set, then use
> * known MAC defaults.
> */
> switch (hw->mac.type) {
> case ixgbe_mac_X550EM_a:
> case ixgbe_mac_X550EM_x:
> mac->led_link_act = 1;
> break;
> default:
> mac->led_link_act = 2;
> }
> return IXGBE_SUCCESS;
> }
>
> /**
1114a1207,1209
> if (index > 3)
> return IXGBE_ERR_PARAM;
>
1134a1230,1232
> if (index > 3)
> return IXGBE_ERR_PARAM;
>
1905c2003
< }
---
> };
1991c2089
< }
---
> };
2253c2351
< * Tests a MAC address to ensure it is a valid Individual Address
---
> * Tests a MAC address to ensure it is a valid Individual Address.
2263d2360
< DEBUGOUT("MAC address is multicast\n");
2267d2363
< DEBUGOUT("MAC address is broadcast\n");
2272d2367
< DEBUGOUT("MAC address is all zeros\n");
2410,2412d2504
<
< /* clear VMDq pool/queue selection for RAR 0 */
< hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2413a2506,2509
>
> /* clear VMDq pool/queue selection for RAR 0 */
> hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
>
2742c2838
< ixgbe_fc_autoneg(hw);
---
> hw->mac.ops.fc_autoneg(hw);
2852,2853c2948,2949
< static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
< u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
---
> s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
> u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
3326c3422
< int secrxreg;
---
> u32 secrxreg;
3372a3469,3471
> if (index > 3)
> return IXGBE_ERR_PARAM;
>
3417a3517,3520
> if (index > 3)
> return IXGBE_ERR_PARAM;
>
>
3583a3687
> case ixgbe_mac_X550EM_a:
3613c3717
< * finds the rar that it is already in; adds to the pool list
---
> * finds the rar that it is aleady in; adds to the pool list
3722c3826,3827
< if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
---
> if (mpsar_lo == 0 && mpsar_hi == 0 &&
> rar != 0 && rar != hw->mac.san_mac_rar_index)
3812c3917
< s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
---
> s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3814,3816c3919,3920
< u32 bits = 0;
< u32 first_empty_slot = 0;
< s32 regindex;
---
> s32 regindex, first_empty_slot;
> u32 bits;
3822,3826c3926,3940
< /*
< * Search for the vlan id in the VLVF entries. Save off the first empty
< * slot found along the way
< */
< for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
---
> /* if vlvf_bypass is set we don't want to use an empty slot, we
> * will simply bypass the VLVF if there are no entries present in the
> * VLVF that contain our VLAN
> */
> first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
>
> /* add VLAN enable bit for comparison */
> vlan |= IXGBE_VLVF_VIEN;
>
> /* Search for the vlan id in the VLVF entries. Save off the first empty
> * slot found along the way.
> *
> * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
> */
> for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3828c3942,3944
< if (!bits && !(first_empty_slot))
---
> if (bits == vlan)
> return regindex;
> if (!first_empty_slot && !bits)
3830,3831d3945
< else if ((bits & 0x0FFF) == vlan)
< break;
3834,3847c3948,3952
< /*
< * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
< * in the VLVF. Else use the first empty VLVF register for this
< * vlan id.
< */
< if (regindex >= IXGBE_VLVF_ENTRIES) {
< if (first_empty_slot)
< regindex = first_empty_slot;
< else {
< ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
< "No space in VLVF.\n");
< regindex = IXGBE_ERR_NO_SPACE;
< }
< }
---
> /* If we are here then we didn't find the VLAN. Return first empty
> * slot we found during our search, else error.
> */
> if (!first_empty_slot)
> ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3849c3954
< return regindex;
---
> return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3856,3857c3961,3963
< * @vind: VMDq output index that maps queue to VLAN id in VFVFB
< * @vlan_on: boolean flag to turn on/off VLAN in VFVF
---
> * @vind: VMDq output index that maps queue to VLAN id in VLVFB
> * @vlan_on: boolean flag to turn on/off VLAN
> * @vlvf_bypass: boolean flag indicating updating default pool is okay
3862c3968
< bool vlan_on)
---
> bool vlan_on, bool vlvf_bypass)
3864,3869c3970,3971
< s32 regindex;
< u32 bitindex;
< u32 vfta;
< u32 targetbit;
< s32 ret_val = IXGBE_SUCCESS;
< bool vfta_changed = FALSE;
---
> u32 regidx, vfta_delta, vfta;
> s32 ret_val;
3873c3975
< if (vlan > 4095)
---
> if (vlan > 4095 || vind > 63)
3888,3891c3990,3992
< regindex = (vlan >> 5) & 0x7F;
< bitindex = vlan & 0x1F;
< targetbit = (1 << bitindex);
< vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
---
> regidx = vlan / 32;
> vfta_delta = 1 << (vlan % 32);
> vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3893,3903c3994,4000
< if (vlan_on) {
< if (!(vfta & targetbit)) {
< vfta |= targetbit;
< vfta_changed = TRUE;
< }
< } else {
< if ((vfta & targetbit)) {
< vfta &= ~targetbit;
< vfta_changed = TRUE;
< }
< }
---
> /*
> * vfta_delta represents the difference between the current value
> * of vfta and the value we want in the register. Since the diff
> * is an XOR mask we can just update the vfta using an XOR
> */
> vfta_delta &= vlan_on ? ~vfta : vfta;
> vfta ^= vfta_delta;
3908,3910c4005,4009
< ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
< &vfta_changed);
< if (ret_val != IXGBE_SUCCESS)
---
> ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
> vfta, vlvf_bypass);
> if (ret_val != IXGBE_SUCCESS) {
> if (vlvf_bypass)
> goto vfta_update;
3911a4011
> }
3913,3914c4013,4016
< if (vfta_changed)
< IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
---
> vfta_update:
> /* Update VFTA now that we are ready for traffic */
> if (vfta_delta)
> IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3923,3926c4025,4030
< * @vind: VMDq output index that maps queue to VLAN id in VFVFB
< * @vlan_on: boolean flag to turn on/off VLAN in VFVF
< * @vfta_changed: pointer to boolean flag which indicates whether VFTA
< * should be changed
---
> * @vind: VMDq output index that maps queue to VLAN id in VLVFB
> * @vlan_on: boolean flag to turn on/off VLAN in VLVF
> * @vfta_delta: pointer to the difference between the current value of VFTA
> * and the desired value
> * @vfta: the desired value of the VFTA
> * @vlvf_bypass: boolean flag indicating updating default pool is okay
3931c4035,4036
< bool vlan_on, bool *vfta_changed)
---
> bool vlan_on, u32 *vfta_delta, u32 vfta,
> bool vlvf_bypass)
3933c4038,4039
< u32 vt;
---
> u32 bits;
> s32 vlvf_index;
3937c4043
< if (vlan > 4095)
---
> if (vlan > 4095 || vind > 63)
3947,3950c4053,4054
< vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
< if (vt & IXGBE_VT_CTL_VT_ENABLE) {
< s32 vlvf_index;
< u32 bits;
---
> if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
> return IXGBE_SUCCESS;
3952,3954c4056,4058
< vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
< if (vlvf_index < 0)
< return vlvf_index;
---
> vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
> if (vlvf_index < 0)
> return vlvf_index;
3956,3994c4060
< if (vlan_on) {
< /* set the pool bit */
< if (vind < 32) {
< bits = IXGBE_READ_REG(hw,
< IXGBE_VLVFB(vlvf_index * 2));
< bits |= (1 << vind);
< IXGBE_WRITE_REG(hw,
< IXGBE_VLVFB(vlvf_index * 2),
< bits);
< } else {
< bits = IXGBE_READ_REG(hw,
< IXGBE_VLVFB((vlvf_index * 2) + 1));
< bits |= (1 << (vind - 32));
< IXGBE_WRITE_REG(hw,
< IXGBE_VLVFB((vlvf_index * 2) + 1),
< bits);
< }
< } else {
< /* clear the pool bit */
< if (vind < 32) {
< bits = IXGBE_READ_REG(hw,
< IXGBE_VLVFB(vlvf_index * 2));
< bits &= ~(1 << vind);
< IXGBE_WRITE_REG(hw,
< IXGBE_VLVFB(vlvf_index * 2),
< bits);
< bits |= IXGBE_READ_REG(hw,
< IXGBE_VLVFB((vlvf_index * 2) + 1));
< } else {
< bits = IXGBE_READ_REG(hw,
< IXGBE_VLVFB((vlvf_index * 2) + 1));
< bits &= ~(1 << (vind - 32));
< IXGBE_WRITE_REG(hw,
< IXGBE_VLVFB((vlvf_index * 2) + 1),
< bits);
< bits |= IXGBE_READ_REG(hw,
< IXGBE_VLVFB(vlvf_index * 2));
< }
< }
---
> bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3996,4009c4062,4074
< /*
< * If there are still bits set in the VLVFB registers
< * for the VLAN ID indicated we need to see if the
< * caller is requesting that we clear the VFTA entry bit.
< * If the caller has requested that we clear the VFTA
< * entry bit but there are still pools/VFs using this VLAN
< * ID entry then ignore the request. We're not worried
< * about the case where we're turning the VFTA VLAN ID
< * entry bit on, only when requested to turn it off as
< * there may be multiple pools and/or VFs using the
< * VLAN ID entry. In that case we cannot clear the
< * VFTA bit until all pools/VFs using that VLAN ID have also
< * been cleared. This will be indicated by "bits" being
< * zero.
---
> /* set the pool bit */
> bits |= 1 << (vind % 32);
> if (vlan_on)
> goto vlvf_update;
>
> /* clear the pool bit */
> bits ^= 1 << (vind % 32);
>
> if (!bits &&
> !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
> /* Clear VFTA first, then disable VLVF. Otherwise
> * we run the risk of stray packets leaking into
> * the PF via the default pool
4011,4021c4076,4083
< if (bits) {
< IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
< (IXGBE_VLVF_VIEN | vlan));
< if ((!vlan_on) && (vfta_changed != NULL)) {
< /* someone wants to clear the vfta entry
< * but some pools/VFs are still using it.
< * Ignore it. */
< *vfta_changed = FALSE;
< }
< } else
< IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
---
> if (*vfta_delta)
> IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
>
> /* disable VLVF and clear remaining bit from pool */
> IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
> IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
>
> return IXGBE_SUCCESS;
4023a4086,4106
> /* If there are still bits set in the VLVFB registers
> * for the VLAN ID indicated we need to see if the
> * caller is requesting that we clear the VFTA entry bit.
> * If the caller has requested that we clear the VFTA
> * entry bit but there are still pools/VFs using this VLAN
> * ID entry then ignore the request. We're not worried
> * about the case where we're turning the VFTA VLAN ID
> * entry bit on, only when requested to turn it off as
> * there may be multiple pools and/or VFs using the
> * VLAN ID entry. In that case we cannot clear the
> * VFTA bit until all pools/VFs using that VLAN ID have also
> * been cleared. This will be indicated by "bits" being
> * zero.
> */
> *vfta_delta = 0;
>
> vlvf_update:
> /* record pool change and enable VLAN ID if not already enabled */
> IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
> IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
>
4045c4128
< IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
---
> IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4051a4135,4160
> * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
> * @hw: pointer to hardware structure
> *
> * Contains the logic to identify if we need to verify link for the
> * crosstalk fix
> **/
> static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
> {
>
> /* Does FW say we need the fix */
> if (!hw->need_crosstalk_fix)
> return FALSE;
>
> /* Only consider SFP+ PHYs i.e. media type fiber */
> switch (hw->mac.ops.get_media_type(hw)) {
> case ixgbe_media_type_fiber:
> case ixgbe_media_type_fiber_qsfp:
> break;
> default:
> return FALSE;
> }
>
> return TRUE;
> }
>
> /**
4067a4177,4205
> /* If Crosstalk fix enabled do the sanity check of making sure
> * the SFP+ cage is full.
> */
> if (ixgbe_need_crosstalk_fix(hw)) {
> u32 sfp_cage_full;
>
> switch (hw->mac.type) {
> case ixgbe_mac_82599EB:
> sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
> IXGBE_ESDP_SDP2;
> break;
> case ixgbe_mac_X550EM_x:
> case ixgbe_mac_X550EM_a:
> sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
> IXGBE_ESDP_SDP0;
> break;
> default:
> /* sanity check - No SFP+ devices here */
> sfp_cage_full = FALSE;
> break;
> }
>
> if (!sfp_cage_full) {
> *link_up = FALSE;
> *speed = IXGBE_LINK_SPEED_UNKNOWN;
> return IXGBE_SUCCESS;
> }
> }
>
4109c4247
< if (hw->mac.type >= ixgbe_mac_X550) {
---
> if (hw->mac.type == ixgbe_mac_X550) {
4113a4252,4258
> case IXGBE_LINKS_SPEED_10_X550EM_A:
> *speed = IXGBE_LINK_SPEED_UNKNOWN;
> if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
> hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
> *speed = IXGBE_LINK_SPEED_10_FULL;
> }
> break;
4231,4232c4376,4377
< * @enable: enable or disable switch for anti-spoofing
< * @pf: Physical Function pool - do not enable anti-spoofing for the PF
---
> * @enable: enable or disable switch for MAC anti-spoofing
> * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4235c4380
< void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
---
> void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4237,4240c4382,4384
< int j;
< int pf_target_reg = pf >> 3;
< int pf_target_shift = pf % 8;
< u32 pfvfspoof = 0;
---
> int vf_target_reg = vf >> 3;
> int vf_target_shift = vf % 8;
> u32 pfvfspoof;
4244a4389
> pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4246,4267c4391,4394
< pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
<
< /*
< * PFVFSPOOF register array is size 8 with 8 bits assigned to
< * MAC anti-spoof enables in each register array element.
< */
< for (j = 0; j < pf_target_reg; j++)
< IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
<
< /*
< * The PF should be allowed to spoof so that it can support
< * emulation mode NICs. Do not set the bits assigned to the PF
< */
< pfvfspoof &= (1 << pf_target_shift) - 1;
< IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
<
< /*
< * Remaining pools belong to the PF so they do not need to have
< * anti-spoofing enabled.
< */
< for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
< IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
---
> pfvfspoof |= (1 << vf_target_shift);
> else
> pfvfspoof &= ~(1 << vf_target_shift);
> IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4363c4490
< * ixgbe_host_interface_command - Issue command to manageability block
---
> * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4365,4366c4492
< * @buffer: contains the command to write and where the return status will
< * be placed
---
> * @buffer: command to write and where the return status will be placed
4369,4374d4494
< * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
< * Needed because FW structures are big endian and decoding of
< * these fields can be 8 bit or 16 bit based on command. Decoding
< * is not easily understood without making a table of commands.
< * So we will leave this up to the caller to read back the data
< * in these cases.
4376,4377c4496,4501
< * Communicates with the manageability block. On success return IXGBE_SUCCESS
< * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
---
> * Communicates with the manageability block. On success return IXGBE_SUCCESS
> * else returns semaphore error when encountering an error acquiring
> * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
> *
> * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
> * by the caller.
4379,4380c4503,4504
< s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
< u32 length, u32 timeout, bool return_data)
---
> s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
> u32 timeout)
4382,4384c4506
< u32 hicr, i, bi, fwsts;
< u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
< u16 buf_len;
---
> u32 hicr, i, fwsts;
4387c4509
< DEBUGFUNC("ixgbe_host_interface_command");
---
> DEBUGFUNC("ixgbe_hic_unlocked");
4389c4511
< if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
---
> if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4392a4515
>
4399c4522
< if ((hicr & IXGBE_HICR_EN) == 0) {
---
> if (!(hicr & IXGBE_HICR_EN)) {
4405c4528
< if ((length % (sizeof(u32))) != 0) {
---
> if (length % sizeof(u32)) {
4430c4553
< if ((timeout != 0 && i == timeout) ||
---
> if ((timeout && i == timeout) ||
4436a4560,4605
> return IXGBE_SUCCESS;
> }
>
> /**
> * ixgbe_host_interface_command - Issue command to manageability block
> * @hw: pointer to the HW structure
> * @buffer: contains the command to write and where the return status will
> * be placed
> * @length: length of buffer, must be multiple of 4 bytes
> * @timeout: time in ms to wait for command completion
> * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
> * Needed because FW structures are big endian and decoding of
> * these fields can be 8 bit or 16 bit based on command. Decoding
> * is not easily understood without making a table of commands.
> * So we will leave this up to the caller to read back the data
> * in these cases.
> *
> * Communicates with the manageability block. On success return IXGBE_SUCCESS
> * else returns semaphore error when encountering an error acquiring
> * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
> **/
> s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
> u32 length, u32 timeout, bool return_data)
> {
> u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
> u16 dword_len;
> u16 buf_len;
> s32 status;
> u32 bi;
>
> DEBUGFUNC("ixgbe_host_interface_command");
>
> if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
> DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
> return IXGBE_ERR_HOST_INTERFACE_COMMAND;
> }
>
> /* Take management host interface semaphore */
> status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
> if (status)
> return status;
>
> status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
> if (status)
> goto rel_out;
>
4438c4607
< return 0;
---
> goto rel_out;
4451,4452c4620,4621
< if (buf_len == 0)
< return 0;
---
> if (!buf_len)
> goto rel_out;
4456c4625,4626
< return IXGBE_ERR_HOST_INTERFACE_COMMAND;
---
> status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
> goto rel_out;
4468c4638,4641
< return 0;
---
> rel_out:
> hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
>
> return status;
4485c4658,4659
< u8 build, u8 sub)
---
> u8 build, u8 sub, u16 len,
> const char *driver_ver)
4491a4666
> UNREFERENCED_2PARAMETER(len, driver_ver);
4493,4498d4667
< if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
< != IXGBE_SUCCESS) {
< ret_val = IXGBE_ERR_SWFW_SYNC;
< goto out;
< }
<
4530,4531d4698
< hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
< out:
4568c4735
< /* Fall through to configure remaining packet buffers */
---
> /* fall through - configure remaining packet buffers */
4654a4822,4836
> /**
> * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
> *
> * @hw: pointer to hardware structure
> * @cmd: Command we send to the FW
> * @status: The reply from the FW
> *
> * Bit-bangs the cmd to the by_pass FW status points to what is returned.
> **/
> #define IXGBE_BYPASS_BB_WAIT 1
> s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
> {
> int i;
> u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
> u32 esdp;
4655a4838,4933
> if (!status)
> return IXGBE_ERR_PARAM;
>
> *status = 0;
>
> /* SDP vary by MAC type */
> switch (hw->mac.type) {
> case ixgbe_mac_82599EB:
> sck = IXGBE_ESDP_SDP7;
> sdi = IXGBE_ESDP_SDP0;
> sdo = IXGBE_ESDP_SDP6;
> dir_sck = IXGBE_ESDP_SDP7_DIR;
> dir_sdi = IXGBE_ESDP_SDP0_DIR;
> dir_sdo = IXGBE_ESDP_SDP6_DIR;
> break;
> case ixgbe_mac_X540:
> sck = IXGBE_ESDP_SDP2;
> sdi = IXGBE_ESDP_SDP0;
> sdo = IXGBE_ESDP_SDP1;
> dir_sck = IXGBE_ESDP_SDP2_DIR;
> dir_sdi = IXGBE_ESDP_SDP0_DIR;
> dir_sdo = IXGBE_ESDP_SDP1_DIR;
> break;
> default:
> return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
> }
>
> /* Set SDP pins direction */
> esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
> esdp |= dir_sck; /* SCK as output */
> esdp |= dir_sdi; /* SDI as output */
> esdp &= ~dir_sdo; /* SDO as input */
> esdp |= sck;
> esdp |= sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> /* Generate start condition */
> esdp &= ~sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> esdp &= ~sck;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> /* Clock out the new control word and clock in the status */
> for (i = 0; i < 32; i++) {
> if ((cmd >> (31 - i)) & 0x01) {
> esdp |= sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> } else {
> esdp &= ~sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> }
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> esdp |= sck;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> esdp &= ~sck;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
> if (esdp & sdo)
> *status = (*status << 1) | 0x01;
> else
> *status = (*status << 1) | 0x00;
> msec_delay(IXGBE_BYPASS_BB_WAIT);
> }
>
> /* stop condition */
> esdp |= sck;
> esdp &= ~sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
> msec_delay(IXGBE_BYPASS_BB_WAIT);
>
> esdp |= sdi;
> IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
> IXGBE_WRITE_FLUSH(hw);
>
> /* set the page bits to match the cmd that the status it belongs to */
> *status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
>
> return IXGBE_SUCCESS;
> }
>
4656a4935,5070
> * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
> *
> * If we send a write we can't be sure it took until we can read back
> * that same register. It can be a problem as some of the feilds may
> * for valid reasons change inbetween the time wrote the register and
> * we read it again to verify. So this function check everything we
> * can check and then assumes it worked.
> *
> * @u32 in_reg - The register cmd for the bit-bang read.
> * @u32 out_reg - The register returned from a bit-bang read.
> **/
> bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
> {
> u32 mask;
>
> /* Page must match for all control pages */
> if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
> return FALSE;
>
> switch (in_reg & BYPASS_PAGE_M) {
> case BYPASS_PAGE_CTL0:
> /* All the following can't change since the last write
> * - All the event actions
> * - The timeout value
> */
> mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
> BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
> BYPASS_WDTIMEOUT_M |
> BYPASS_WDT_VALUE_M;
> if ((out_reg & mask) != (in_reg & mask))
> return FALSE;
>
> /* 0x0 is never a valid value for bypass status */
> if (!(out_reg & BYPASS_STATUS_OFF_M))
> return FALSE;
> break;
> case BYPASS_PAGE_CTL1:
> /* All the following can't change since the last write
> * - time valid bit
> * - time we last sent
> */
> mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
> if ((out_reg & mask) != (in_reg & mask))
> return FALSE;
> break;
> case BYPASS_PAGE_CTL2:
> /* All we can check in this page is control number
> * which is already done above.
> */
> break;
> }
>
> /* We are as sure as we can be return TRUE */
> return TRUE;
> }
>
> /**
> * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
> *
> * @hw: pointer to hardware structure
> * @cmd: The control word we are setting.
> * @event: The event we are setting in the FW. This also happens to
> * be the mask for the event we are setting (handy)
> * @action: The action we set the event to in the FW. This is in a
> * bit field that happens to be what we want to put in
> * the event spot (also handy)
> **/
> s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
> u32 action)
> {
> u32 by_ctl = 0;
> u32 cmd, verify;
> u32 count = 0;
>
> /* Get current values */
> cmd = ctrl; /* just reading only need control number */
> if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
> return IXGBE_ERR_INVALID_ARGUMENT;
>
> /* Set to new action */
> cmd = (by_ctl & ~event) | BYPASS_WE | action;
> if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
> return IXGBE_ERR_INVALID_ARGUMENT;
>
> /* Page 0 force a FW eeprom write which is slow so verify */
> if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
> verify = BYPASS_PAGE_CTL0;
> do {
> if (count++ > 5)
> return IXGBE_BYPASS_FW_WRITE_FAILURE;
>
> if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
> return IXGBE_ERR_INVALID_ARGUMENT;
> } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
> } else {
> /* We have give the FW time for the write to stick */
> msec_delay(100);
> }
>
> return IXGBE_SUCCESS;
> }
>
> /**
> * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
> *
> * @hw: pointer to hardware structure
> * @addr: The bypass eeprom address to read.
> * @value: The 8b of data at the address above.
> **/
> s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
> {
> u32 cmd;
> u32 status;
>
>
> /* send the request */
> cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
> cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
> if (ixgbe_bypass_rw_generic(hw, cmd, &status))
> return IXGBE_ERR_INVALID_ARGUMENT;
>
> /* We have give the FW time for the write to stick */
> msec_delay(100);
>
> /* now read the results */
> cmd &= ~BYPASS_WE;
> if (ixgbe_bypass_rw_generic(hw, cmd, &status))
> return IXGBE_ERR_INVALID_ARGUMENT;
>
> *value = status & BYPASS_CTL2_DATA_M;
>
> return IXGBE_SUCCESS;
> }
>
>
> /**
4792,4799d5205
< /* If we already have link at this speed, just jump out */
< status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
< if (status != IXGBE_SUCCESS)
< return status;
<
< if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
< goto out;
<
4851,4858d5256
< /* If we already have link at this speed, just jump out */
< status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
< if (status != IXGBE_SUCCESS)
< return status;
<
< if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
< goto out;
<
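The reworked ixgbe_set_vfta_generic() in the hunks above replaces the per-bit read/modify/write branches with an XOR delta, so the VFTA word is only rewritten when a bit actually has to change. A minimal standalone C sketch of that bit manipulation, assuming a plain 32-bit word in place of the VFTA register (the helper name and the main() harness are illustrative only, not driver code):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/*
 * Sketch of the XOR-delta update used by the new ixgbe_set_vfta_generic():
 * vfta_delta is non-zero only when the stored word must change, so a caller
 * can skip the register write when the delta collapses to zero.
 */
static uint32_t vfta_apply(uint32_t vfta, uint32_t vlan, int vlan_on)
{
	uint32_t vfta_delta = 1u << (vlan % 32);

	/* keep the delta only if the bit is not already in the desired state */
	vfta_delta &= vlan_on ? ~vfta : vfta;

	/* XOR flips the bit when needed and is a no-op when the delta is zero */
	return vfta ^ vfta_delta;
}

int main(void)
{
	uint32_t vfta = 0;

	vfta = vfta_apply(vfta, 100, 1);	/* sets bit 100 % 32 = 4 */
	vfta = vfta_apply(vfta, 100, 1);	/* already set: no change */
	vfta = vfta_apply(vfta, 100, 0);	/* clears bit 4 again */
	printf("final word: 0x%08" PRIx32 "\n", vfta);	/* prints 0x00000000 */
	return 0;
}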