e1000_82575.c (190872) vs. e1000_82575.c (194865)
1/******************************************************************************
2
3 Copyright (c) 2001-2009, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden (view full) ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
1/******************************************************************************
2
3 Copyright (c) 2001-2009, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden (view full) ---

25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/e1000/e1000_82575.c 190872 2009-04-10 00:05:46Z jfv $*/
33/*$FreeBSD: head/sys/dev/e1000/e1000_82575.c 194865 2009-06-24 17:41:29Z jfv $*/
34
35/*
36 * 82575EB Gigabit Network Connection
37 * 82575EB Gigabit Backplane Connection
38 * 82575GB Gigabit Network Connection
39 * 82575GB Gigabit Network Connection
40 * 82576 Gigabit Network Connection
34
35/*
36 * 82575EB Gigabit Network Connection
37 * 82575EB Gigabit Backplane Connection
38 * 82575GB Gigabit Network Connection
39 * 82575GB Gigabit Network Connection
40 * 82576 Gigabit Network Connection
41 * 82576 Quad Port Gigabit Mezzanine Adapter
41 */
42
43#include "e1000_api.h"
44
45static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
46static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
47static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
48static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);

--- 23 unchanged lines hidden (view full) ---

72 u16 *speed, u16 *duplex);
73static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
74static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
75static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
76static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
77static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
78static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
79void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
42 */
43
44#include "e1000_api.h"
45
46static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
47static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
48static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
49static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);

--- 23 unchanged lines hidden (view full) ---

73 u16 *speed, u16 *duplex);
74static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
75static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
76static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
77static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
78static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
79static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
80void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
81static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
80
81/**
82 * e1000_init_phy_params_82575 - Init PHY func ptrs.
83 * @hw: pointer to the HW structure
84 **/
85static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
86{
87 struct e1000_phy_info *phy = &hw->phy;

--- 233 unchanged lines hidden (view full) ---

321/**
322 * e1000_acquire_phy_82575 - Acquire rights to access PHY
323 * @hw: pointer to the HW structure
324 *
325 * Acquire access rights to the correct PHY.
326 **/
327static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
328{
82
83/**
84 * e1000_init_phy_params_82575 - Init PHY func ptrs.
85 * @hw: pointer to the HW structure
86 **/
87static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
88{
89 struct e1000_phy_info *phy = &hw->phy;

--- 233 unchanged lines hidden (view full) ---

323/**
324 * e1000_acquire_phy_82575 - Acquire rights to access PHY
325 * @hw: pointer to the HW structure
326 *
327 * Acquire access rights to the correct PHY.
328 **/
329static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
330{
329 u16 mask;
331 u16 mask = E1000_SWFW_PHY0_SM;
330
331 DEBUGFUNC("e1000_acquire_phy_82575");
332
332
333 DEBUGFUNC("e1000_acquire_phy_82575");
334
333 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
335 if (hw->bus.func == E1000_FUNC_1)
336 mask = E1000_SWFW_PHY1_SM;
334
335 return e1000_acquire_swfw_sync_82575(hw, mask);
336}
337
338/**
339 * e1000_release_phy_82575 - Release rights to access PHY
340 * @hw: pointer to the HW structure
341 *
342 * A wrapper to release access rights to the correct PHY.
343 **/
344static void e1000_release_phy_82575(struct e1000_hw *hw)
345{
337
338 return e1000_acquire_swfw_sync_82575(hw, mask);
339}
340
341/**
342 * e1000_release_phy_82575 - Release rights to access PHY
343 * @hw: pointer to the HW structure
344 *
345 * A wrapper to release access rights to the correct PHY.
346 **/
347static void e1000_release_phy_82575(struct e1000_hw *hw)
348{
346 u16 mask;
349 u16 mask = E1000_SWFW_PHY0_SM;
347
348 DEBUGFUNC("e1000_release_phy_82575");
349
350
351 DEBUGFUNC("e1000_release_phy_82575");
352
350 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
353 if (hw->bus.func == E1000_FUNC_1)
354 mask = E1000_SWFW_PHY1_SM;
355
351 e1000_release_swfw_sync_82575(hw, mask);
352}
353
354/**
355 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
356 * @hw: pointer to the HW structure
357 * @offset: register offset to be read
358 * @data: pointer to the read data

--- 421 unchanged lines hidden (view full) ---

780static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
781{
782 s32 timeout = PHY_CFG_TIMEOUT;
783 s32 ret_val = E1000_SUCCESS;
784 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
785
786 DEBUGFUNC("e1000_get_cfg_done_82575");
787
356 e1000_release_swfw_sync_82575(hw, mask);
357}
358
359/**
360 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
361 * @hw: pointer to the HW structure
362 * @offset: register offset to be read
363 * @data: pointer to the read data

--- 421 unchanged lines hidden (view full) ---

785static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
786{
787 s32 timeout = PHY_CFG_TIMEOUT;
788 s32 ret_val = E1000_SUCCESS;
789 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
790
791 DEBUGFUNC("e1000_get_cfg_done_82575");
792
788 if (hw->bus.func == 1)
793 if (hw->bus.func == E1000_FUNC_1)
789 mask = E1000_NVM_CFG_DONE_PORT_1;
794 mask = E1000_NVM_CFG_DONE_PORT_1;
790
791 while (timeout) {
792 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
793 break;
794 msec_delay(1);
795 timeout--;
796 }
797 if (!timeout) {
798 DEBUGOUT("MNG configuration cycle has not completed.\n");

--- 133 unchanged lines hidden (view full) ---

932 * In the case of fiber serdes shut down optics and PCS on driver unload
933 * when management pass thru is not enabled.
934 **/
935void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
936{
937 u32 reg;
938 u16 eeprom_data = 0;
939
795 while (timeout) {
796 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
797 break;
798 msec_delay(1);
799 timeout--;
800 }
801 if (!timeout) {
802 DEBUGOUT("MNG configuration cycle has not completed.\n");

--- 133 unchanged lines hidden (view full) ---

936 * In the case of fiber serdes shut down optics and PCS on driver unload
937 * when management pass thru is not enabled.
938 **/
939void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
940{
941 u32 reg;
942 u16 eeprom_data = 0;
943
940 if (hw->mac.type != e1000_82576 ||
941 (hw->phy.media_type != e1000_media_type_fiber &&
942 hw->phy.media_type != e1000_media_type_internal_serdes))
944 if (hw->phy.media_type != e1000_media_type_internal_serdes)
943 return;
944
945 return;
946
945 if (hw->bus.func == 0)
947 if (hw->bus.func == E1000_FUNC_0)
946 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
948 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
949 else if (hw->bus.func == E1000_FUNC_1)
950 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
947
948 /*
949 * If APM is not enabled in the EEPROM and management interface is
950 * not enabled, then power down.
951 */
952 if (!(eeprom_data & E1000_NVM_APME_82575) &&
953 !e1000_enable_mng_pass_thru(hw)) {
954 /* Disable PCS to turn off link */

--- 10 unchanged lines hidden (view full) ---

965 E1000_WRITE_FLUSH(hw);
966 msec_delay(1);
967 }
968
969 return;
970}
971
972/**
951
952 /*
953 * If APM is not enabled in the EEPROM and management interface is
954 * not enabled, then power down.
955 */
956 if (!(eeprom_data & E1000_NVM_APME_82575) &&
957 !e1000_enable_mng_pass_thru(hw)) {
958 /* Disable PCS to turn off link */

--- 10 unchanged lines hidden (view full) ---

969 E1000_WRITE_FLUSH(hw);
970 msec_delay(1);
971 }
972
973 return;
974}
975
976/**
973 * e1000_vmdq_loopback_enable_pf- Enables VM to VM queue loopback replication
977 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
974 * @hw: pointer to the HW structure
978 * @hw: pointer to the HW structure
979 * @enable: state to enter, either enabled or disabled
980 *
981 * enables/disables L2 switch loopback functionality
975 **/
982 **/
976void e1000_vmdq_loopback_enable_pf(struct e1000_hw *hw)
983void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
977{
978 u32 reg;
979
980 reg = E1000_READ_REG(hw, E1000_DTXSWC);
984{
985 u32 reg;
986
987 reg = E1000_READ_REG(hw, E1000_DTXSWC);
981 reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
988 if (enable)
989 reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
990 else
991 reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
982 E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
983}
984
985/**
992 E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
993}
994
995/**
986 * e1000_vmdq_loopback_disable_pf - Disable VM to VM queue loopbk replication
996 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
987 * @hw: pointer to the HW structure
997 * @hw: pointer to the HW structure
988 **/
989void e1000_vmdq_loopback_disable_pf(struct e1000_hw *hw)
990{
991 u32 reg;
992
993 reg = E1000_READ_REG(hw, E1000_DTXSWC);
994 reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
995 E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
996}
997
998/**
999 * e1000_vmdq_replication_enable_pf - Enable replication of brdcst & multicst
1000 * @hw: pointer to the HW structure
998 * @enable: state to enter, either enabled or disabled
1001 *
999 *
1002 * Enables replication of broadcast and multicast packets from the network
1003 * to VM's which have their respective broadcast and multicast accept
1004 * bits set in the VM Offload Register. This gives the PF driver per
1005 * VM granularity control over which VM's get replicated broadcast traffic.
1000 * enables/disables replication of packets across multiple pools
1006 **/
1001 **/
1007void e1000_vmdq_replication_enable_pf(struct e1000_hw *hw, u32 enables)
1002void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1008{
1009 u32 reg;
1003{
1004 u32 reg;
1010 u32 i;
1011
1005
1012 for (i = 0; i < MAX_NUM_VFS; i++) {
1013 if (enables & (1 << i)) {
1014 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1015 reg |= (E1000_VMOLR_AUPE |
1016 E1000_VMOLR_BAM |
1017 E1000_VMOLR_MPME);
1018 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1019 }
1020 }
1021
1022 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1006 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1023 reg |= E1000_VT_CTL_VM_REPL_EN;
1024 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1025}
1007 if (enable)
1008 reg |= E1000_VT_CTL_VM_REPL_EN;
1009 else
1010 reg &= ~(E1000_VT_CTL_VM_REPL_EN);
1026
1011
1027/**
1028 * e1000_vmdq_replication_disable_pf - Disable replication of brdcst & multicst
1029 * @hw: pointer to the HW structure
1030 *
1031 * Disables replication of broadcast and multicast packets to the VM's.
1032 **/
1033void e1000_vmdq_replication_disable_pf(struct e1000_hw *hw)
1034{
1035 u32 reg;
1036
1037 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1038 reg &= ~(E1000_VT_CTL_VM_REPL_EN);
1039 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1040}
1041
1042/**
1012 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1013}
1014
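For readers following the API change above: this revision folds the old one-way helpers (e1000_vmdq_loopback_enable_pf/_disable_pf, e1000_vmdq_replication_enable_pf/_disable_pf, and the per-pool VMOLR variants removed below) into the two bool-taking setters e1000_vmdq_set_loopback_pf and e1000_vmdq_set_replication_pf. A minimal caller sketch of the new interface, assuming "hw" points at an initialized struct e1000_hw (the variable name is illustrative, not taken from a specific caller):

    /* Sketch only: the consolidated bool setters added in r194865
     * replace the old paired enable/disable helpers. */
    e1000_vmdq_set_loopback_pf(hw, TRUE);     /* old: e1000_vmdq_loopback_enable_pf(hw); sets DTXSWC VMDQ_LOOPBACK_EN */
    e1000_vmdq_set_replication_pf(hw, FALSE); /* old: e1000_vmdq_replication_disable_pf(hw); clears VT_CTL VM_REPL_EN */
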
1015/**
1043 * e1000_vmdq_enable_replication_mode_pf - Enables replication mode in the device
1044 * @hw: pointer to the HW structure
1045 **/
1046void e1000_vmdq_enable_replication_mode_pf(struct e1000_hw *hw)
1047{
1048 u32 reg;
1049
1050 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1051 reg |= E1000_VT_CTL_VM_REPL_EN;
1052 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1053}
1054
1055/**
1056 * e1000_vmdq_broadcast_replication_enable_pf - Enable replication of brdcst
1057 * @hw: pointer to the HW structure
1058 * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1059 *
1060 * Enables replication of broadcast packets from the network
1061 * to VM's which have their respective broadcast accept
1062 * bits set in the VM Offload Register. This gives the PF driver per
1063 * VM granularity control over which VM's get replicated broadcast traffic.
1064 **/
1065void e1000_vmdq_broadcast_replication_enable_pf(struct e1000_hw *hw,
1066 u32 enables)
1067{
1068 u32 reg;
1069 u32 i;
1070
1071 for (i = 0; i < MAX_NUM_VFS; i++) {
1072 if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
1073 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1074 reg |= E1000_VMOLR_BAM;
1075 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1076 }
1077 }
1078}
1079
1080/**
1081 * e1000_vmdq_broadcast_replication_disable_pf - Disable replication
1082 * of broadcast packets
1083 * @hw: pointer to the HW structure
1084 * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1085 *
1086 * Disables replication of broadcast packets for specific pools.
1087 * If bam/mpe is disabled on all pools then replication mode is
1088 * turned off.
1089 **/
1090void e1000_vmdq_broadcast_replication_disable_pf(struct e1000_hw *hw,
1091 u32 disables)
1092{
1093 u32 reg;
1094 u32 i;
1095 u32 oneenabled = 0;
1096
1097 for (i = 0; i < MAX_NUM_VFS; i++) {
1098 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1099 if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
1100 reg &= ~(E1000_VMOLR_BAM);
1101 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1102 }
1103 if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
1104 E1000_VMOLR_BAM |
1105 E1000_VMOLR_MPME)))
1106 oneenabled = 1;
1107 }
1108 if (!oneenabled) {
1109 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1110 reg &= ~(E1000_VT_CTL_VM_REPL_EN);
1111 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1112 }
1113}
1114
1115/**
1116 * e1000_vmdq_multicast_promiscuous_enable_pf - Enable promiscuous reception
1117 * @hw: pointer to the HW structure
1118 * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1119 *
1120 * Enables promiscuous reception of multicast packets from the network
1121 * to VM's which have their respective multicast promiscuous mode enable
1122 * bits set in the VM Offload Register. This gives the PF driver per
1123 * VM granularity control over which VM's get all multicast traffic.
1124 **/
1125void e1000_vmdq_multicast_promiscuous_enable_pf(struct e1000_hw *hw,
1126 u32 enables)
1127{
1128 u32 reg;
1129 u32 i;
1130
1131 for (i = 0; i < MAX_NUM_VFS; i++) {
1132 if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
1133 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1134 reg |= E1000_VMOLR_MPME;
1135 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1136 }
1137 }
1138}
1139
1140/**
1141 * e1000_vmdq_multicast_promiscuous_disable_pf - Disable promiscuous
1142 * reception of multicast packets
1143 * @hw: pointer to the HW structure
1144 * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1145 *
1146 * Disables promiscuous reception of multicast packets for specific pools.
1147 * If bam/mpe is disabled on all pools then replication mode is
1148 * turned off.
1149 **/
1150void e1000_vmdq_multicast_promiscuous_disable_pf(struct e1000_hw *hw,
1151 u32 disables)
1152{
1153 u32 reg;
1154 u32 i;
1155 u32 oneenabled = 0;
1156
1157 for (i = 0; i < MAX_NUM_VFS; i++) {
1158 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1159 if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
1160 reg &= ~(E1000_VMOLR_MPME);
1161 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1162 }
1163 if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
1164 E1000_VMOLR_BAM |
1165 E1000_VMOLR_MPME)))
1166 oneenabled = 1;
1167 }
1168 if (!oneenabled) {
1169 reg = E1000_READ_REG(hw, E1000_VT_CTL);
1170 reg &= ~(E1000_VT_CTL_VM_REPL_EN);
1171 E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
1172 }
1173}
1174
1175/**
1176 * e1000_vmdq_aupe_enable_pf - Enable acceptance of untagged packets
1177 * @hw: pointer to the HW structure
1178 * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1179 *
1180 * Enables acceptance of packets from the network which do not have
1181 * a VLAN tag but match the exact MAC filter of a given VM.
1182 **/
1183void e1000_vmdq_aupe_enable_pf(struct e1000_hw *hw, u32 enables)
1184{
1185 u32 reg;
1186 u32 i;
1187
1188 for (i = 0; i < MAX_NUM_VFS; i++) {
1189 if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
1190 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1191 reg |= E1000_VMOLR_AUPE;
1192 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1193 }
1194 }
1195}
1196
1197/**
1198 * e1000_vmdq_aupe_disable_pf - Disable acceptance of untagged packets
1199 * @hw: pointer to the HW structure
1200 * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
1201 *
1202 * Disables acceptance of packets from the network which do not have
1203 * a VLAN tag but match the exact MAC filter of a given VM.
1204 **/
1205void e1000_vmdq_aupe_disable_pf(struct e1000_hw *hw, u32 disables)
1206{
1207 u32 reg;
1208 u32 i;
1209
1210 for (i = 0; i < MAX_NUM_VFS; i++) {
1211 if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
1212 reg = E1000_READ_REG(hw, E1000_VMOLR(i));
1213 reg &= ~E1000_VMOLR_AUPE;
1214 E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
1215 }
1216 }
1217}
1218
1219/**
1220 * e1000_reset_hw_82575 - Reset hardware
1221 * @hw: pointer to the HW structure
1222 *
1223 * This resets the hardware into a known state.
1224 **/
1225static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1226{
1227 u32 ctrl, icr;

--- 5 unchanged lines hidden (view full) ---

1233 * Prevent the PCI-E bus from sticking if there is no TLP connection
1234 * on the last TLP read/write transaction when MAC is reset.
1235 */
1236 ret_val = e1000_disable_pcie_master_generic(hw);
1237 if (ret_val) {
1238 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1239 }
1240
1016 * e1000_reset_hw_82575 - Reset hardware
1017 * @hw: pointer to the HW structure
1018 *
1019 * This resets the hardware into a known state.
1020 **/
1021static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
1022{
1023 u32 ctrl, icr;

--- 5 unchanged lines hidden (view full) ---

1029 * Prevent the PCI-E bus from sticking if there is no TLP connection
1030 * on the last TLP read/write transaction when MAC is reset.
1031 */
1032 ret_val = e1000_disable_pcie_master_generic(hw);
1033 if (ret_val) {
1034 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1035 }
1036
1037 /* set the completion timeout for interface */
1038 ret_val = e1000_set_pcie_completion_timeout(hw);
1039 if (ret_val) {
1040 DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1041 }
1042
1241 DEBUGOUT("Masking off all interrupts\n");
1242 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1243
1244 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1245 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1246 E1000_WRITE_FLUSH(hw);
1247
1248 msec_delay(10);

--- 79 unchanged lines hidden (view full) ---

1328 * @hw: pointer to the HW structure
1329 *
1330 * Configures the link for auto-neg or forced speed and duplex. Then we check
1331 * for link, once link is established calls to configure collision distance
1332 * and flow control are called.
1333 **/
1334static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1335{
1043 DEBUGOUT("Masking off all interrupts\n");
1044 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1045
1046 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1047 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1048 E1000_WRITE_FLUSH(hw);
1049
1050 msec_delay(10);

--- 79 unchanged lines hidden (view full) ---

1130 * @hw: pointer to the HW structure
1131 *
1132 * Configures the link for auto-neg or forced speed and duplex. Then we check
1133 * for link, once link is established calls to configure collision distance
1134 * and flow control are called.
1135 **/
1136static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1137{
1336 u32 ctrl, led_ctrl;
1138 u32 ctrl;
1337 s32 ret_val;
1338 bool link;
1339
1340 DEBUGFUNC("e1000_setup_copper_link_82575");
1341
1342 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1343 ctrl |= E1000_CTRL_SLU;
1344 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1345 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1346
1347 switch (hw->phy.type) {
1348 case e1000_phy_m88:
1349 ret_val = e1000_copper_link_setup_m88(hw);
1350 break;
1351 case e1000_phy_igp_3:
1352 ret_val = e1000_copper_link_setup_igp(hw);
1139 s32 ret_val;
1140 bool link;
1141
1142 DEBUGFUNC("e1000_setup_copper_link_82575");
1143
1144 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1145 ctrl |= E1000_CTRL_SLU;
1146 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1147 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1148
1149 switch (hw->phy.type) {
1150 case e1000_phy_m88:
1151 ret_val = e1000_copper_link_setup_m88(hw);
1152 break;
1153 case e1000_phy_igp_3:
1154 ret_val = e1000_copper_link_setup_igp(hw);
1353 /* Setup activity LED */
1354 led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
1355 led_ctrl &= IGP_ACTIVITY_LED_MASK;
1356 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
1357 E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
1358 break;
1359 default:
1360 ret_val = -E1000_ERR_PHY;
1361 break;
1362 }
1363
1364 if (ret_val)
1365 goto out;

--- 62 unchanged lines hidden (view full) ---

1428 /*
1429 * On the 82575, SerDes loopback mode persists until it is
1430 * explicitly turned off or a power cycle is performed. A read to
1431 * the register does not indicate its status. Therefore, we ensure
1432 * loopback mode is disabled during initialization.
1433 */
1434 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1435
1155 break;
1156 default:
1157 ret_val = -E1000_ERR_PHY;
1158 break;
1159 }
1160
1161 if (ret_val)
1162 goto out;

--- 62 unchanged lines hidden (view full) ---

1225 /*
1226 * On the 82575, SerDes loopback mode persists until it is
1227 * explicitly turned off or a power cycle is performed. A read to
1228 * the register does not indicate its status. Therefore, we ensure
1229 * loopback mode is disabled during initialization.
1230 */
1231 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1232
1436 /* Force link up, set 1gb, set both sw defined pins */
1233 /* Force link up, set 1gb */
1437 reg = E1000_READ_REG(hw, E1000_CTRL);
1234 reg = E1000_READ_REG(hw, E1000_CTRL);
1438 reg |= E1000_CTRL_SLU |
1439 E1000_CTRL_SPD_1000 |
1440 E1000_CTRL_FRCSPD |
1441 E1000_CTRL_SWDPIN0 |
1442 E1000_CTRL_SWDPIN1;
1235 reg |= E1000_CTRL_SLU | E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD;
1236 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1237 /* set both sw defined pins */
1238 reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1239 }
1443 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1240 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1444
1445 /* Power on phy for 82576 fiber adapters */
1446 if (hw->mac.type == e1000_82576) {
1447 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1448 reg &= ~E1000_CTRL_EXT_SDP7_DATA;
1449 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1450 }
1451
1452 /* Set switch control to serdes energy detect */

--- 56 unchanged lines hidden (view full) ---

1509 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1510 if (ret_val) {
1511 DEBUGOUT("NVM Read Error\n");
1512 goto out;
1513 }
1514
1515 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1516 switch(hw->phy.media_type) {
1241 /* Power on phy for 82576 fiber adapters */
1242 if (hw->mac.type == e1000_82576) {
1243 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1244 reg &= ~E1000_CTRL_EXT_SDP7_DATA;
1245 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1246 }
1247
1248 /* Set switch control to serdes energy detect */

--- 56 unchanged lines hidden (view full) ---

1305 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1306 if (ret_val) {
1307 DEBUGOUT("NVM Read Error\n");
1308 goto out;
1309 }
1310
1311 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1312 switch(hw->phy.media_type) {
1517 case e1000_media_type_fiber:
1518 case e1000_media_type_internal_serdes:
1519 *data = ID_LED_DEFAULT_82575_SERDES;
1520 break;
1521 case e1000_media_type_copper:
1522 default:
1523 *data = ID_LED_DEFAULT;
1524 break;
1525 }

--- 74 unchanged lines hidden (view full) ---

1600 *
1601 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1602 * which can be enabled for use in the embedded applications. Simply
1603 * return the current state of the sgmii interface.
1604 **/
1605static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1606{
1607 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1313 case e1000_media_type_internal_serdes:
1314 *data = ID_LED_DEFAULT_82575_SERDES;
1315 break;
1316 case e1000_media_type_copper:
1317 default:
1318 *data = ID_LED_DEFAULT;
1319 break;
1320 }

--- 74 unchanged lines hidden (view full) ---

1395 *
1396 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1397 * which can be enabled for use in the embedded applications. Simply
1398 * return the current state of the sgmii interface.
1399 **/
1400static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
1401{
1402 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1608
1609 DEBUGFUNC("e1000_sgmii_active_82575");
1610
1611 if (hw->mac.type != e1000_82575 && hw->mac.type != e1000_82576)
1612 return FALSE;
1613
1614 return dev_spec->sgmii_active;
1615}
1616
1617/**
1618 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1619 * @hw: pointer to the HW structure
1620 *
1621 * Inits recommended HW defaults after a reset when there is no EEPROM

--- 135 unchanged lines hidden (view full) ---

1757 E1000_READ_REG(hw, E1000_HGOTCL);
1758 E1000_READ_REG(hw, E1000_HGOTCH);
1759 E1000_READ_REG(hw, E1000_LENERRS);
1760
1761 /* This register should not be read in copper configurations */
1762 if (hw->phy.media_type == e1000_media_type_internal_serdes)
1763 E1000_READ_REG(hw, E1000_SCVPC);
1764}
1403 return dev_spec->sgmii_active;
1404}
1405
1406/**
1407 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1408 * @hw: pointer to the HW structure
1409 *
1410 * Inits recommended HW defaults after a reset when there is no EEPROM

--- 135 unchanged lines hidden (view full) ---

1546 E1000_READ_REG(hw, E1000_HGOTCL);
1547 E1000_READ_REG(hw, E1000_HGOTCH);
1548 E1000_READ_REG(hw, E1000_LENERRS);
1549
1550 /* This register should not be read in copper configurations */
1551 if (hw->phy.media_type == e1000_media_type_internal_serdes)
1552 E1000_READ_REG(hw, E1000_SCVPC);
1553}
1554
1765/**
1766 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1767 * @hw: pointer to the HW structure
1768 *
1769 * After rx enable if manageability is enabled then there is likely some
1770 * bad data at the start of the fifo and possibly in the DMA fifo. This
1771 * function clears the fifos and flushes any packets that came in as rx was
1772 * being enabled.

--- 58 unchanged lines hidden (view full) ---

1831 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1832
1833 /* Flush receive errors generated by workaround */
1834 E1000_READ_REG(hw, E1000_ROC);
1835 E1000_READ_REG(hw, E1000_RNBC);
1836 E1000_READ_REG(hw, E1000_MPC);
1837}
1838
1555/**
1556 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1557 * @hw: pointer to the HW structure
1558 *
1559 * After rx enable if manageability is enabled then there is likely some
1560 * bad data at the start of the fifo and possibly in the DMA fifo. This
1561 * function clears the fifos and flushes any packets that came in as rx was
1562 * being enabled.

--- 58 unchanged lines hidden (view full) ---

1621 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1622
1623 /* Flush receive errors generated by workaround */
1624 E1000_READ_REG(hw, E1000_ROC);
1625 E1000_READ_REG(hw, E1000_RNBC);
1626 E1000_READ_REG(hw, E1000_MPC);
1627}
1628
1629/**
1630 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
1631 * @hw: pointer to the HW structure
1632 *
1633 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1634 * however the hardware default for these parts is 500us to 1ms which is less
1635 * than the 10ms recommended by the pci-e spec. To address this we need to
1636 * increase the value to either 10ms to 200ms for capability version 1 config,
1637 * or 16ms to 55ms for version 2.
1638 **/
1639static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1640{
1641 u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1642 s32 ret_val = E1000_SUCCESS;
1643 u16 pcie_devctl2;
1644
1645 /* only take action if timeout value is defaulted to 0 */
1646 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1647 goto out;
1648
1649 /*
1650 * if capabilities version is type 1 we can write the
1651 * timeout of 10ms to 200ms through the GCR register
1652 */
1653 if (!(gcr & E1000_GCR_CAP_VER2)) {
1654 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1655 goto out;
1656 }
1657
1658 /*
1659 * for version 2 capabilities we need to write the config space
1660 * directly in order to set the completion timeout value for
1661 * 16ms to 55ms
1662 */
1663 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1664 &pcie_devctl2);
1665 if (ret_val)
1666 goto out;
1667
1668 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1669
1670 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1671 &pcie_devctl2);
1672out:
1673 /* disable completion timeout resend */
1674 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1675
1676 E1000_WRITE_REG(hw, E1000_GCR, gcr);
1677 return ret_val;
1678}
1679
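
For context on where the new e1000_set_pcie_completion_timeout helper fires: the interleaved diff above also adds a call to it near the top of e1000_reset_hw_82575, after the PCI-E master disable and before the MAC reset, with a failure only logged. Restated as one compact sketch (same calls as in the diff, surrounding reset steps elided):

    /* from the reset path added in this revision (sketch) */
    ret_val = e1000_set_pcie_completion_timeout(hw);
    if (ret_val) {
            DEBUGOUT("PCI-E Set completion timeout has failed.\n");
    }
    /* ...reset continues: mask interrupts, disable RX/TX, flush, delay... */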