Lines Matching defs:wx

17 struct wx *wx = bus->priv;
25 wr32(wx, WX_MSCA, command);
28 if (wx->mac.type == wx_mac_em)
30 wr32(wx, WX_MSCC, command);
34 100000, false, wx, WX_MSCC);
36 wx_err(wx, "Mdio read c22 command did not complete.\n");
40 return (u16)rd32(wx, WX_MSCC);
46 struct wx *wx = bus->priv;
54 wr32(wx, WX_MSCA, command);
57 if (wx->mac.type == wx_mac_em)
59 wr32(wx, WX_MSCC, command);
63 100000, false, wx, WX_MSCC);
65 wx_err(wx, "Mdio write c22 command did not complete.\n");
72 struct wx *wx = bus->priv;
74 wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
81 struct wx *wx = bus->priv;
83 wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
90 struct wx *wx = bus->priv;
92 wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
100 struct wx *wx = bus->priv;
102 wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
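The fragments above are the shared MSCA/MSCC accessor plus four thin wrappers: the clause-22 entry points write 0xF to WX_MDIO_CLAUSE_SELECT before delegating, the clause-45 ones write 0. A minimal wiring sketch, assuming libwx-internal types (struct wx, wx->pdev) and inventing the callback names, which may differ from the exported symbols:

#include <linux/phy.h>
#include <linux/pci.h>

/* placeholder prototypes for the four accessors shown above; names are
 * assumptions made for this sketch */
int wx_c22_read_sketch(struct mii_bus *bus, int addr, int regnum);
int wx_c22_write_sketch(struct mii_bus *bus, int addr, int regnum, u16 val);
int wx_c45_read_sketch(struct mii_bus *bus, int addr, int devad, int regnum);
int wx_c45_write_sketch(struct mii_bus *bus, int addr, int devad, int regnum, u16 val);

static int wx_mdio_register_sketch(struct wx *wx)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&wx->pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "wx mdio sketch";
	bus->priv = wx;				/* recovered as bus->priv in the accessors */
	bus->parent = &wx->pdev->dev;
	bus->read = wx_c22_read_sketch;		/* selects clause 22 (0xF) */
	bus->write = wx_c22_write_sketch;
	bus->read_c45 = wx_c45_read_sketch;	/* selects clause 45 (0) */
	bus->write_c45 = wx_c45_write_sketch;
	snprintf(bus->id, MII_BUS_ID_SIZE, "wx-sketch-%x", pci_dev_id(wx->pdev));

	return devm_mdiobus_register(&wx->pdev->dev, bus);
}

The devm_* registration ties the bus lifetime to the PCI device, which matches how bus->priv is recovered in the helpers above.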
107 static void wx_intr_disable(struct wx *wx, u64 qmask)
113 wr32(wx, WX_PX_IMS(0), mask);
115 if (wx->mac.type == wx_mac_sp) {
118 wr32(wx, WX_PX_IMS(1), mask);
122 void wx_intr_enable(struct wx *wx, u64 qmask)
128 wr32(wx, WX_PX_IMC(0), mask);
129 if (wx->mac.type == wx_mac_sp) {
132 wr32(wx, WX_PX_IMC(1), mask);
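wx_intr_disable() sets bits in the interrupt mask set registers (WX_PX_IMS) and wx_intr_enable() clears them through WX_PX_IMC; the 64-bit qmask is split into two dwords, and the second register is only written on sp-class MACs. A standalone sketch of that split (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* invented helper: split a 64-bit vector mask into the two dwords that
 * back WX_PX_IMS(0)/WX_PX_IMS(1), or IMC on the enable side */
static void split_qmask(uint64_t qmask, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(qmask & 0xffffffffu);	/* vectors 0-31  -> register 0 */
	*hi = (uint32_t)(qmask >> 32);		/* vectors 32-63 -> register 1, sp only */
}

int main(void)
{
	uint32_t lo, hi;

	split_qmask((1ULL << 35) | (1ULL << 3), &lo, &hi);
	printf("lo=%#x hi=%#x\n", lo, hi);	/* prints lo=0x8 hi=0x8 */
	return 0;
}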
139 * @wx: board private structure
141 void wx_irq_disable(struct wx *wx)
143 struct pci_dev *pdev = wx->pdev;
145 wr32(wx, WX_PX_MISC_IEN, 0);
146 wx_intr_disable(wx, WX_INTR_ALL);
151 for (vector = 0; vector < wx->num_q_vectors; vector++)
152 synchronize_irq(wx->msix_q_entries[vector].vector);
154 synchronize_irq(wx->msix_entry->vector);
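wx_irq_disable() masks the misc cause, masks every queue vector, then waits out in-flight handlers. The non-MSI-X branch is not part of the match; a hedged sketch of the full quiesce body, in which the pdev->msix_enabled test and the pdev->irq fallback are assumptions:

/* sketch only: mask at the source, then wait for any running handlers */
struct pci_dev *pdev = wx->pdev;
int vector;

wr32(wx, WX_PX_MISC_IEN, 0);		/* stop misc cause reporting */
wx_intr_disable(wx, WX_INTR_ALL);	/* mask all queue vectors */

if (pdev->msix_enabled) {
	for (vector = 0; vector < wx->num_q_vectors; vector++)
		synchronize_irq(wx->msix_q_entries[vector].vector);
	synchronize_irq(wx->msix_entry->vector);	/* misc vector */
} else {
	synchronize_irq(pdev->irq);	/* assumed MSI/INTx fallback */
}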
165 static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
172 wr32(wx, WX_SPI_CMD, cmd_val);
175 false, wx, WX_SPI_STATUS);
178 static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
182 ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
186 *data = rd32(wx, WX_SPI_DATA);
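The "10, 100000, false, wx, WX_SPI_STATUS"-style argument tails seen here and throughout the listing belong to read_poll_timeout() from <linux/iopoll.h>: re-run rd32(wx, reg) until a condition holds or the timeout (in microseconds) expires. A hedged sketch of the idiom, with an invented status bit:

#define SKETCH_DONE_BIT 0x1	/* invented status bit, for illustration only */

u32 val;
int err;

/* re-reads rd32(wx, WX_SPI_STATUS) every 10us until the bit is set or
 * 100000us elapse; returns 0 on success, -ETIMEDOUT otherwise */
err = read_poll_timeout(rd32, val, val & SKETCH_DONE_BIT,
			10, 100000, false, wx, WX_SPI_STATUS);
if (err)
	wx_err(wx, "SPI status poll timed out\n");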
191 int wx_check_flash_load(struct wx *hw, u32 check_bit)
210 void wx_control_hw(struct wx *wx, bool drv)
215 wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
222 * @wx: pointer to hardware structure
224 int wx_mng_present(struct wx *wx)
228 fwsm = rd32(wx, WX_MIS_ST);
241 * @wx: pointer to hardware structure
247 static void wx_release_sw_sync(struct wx *wx, u32 mask)
250 wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
256 * @wx: pointer to hardware structure
262 static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
269 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
272 wr32(wx, WX_MNG_SWFW_SYNC, sem);
274 wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
283 * @wx: pointer to the HW structure
295 int wx_host_interface_command(struct wx *wx, u32 *buffer,
305 wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
309 status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
315 wx_err(wx, "Buffer length failure, not aligned to dword");
326 wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
328 buf[i] = rd32a(wx, WX_MNG_MBOX, i);
331 wr32m(wx, WX_MNG_MBOX_CTL,
335 timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
339 wx_dbg(wx, "Command has failed with no status valid.\n");
341 buf[0] = rd32(wx, WX_MNG_MBOX);
347 wx_dbg(wx, "It's unknown cmd.\n");
352 wx_dbg(wx, "write value:\n");
354 wx_dbg(wx, "%x ", buffer[i]);
355 wx_dbg(wx, "read value:\n");
357 wx_dbg(wx, "%x ", buf[i]);
368 buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
378 wx_err(wx, "Buffer not large enough for reply message.\n");
388 buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
393 wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
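wx_host_interface_command() copies a dword-aligned buffer into the WX_MNG_MBOX window under the SW_MB semaphore, kicks WX_MNG_MBOX_CTL, polls for completion, and optionally copies the firmware reply back. A hedged usage sketch; the trailing parameters (byte length, timeout, copy-reply flag) and the WX_HI_COMMAND_TIMEOUT constant are inferred from the fragments, not confirmed by the match:

/* illustration only: a 6-dword command buffer whose first dword is assumed
 * to carry the firmware command header that the length checks above guard */
u32 hic_buf[6] = { 0 };
int err;

err = wx_host_interface_command(wx, hic_buf, sizeof(hic_buf),
				WX_HI_COMMAND_TIMEOUT, false);
if (err)
	wx_err(wx, "mailbox command did not complete: %d\n", err);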
401 * @wx: pointer to hardware structure
407 static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
422 status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
428 *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
435 * @wx: pointer to hardware structure
441 int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
445 status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
447 status = wx_read_ee_hostif_data(wx, offset, data);
448 wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
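wx_read_ee_hostif() is the acquire/read/release wrapper: take the SW_FLASH semaphore, fetch one shadow-RAM word over the mailbox, drop the semaphore. A hedged usage sketch, assuming the offset is a 16-bit word offset:

u16 word;

if (wx_read_ee_hostif(wx, 0, &word) == 0)
	wx_dbg(wx, "eeprom word 0: 0x%04x\n", word);
else
	wx_err(wx, "eeprom read failed\n");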
457 * @wx: pointer to hardware structure
464 int wx_read_ee_hostif_buffer(struct wx *wx,
475 status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
494 status = wx_host_interface_command(wx, (u32 *)&buffer,
500 wx_err(wx, "Host interface command failed\n");
507 value = rd32(wx, reg);
521 wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
528 * @wx: pointer to hardware structure
533 void wx_init_eeprom_params(struct wx *wx)
535 struct wx_eeprom_info *eeprom = &wx->eeprom;
543 if (!(rd32(wx, WX_SPI_STATUS) &
550 wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
555 if (wx->mac.type == wx_mac_sp) {
556 if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
557 wx_err(wx, "NVM Read Error\n");
569 * @wx: pointer to hardware structure
576 void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
582 wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
583 rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
584 rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);
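The loops that unpack these two register reads into mac_addr[] are not part of the match; a self-contained sketch of the layout they imply, on the assumption that WX_PSR_MAC_SWC_AD_H carries octets 0-1 in its low 16 bits and WX_PSR_MAC_SWC_AD_L carries octets 2-5, most significant octet first:

#include <stdint.h>
#include <stdio.h>

/* assumed WX_PSR_MAC_SWC_AD_H/AD_L byte layout, for illustration */
static void unpack_mac(uint32_t rar_high, uint32_t rar_low, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(rar_high >> (1 - i) * 8);
	for (i = 0; i < 4; i++)
		mac[i + 2] = (uint8_t)(rar_low >> (3 - i) * 8);
}

int main(void)
{
	uint8_t mac[6];

	unpack_mac(0x0203, 0x04050607, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 02:03:04:05:06:07 under the assumed layout */
	return 0;
}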
596 * @wx: pointer to hardware structure
604 static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
607 u32 rar_entries = wx->mac.num_rar_entries;
612 wx_err(wx, "RAR index %d is out of range.\n", index);
617 wr32(wx, WX_PSR_MAC_SWC_IDX, index);
620 wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
621 if (wx->mac.type == wx_mac_sp)
622 wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
640 wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
641 wr32m(wx, WX_PSR_MAC_SWC_AD_H,
652 * @wx: pointer to hardware structure
657 static int wx_clear_rar(struct wx *wx, u32 index)
659 u32 rar_entries = wx->mac.num_rar_entries;
663 wx_err(wx, "RAR index %d is out of range.\n", index);
671 wr32(wx, WX_PSR_MAC_SWC_IDX, index);
673 wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
674 wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);
676 wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
677 wr32m(wx, WX_PSR_MAC_SWC_AD_H,
688 * @wx: pointer to hardware struct
692 static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
694 u32 rar_entries = wx->mac.num_rar_entries;
699 wx_err(wx, "RAR index %d is out of range.\n", rar);
703 wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
704 mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
705 mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);
712 wx_clear_rar(wx, rar);
719 * @wx: pointer to hardware structure
721 static void wx_init_uta_tables(struct wx *wx)
725 wx_dbg(wx, " Clearing UTA\n");
728 wr32(wx, WX_PSR_UC_TBL(i), 0);
733 * @wx: pointer to hardware structure
739 void wx_init_rx_addrs(struct wx *wx)
741 u32 rar_entries = wx->mac.num_rar_entries;
749 if (!is_valid_ether_addr(wx->mac.addr)) {
751 wx_get_mac_addr(wx, wx->mac.addr);
752 wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
755 wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
756 wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);
758 wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
760 if (wx->mac.type == wx_mac_sp) {
762 wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
767 wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
769 wr32(wx, WX_PSR_MAC_SWC_IDX, i);
770 wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
771 wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
775 wx->addr_ctrl.mta_in_use = 0;
776 psrctl = rd32(wx, WX_PSR_CTL);
778 psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
779 wr32(wx, WX_PSR_CTL, psrctl);
780 wx_dbg(wx, " Clearing MTA\n");
781 for (i = 0; i < wx->mac.mcft_size; i++)
782 wr32(wx, WX_PSR_MC_TBL(i), 0);
784 wx_init_uta_tables(wx);
788 static void wx_sync_mac_table(struct wx *wx)
792 for (i = 0; i < wx->mac.num_rar_entries; i++) {
793 if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
794 if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
795 wx_set_rar(wx, i,
796 wx->mac_table[i].addr,
797 wx->mac_table[i].pools,
800 wx_clear_rar(wx, i);
802 wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
808 void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
810 memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
811 wx->mac_table[0].pools = 1ULL;
812 wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
813 wx_set_rar(wx, 0, wx->mac_table[0].addr,
814 wx->mac_table[0].pools,
819 void wx_flush_sw_mac_table(struct wx *wx)
823 for (i = 0; i < wx->mac.num_rar_entries; i++) {
824 if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
827 wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
828 wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
829 memset(wx->mac_table[i].addr, 0, ETH_ALEN);
830 wx->mac_table[i].pools = 0;
832 wx_sync_mac_table(wx);
836 static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
843 for (i = 0; i < wx->mac.num_rar_entries; i++) {
844 if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
845 if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
846 if (wx->mac_table[i].pools != (1ULL << pool)) {
847 memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
848 wx->mac_table[i].pools |= (1ULL << pool);
849 wx_sync_mac_table(wx);
855 if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
857 wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
859 memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
860 wx->mac_table[i].pools |= (1ULL << pool);
861 wx_sync_mac_table(wx);
867 static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
875 for (i = 0; i < wx->mac.num_rar_entries; i++) {
876 if (!ether_addr_equal(addr, wx->mac_table[i].addr))
879 wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
880 wx->mac_table[i].pools &= ~(1ULL << pool);
881 if (!wx->mac_table[i].pools) {
882 wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
883 memset(wx->mac_table[i].addr, 0, ETH_ALEN);
885 wx_sync_mac_table(wx);
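wx_add_mac_filter() and wx_del_mac_filter() only touch the software table; wx_sync_mac_table() later pushes slots flagged MODIFIED to the RARs. A self-contained sketch of that bookkeeping, using illustrative flag values and a four-entry table rather than the driver's constants:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ST_MODIFIED 0x1		/* illustrative stand-ins for the */
#define ST_IN_USE   0x2		/* WX_MAC_STATE_* flags            */
#define NUM_RAR     4

struct sw_mac_entry {
	uint8_t  addr[6];
	uint64_t pools;
	uint32_t state;
};

static struct sw_mac_entry table[NUM_RAR];

/* add addr for one pool: reuse a matching in-use slot, else take a free one */
static int add_filter(const uint8_t *addr, int pool)
{
	int i;

	for (i = 0; i < NUM_RAR; i++) {
		if ((table[i].state & ST_IN_USE) &&
		    !memcmp(table[i].addr, addr, 6)) {
			table[i].pools |= 1ULL << pool;
			table[i].state |= ST_MODIFIED;
			return i;
		}
	}
	for (i = 0; i < NUM_RAR; i++) {
		if (table[i].state & ST_IN_USE)
			continue;
		memcpy(table[i].addr, addr, 6);
		table[i].pools = 1ULL << pool;
		table[i].state = ST_MODIFIED | ST_IN_USE;
		return i;
	}
	return -1;	/* table full */
}

/* drop one pool reference; free the slot once no pool references it */
static void del_filter(const uint8_t *addr, int pool)
{
	int i;

	for (i = 0; i < NUM_RAR; i++) {
		if (memcmp(table[i].addr, addr, 6))
			continue;
		table[i].pools &= ~(1ULL << pool);
		table[i].state |= ST_MODIFIED;
		if (!table[i].pools) {
			table[i].state &= ~ST_IN_USE;
			memset(table[i].addr, 0, 6);
		}
	}
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	add_filter(mac, 0);
	add_filter(mac, 3);	/* same slot, second pool bit set */
	del_filter(mac, 0);
	printf("pools=%#llx in_use=%d\n",
	       (unsigned long long)table[0].pools,
	       !!(table[0].state & ST_IN_USE));	/* pools=0x8 in_use=1 */
	return 0;
}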
891 static int wx_available_rars(struct wx *wx)
895 for (i = 0; i < wx->mac.num_rar_entries; i++) {
896 if (wx->mac_table[i].state == 0)
915 struct wx *wx = netdev_priv(netdev);
919 if (netdev_uc_count(netdev) > wx_available_rars(wx))
926 wx_del_mac_filter(wx, ha->addr, pool);
927 wx_add_mac_filter(wx, ha->addr, pool);
936 * @wx: pointer to private structure
946 static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
950 switch (wx->mac.mc_filter_type) {
964 wx_err(wx, "MC filter type param set incorrectly\n");
975 * @wx: pointer to private structure
980 static void wx_set_mta(struct wx *wx, u8 *mc_addr)
984 wx->addr_ctrl.mta_in_use++;
986 vector = wx_mta_vector(wx, mc_addr);
987 wx_dbg(wx, " bit-vector = 0x%03X\n", vector);
999 wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
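wx_set_mta() turns a 12-bit hash vector into a word index and bit position within mta_shadow[]. A self-contained sketch; the filter-type-0 extraction from the last two address octets and the >>5 / &0x1F split over a 128-word table are assumptions made for illustration:

#include <stdint.h>
#include <stdio.h>

/* assumed filter type 0: build a 12-bit vector from the last two octets */
static unsigned int mta_vector(const uint8_t *mc_addr)
{
	return (((uint16_t)mc_addr[5] << 4) | (mc_addr[4] >> 4)) & 0xFFF;
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };	/* 224.0.0.251 */
	unsigned int vector = mta_vector(mc);
	unsigned int vector_reg = (vector >> 5) & 0x7F;	/* which 32-bit MTA word */
	unsigned int vector_bit = vector & 0x1F;	/* which bit inside that word */

	printf("vector=0x%03x -> mta_shadow[%u] |= 1 << %u\n",
	       vector, vector_reg, vector_bit);
	return 0;
}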
1004 * @wx: pointer to private structure
1012 static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
1020 wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1021 wx->addr_ctrl.mta_in_use = 0;
1024 wx_dbg(wx, " Clearing MTA\n");
1025 memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));
1029 wx_dbg(wx, " Adding the multicast addresses:\n");
1030 wx_set_mta(wx, ha->addr);
1034 for (i = 0; i < wx->mac.mcft_size; i++)
1035 wr32a(wx, WX_PSR_MC_TBL(0), i,
1036 wx->mac.mta_shadow[i]);
1038 if (wx->addr_ctrl.mta_in_use > 0) {
1039 psrctl = rd32(wx, WX_PSR_CTL);
1042 (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
1043 wr32(wx, WX_PSR_CTL, psrctl);
1046 wx_dbg(wx, "Update mc addr list Complete\n");
1059 struct wx *wx = netdev_priv(netdev);
1064 wx_update_mc_addr_list(wx, netdev);
1078 struct wx *wx = netdev_priv(netdev);
1086 wx_del_mac_filter(wx, wx->mac.addr, 0);
1088 memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
1090 wx_mac_set_default_filter(wx, wx->mac.addr);
1096 void wx_disable_rx(struct wx *wx)
1101 rxctrl = rd32(wx, WX_RDB_PB_CTL);
1103 pfdtxgswc = rd32(wx, WX_PSR_CTL);
1106 wr32(wx, WX_PSR_CTL, pfdtxgswc);
1107 wx->mac.set_lben = true;
1109 wx->mac.set_lben = false;
1112 wr32(wx, WX_RDB_PB_CTL, rxctrl);
1114 if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
1115 ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
1117 wr32m(wx, WX_MAC_RX_CFG,
1124 static void wx_enable_rx(struct wx *wx)
1129 wr32m(wx, WX_MAC_RX_CFG,
1132 wr32m(wx, WX_RDB_PB_CTL,
1135 if (wx->mac.set_lben) {
1136 psrctl = rd32(wx, WX_PSR_CTL);
1138 wr32(wx, WX_PSR_CTL, psrctl);
1139 wx->mac.set_lben = false;
1145 * @wx: pointer to private structure
1147 static void wx_set_rxpba(struct wx *wx)
1151 rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
1152 wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
1155 txpktsize = wx->mac.tx_pb_size;
1157 wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
1158 wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
1166 * @wx: board private structure to calculate for
1168 static int wx_hpbthresh(struct wx *wx)
1170 struct net_device *dev = wx->netdev;
1183 rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
1192 dev_warn(&wx->pdev->dev,
1203 * @wx: board private structure to calculate for
1205 static int wx_lpbthresh(struct wx *wx)
1207 struct net_device *dev = wx->netdev;
1224 * @wx: board private structure to calculate for
1226 static void wx_pbthresh_setup(struct wx *wx)
1228 wx->fc.high_water = wx_hpbthresh(wx);
1229 wx->fc.low_water = wx_lpbthresh(wx);
1232 if (wx->fc.low_water > wx->fc.high_water)
1233 wx->fc.low_water = 0;
1236 static void wx_configure_port(struct wx *wx)
1241 wr32m(wx, WX_CFG_PORT_CTL,
1246 wr32(wx, WX_CFG_TAG_TPID(0),
1248 wx->tpid[0] = ETH_P_8021Q;
1249 wx->tpid[1] = ETH_P_8021AD;
1251 wr32(wx, WX_CFG_TAG_TPID(i),
1254 wx->tpid[i] = ETH_P_8021Q;
1259 * @wx: pointer to private structure
1264 static int wx_disable_sec_rx_path(struct wx *wx)
1268 wr32m(wx, WX_RSC_CTL,
1272 1000, 40000, false, wx, WX_RSC_ST);
1277 * @wx: pointer to private structure
1281 static void wx_enable_sec_rx_path(struct wx *wx)
1283 wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
1284 WX_WRITE_FLUSH(wx);
1287 static void wx_vlan_strip_control(struct wx *wx, bool enable)
1291 for (i = 0; i < wx->num_rx_queues; i++) {
1292 struct wx_ring *ring = wx->rx_ring[i];
1295 wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN,
1302 struct wx *wx = netdev_priv(netdev);
1310 fctrl = rd32(wx, WX_PSR_CTL);
1312 vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
1317 vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
1327 wx->addr_ctrl.user_set_promisc = false;
1329 wx->addr_ctrl.user_set_promisc = true;
1345 wr32m(wx, WX_RSC_CTL,
1372 wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
1373 wr32(wx, WX_PSR_CTL, fctrl);
1374 wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
1378 wx_vlan_strip_control(wx, true);
1380 wx_vlan_strip_control(wx, false);
1385 static void wx_set_rx_buffer_len(struct wx *wx)
1387 struct net_device *netdev = wx->netdev;
1395 mhadd = rd32(wx, WX_PSR_MAX_SZ);
1397 wr32(wx, WX_PSR_MAX_SZ, max_frame);
1409 struct wx *wx = netdev_priv(netdev);
1412 wx_set_rx_buffer_len(wx);
1419 void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
1426 wr32m(wx, WX_PX_RR_CFG(reg_idx),
1431 10, 100, true, wx, WX_PX_RR_CFG(reg_idx));
1435 wx_err(wx,
1442 static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
1449 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));
1453 wx_err(wx,
1459 static void wx_configure_srrctl(struct wx *wx,
1465 srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
1475 wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
1478 static void wx_configure_tx_ring(struct wx *wx,
1487 wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
1488 WX_WRITE_FLUSH(wx);
1490 wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
1491 wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
1494 wr32(wx, WX_PX_TR_RP(reg_idx), 0);
1495 wr32(wx, WX_PX_TR_WP(reg_idx), 0);
1496 ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);
1507 wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
1511 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
1513 wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
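The BAL/BAH writes above split the ring's 64-bit descriptor DMA address across two 32-bit registers, mirroring DMA_BIT_MASK(32) and upper_32_bits(). A self-contained sketch of the split (the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tdba = 0x0000001234567000ULL;		/* example ring DMA address */
	uint32_t bal = (uint32_t)(tdba & 0xffffffffULL);	/* -> WX_PX_TR_BAL */
	uint32_t bah = (uint32_t)(tdba >> 32);			/* -> WX_PX_TR_BAH */

	printf("BAL=0x%08x BAH=0x%08x\n", bal, bah);	/* BAL=0x34567000 BAH=0x00000012 */
	return 0;
}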
1516 static void wx_configure_rx_ring(struct wx *wx,
1525 rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
1526 wx_disable_rx_queue(wx, ring);
1528 wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
1529 wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
1537 wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
1540 wr32(wx, WX_PX_RR_RP(reg_idx), 0);
1541 wr32(wx, WX_PX_RR_WP(reg_idx), 0);
1542 ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
1544 wx_configure_srrctl(wx, ring);
1555 wr32m(wx, WX_PX_RR_CFG(reg_idx),
1558 wx_enable_rx_queue(wx, ring);
1564 * @wx: pointer to private structure
1568 static void wx_configure_tx(struct wx *wx)
1573 wr32m(wx, WX_TDM_CTL,
1577 for (i = 0; i < wx->num_tx_queues; i++)
1578 wx_configure_tx_ring(wx, wx->tx_ring[i]);
1580 wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
1582 if (wx->mac.type == wx_mac_em)
1583 wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
1586 wr32m(wx, WX_MAC_TX_CFG,
1590 static void wx_restore_vlan(struct wx *wx)
1594 wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);
1596 for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
1597 wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
1600 static void wx_store_reta(struct wx *wx)
1602 u8 *indir_tbl = wx->rss_indir_tbl;
1612 wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
1618 static void wx_setup_reta(struct wx *wx)
1620 u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
1626 wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
1629 memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
1635 wx->rss_indir_tbl[i] = j;
1638 wx_store_reta(wx);
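wx_setup_reta() spreads the redirection-table entries round-robin over the active RSS queues, and wx_store_reta() packs four 8-bit entries into each WX_RDB_RSSTBL register, hence the i >> 2 index above. A self-contained sketch of both steps; the 128-entry table size and the byte ordering within a register are assumptions:

#include <stdint.h>
#include <stdio.h>

#define RETA_ENTRIES 128	/* assumed redirection table size */

int main(void)
{
	uint8_t indir_tbl[RETA_ENTRIES];
	unsigned int rss_i = 8;		/* number of RSS queues in this example */
	uint32_t reta = 0;
	unsigned int i, j;

	/* spread table entries round-robin across the rss_i queues */
	for (i = 0, j = 0; i < RETA_ENTRIES; i++, j++) {
		if (j == rss_i)
			j = 0;
		indir_tbl[i] = j;
	}

	/* pack four 8-bit entries into each 32-bit RSSTBL register */
	for (i = 0; i < RETA_ENTRIES; i++) {
		reta |= (uint32_t)indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 0x3) == 0x3) {
			printf("RSSTBL[%u] = 0x%08x\n", i >> 2, reta);
			reta = 0;
		}
	}
	return 0;
}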
1641 static void wx_setup_mrqc(struct wx *wx)
1646 wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
1656 netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
1658 wx_setup_reta(wx);
1660 if (wx->rss_enabled)
1663 wr32(wx, WX_RDB_RA_CTL, rss_field);
1668 * @wx: pointer to private structure
1672 void wx_configure_rx(struct wx *wx)
1677 wx_disable_rx(wx);
1683 wr32(wx, WX_RDB_PL_CFG(0), psrtype);
1686 wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
1688 if (wx->mac.type == wx_mac_sp) {
1692 psrctl = rd32(wx, WX_PSR_CTL);
1695 wr32(wx, WX_PSR_CTL, psrctl);
1698 wx_setup_mrqc(wx);
1701 wx_set_rx_buffer_len(wx);
1706 for (i = 0; i < wx->num_rx_queues; i++)
1707 wx_configure_rx_ring(wx, wx->rx_ring[i]);
1710 ret = wx_disable_sec_rx_path(wx);
1712 wx_err(wx, "The register status is abnormal, please check device.");
1714 wx_enable_rx(wx);
1715 wx_enable_sec_rx_path(wx);
1719 static void wx_configure_isb(struct wx *wx)
1722 wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
1724 wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
1727 void wx_configure(struct wx *wx)
1729 wx_set_rxpba(wx);
1730 wx_pbthresh_setup(wx);
1731 wx_configure_port(wx);
1733 wx_set_rx_mode(wx->netdev);
1734 wx_restore_vlan(wx);
1735 wx_enable_sec_rx_path(wx);
1737 wx_configure_tx(wx);
1738 wx_configure_rx(wx);
1739 wx_configure_isb(wx);
1745 * @wx: pointer to hardware structure
1750 int wx_disable_pcie_master(struct wx *wx)
1756 pci_clear_master(wx->pdev);
1759 if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
1764 false, wx, WX_PX_TRANSACTION_PENDING);
1766 wx_err(wx, "PCIe transaction pending bit did not clear.\n");
1774 * @wx: pointer to hardware structure
1781 int wx_stop_adapter(struct wx *wx)
1788 wx->adapter_stopped = true;
1791 wx_disable_rx(wx);
1794 wx_intr_disable(wx, WX_INTR_ALL);
1797 wr32(wx, WX_PX_MISC_IC, 0xffffffff);
1798 wr32(wx, WX_BME_CTL, 0x3);
1801 for (i = 0; i < wx->mac.max_tx_queues; i++) {
1802 wr32m(wx, WX_PX_TR_CFG(i),
1808 for (i = 0; i < wx->mac.max_rx_queues; i++) {
1809 wr32m(wx, WX_PX_RR_CFG(i),
1814 WX_WRITE_FLUSH(wx);
1819 return wx_disable_pcie_master(wx);
1823 void wx_reset_misc(struct wx *wx)
1828 wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
1831 wr32m(wx, WX_MMC_CONTROL,
1834 wr32m(wx, WX_MAC_RX_FLOW_CTRL,
1837 wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
1839 wr32m(wx, WX_MIS_RST_ST,
1843 wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
1845 wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
1846 wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
1847 wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
1849 wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
1851 wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
1852 wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
1853 wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
1857 wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
1858 wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
1864 * @wx: pointer to hardware structure
1871 int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
1873 struct pci_dev *pdev = wx->pdev;
1898 * wx_init_rss_key - Initialize wx RSS key
1899 * @wx: device handle
1903 static int wx_init_rss_key(struct wx *wx)
1907 if (!wx->rss_key) {
1913 wx->rss_key = rss_key;
1919 int wx_sw_init(struct wx *wx)
1921 struct pci_dev *pdev = wx->pdev;
1925 wx->vendor_id = pdev->vendor;
1926 wx->device_id = pdev->device;
1927 wx->revision_id = pdev->revision;
1928 wx->oem_svid = pdev->subsystem_vendor;
1929 wx->oem_ssid = pdev->subsystem_device;
1930 wx->bus.device = PCI_SLOT(pdev->devfn);
1931 wx->bus.func = PCI_FUNC(pdev->devfn);
1933 if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
1934 wx->subsystem_vendor_id = pdev->subsystem_vendor;
1935 wx->subsystem_device_id = pdev->subsystem_device;
1937 err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
1939 wx_err(wx, "read of internal subsystem device id failed\n");
1943 wx->subsystem_device_id = swab16((u16)ssid);
1946 err = wx_init_rss_key(wx);
1948 wx_err(wx, "rss key allocation failed\n");
1952 wx->mac_table = kcalloc(wx->mac.num_rar_entries,
1955 if (!wx->mac_table) {
1956 wx_err(wx, "mac_table allocation failed\n");
1957 kfree(wx->rss_key);
1967 * @wx: pointer to hardware structure
1973 static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
1986 wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
1987 bits = rd32(wx, WX_PSR_VLAN_SWC);
2006 * @wx: pointer to hardware structure
2015 static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
2028 vt = rd32(wx, WX_CFG_PORT_CTL);
2032 vlvf_index = wx_find_vlvf_slot(wx, vlan);
2036 wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
2040 bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2042 wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2044 bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2046 wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2051 bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2053 wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
2054 bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2056 bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
2058 wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
2059 bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
2064 wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
2068 wr32(wx, WX_PSR_VLAN_SWC, 0);
2076 * @wx: pointer to hardware structure
2083 static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
2104 vfta = wx->mac.vft_shadow[regindex];
2119 ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
2124 wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
2125 wx->mac.vft_shadow[regindex] = vfta;
2132 * @wx: pointer to hardware structure
2136 static void wx_clear_vfta(struct wx *wx)
2140 for (offset = 0; offset < wx->mac.vft_size; offset++) {
2141 wr32(wx, WX_PSR_VLAN_TBL(offset), 0);
2142 wx->mac.vft_shadow[offset] = 0;
2146 wr32(wx, WX_PSR_VLAN_SWC_IDX, offset);
2147 wr32(wx, WX_PSR_VLAN_SWC, 0);
2148 wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
2149 wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
2156 struct wx *wx = netdev_priv(netdev);
2159 wx_set_vfta(wx, vid, VMDQ_P(0), true);
2160 set_bit(vid, wx->active_vlans);
2168 struct wx *wx = netdev_priv(netdev);
2172 wx_set_vfta(wx, vid, VMDQ_P(0), false);
2173 clear_bit(vid, wx->active_vlans);
2179 static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
2184 srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2187 wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2190 static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
2195 srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
2198 wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
2201 int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
2209 if (tx_pause && wx->fc.high_water) {
2210 if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
2211 wx_err(wx, "Invalid water mark configuration\n");
2217 mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
2220 fccfg_reg = rd32(wx, WX_RDB_RFCC);
2229 wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
2230 wr32(wx, WX_RDB_RFCC, fccfg_reg);
2233 if (tx_pause && wx->fc.high_water) {
2234 fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
2235 wr32(wx, WX_RDB_RFCL, fcrtl);
2236 fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
2238 wr32(wx, WX_RDB_RFCL, 0);
2245 fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
2248 wr32(wx, WX_RDB_RFCH, fcrth);
2252 wr32(wx, WX_RDB_RFCV, reg);
2255 wr32(wx, WX_RDB_RFCRT, pause_time / 2);
2263 if (wx->num_rx_queues > 1 && !tx_pause) {
2264 for (i = 0; i < wx->num_rx_queues; i++)
2265 wx_enable_rx_drop(wx, wx->rx_ring[i]);
2267 for (i = 0; i < wx->num_rx_queues; i++)
2268 wx_disable_rx_drop(wx, wx->rx_ring[i]);
2277 * @wx: board private structure
2279 void wx_update_stats(struct wx *wx)
2281 struct wx_hw_stats *hwstats = &wx->stats;
2288 /* gather some stats to the wx struct that are per queue */
2289 for (i = 0; i < wx->num_rx_queues; i++) {
2290 struct wx_ring *rx_ring = wx->rx_ring[i];
2297 wx->non_eop_descs = non_eop_descs;
2298 wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
2299 wx->hw_csum_rx_error = hw_csum_rx_error;
2300 wx->hw_csum_rx_good = hw_csum_rx_good;
2302 for (i = 0; i < wx->num_tx_queues; i++) {
2303 struct wx_ring *tx_ring = wx->tx_ring[i];
2308 wx->restart_queue = restart_queue;
2309 wx->tx_busy = tx_busy;
2311 hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
2312 hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
2313 hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
2314 hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
2315 hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2316 hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2317 hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2318 hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2319 hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2320 hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2321 hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2322 hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2323 hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2324 hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2325 hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
2326 hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
2327 hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
2328 hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
2329 hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
2330 hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
2331 hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
2332 hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
2334 for (i = 0; i < wx->mac.max_rx_queues; i++)
2335 hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
2341 * @wx: board private structure
2346 void wx_clear_hw_cntrs(struct wx *wx)
2350 for (i = 0; i < wx->mac.max_rx_queues; i++)
2351 wr32(wx, WX_PX_MPRC(i), 0);
2353 rd32(wx, WX_RDM_PKT_CNT);
2354 rd32(wx, WX_TDM_PKT_CNT);
2355 rd64(wx, WX_RDM_BYTE_CNT_LSB);
2356 rd32(wx, WX_TDM_BYTE_CNT_LSB);
2357 rd32(wx, WX_RDM_DRP_PKT);
2358 rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
2359 rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
2360 rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
2361 rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
2362 rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
2363 rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
2364 rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
2365 rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
2366 rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
2367 rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
2368 rd32(wx, WX_RDB_LXONTXC);
2369 rd32(wx, WX_RDB_LXOFFTXC);
2370 rd32(wx, WX_MAC_LXONOFFRXC);
2376 * @wx: pointer to hardware structure
2382 void wx_start_hw(struct wx *wx)
2387 wx_clear_vfta(wx);
2388 WX_WRITE_FLUSH(wx);
2390 for (i = 0; i < wx->mac.max_tx_queues; i++) {
2391 wr32(wx, WX_TDM_RP_IDX, i);
2392 wr32(wx, WX_TDM_RP_RATE, 0);