Lines Matching refs:pdata

127 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
129 return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
132 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
140 rate = pdata->sysclk_rate;
155 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
163 rate = pdata->sysclk_rate;
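
The two helpers above convert a microsecond coalescing value to the DMA receive interrupt watchdog timer (RIWT) count and back, scaled by pdata->sysclk_rate. A minimal standalone sketch of the arithmetic, assuming the usual granularity for this MAC of 256 system-clock cycles per RIWT unit:

static unsigned int usec_to_riwt(unsigned long sysclk_rate, unsigned int usec)
{
	/* usec * clock-MHz = cycles; one RIWT unit is 256 cycles */
	return (usec * (sysclk_rate / 1000000)) / 256;
}

static unsigned int riwt_to_usec(unsigned long sysclk_rate, unsigned int riwt)
{
	/* inverse conversion: RIWT units back to microseconds */
	return (riwt * 256) / (sysclk_rate / 1000000);
}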
178 static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
184 pbl = pdata->pbl;
186 if (pdata->pbl > 32) {
191 for (i = 0; i < pdata->channel_count; i++) {
192 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
195 if (pdata->channel[i]->tx_ring)
196 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
199 if (pdata->channel[i]->rx_ring)
200 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
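
xgbe_config_pbl_val pushes the DMA burst length (PBL) into every channel's control, TX and RX registers, and the test against 32 points at the usual PBLx8 scheme: bursts above 32 beats are programmed as one eighth of the value with the x8 multiplier bit set. A hedged sketch of that selection (the exact encoding is an assumption):

static void choose_pbl(unsigned int requested, unsigned int *pbl,
		       unsigned int *pblx8)
{
	if (requested > 32) {
		*pbl = requested >> 3;	/* hardware multiplies by 8 */
		*pblx8 = 1;
	} else {
		*pbl = requested;
		*pblx8 = 0;
	}
}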
207 static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
211 for (i = 0; i < pdata->channel_count; i++) {
212 if (!pdata->channel[i]->tx_ring)
215 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
216 pdata->tx_osp_mode);
222 static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
226 for (i = 0; i < pdata->rx_q_count; i++)
227 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
232 static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
236 for (i = 0; i < pdata->tx_q_count; i++)
237 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
242 static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
247 for (i = 0; i < pdata->rx_q_count; i++)
248 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
253 static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
258 for (i = 0; i < pdata->tx_q_count; i++)
259 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
264 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
268 for (i = 0; i < pdata->channel_count; i++) {
269 if (!pdata->channel[i]->rx_ring)
272 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
273 pdata->rx_riwt);
279 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
284 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
288 for (i = 0; i < pdata->channel_count; i++) {
289 if (!pdata->channel[i]->rx_ring)
292 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
293 pdata->rx_buf_size);
297 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
301 for (i = 0; i < pdata->channel_count; i++) {
302 if (!pdata->channel[i]->tx_ring)
305 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
309 static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
313 for (i = 0; i < pdata->channel_count; i++) {
314 if (!pdata->channel[i]->rx_ring)
317 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
320 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
323 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
329 mutex_lock(&pdata->rss_mutex);
331 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
336 XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
338 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
339 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
340 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
341 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
345 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
354 mutex_unlock(&pdata->rss_mutex);
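
xgbe_write_rss_reg is an indirect-register handshake: under rss_mutex it refuses to start while the OB ("operation busy") bit is still set, writes the data register, programs the index and type, sets OB, then polls until hardware clears it. A generic sketch of the pattern; read_ob(), write_data(), write_addr() and kick_ob() are hypothetical stand-ins for the register accessors:

#include <errno.h>
#include <unistd.h>

static int indirect_write(unsigned int type, unsigned int index,
			  unsigned int val)
{
	int wait = 1000;

	if (read_ob())			/* previous operation unfinished */
		return -EBUSY;

	write_data(val);
	write_addr(type, index);
	kick_ob();			/* start the transfer */

	while (wait--) {
		if (!read_ob())		/* hardware cleared OB: done */
			return 0;
		usleep(1000);		/* placeholder for usleep_range() */
	}

	return -EBUSY;			/* timed out */
}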
359 static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
361 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
362 unsigned int *key = (unsigned int *)&pdata->rss_key;
366 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
375 static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
380 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
381 ret = xgbe_write_rss_reg(pdata,
383 pdata->rss_table[i]);
391 static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
393 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
395 return xgbe_write_rss_hash_key(pdata);
398 static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
403 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
404 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
406 return xgbe_write_rss_lookup_table(pdata);
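
xgbe_write_rss_hash_key reinterprets the hash key as 32-bit words (key_regs = sizeof(rss_key) / sizeof(u32)) and pushes each word through the indirect write, one register index per word; the lookup table is written the same way, one entry per slot. A compact sketch of the word-by-word loop, with write_reg(index, val) standing in for xgbe_write_rss_reg:

#include <stdint.h>
#include <string.h>

static int write_hash_key(const uint8_t *key, size_t len)
{
	uint32_t word;
	size_t i;
	int ret;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		/* memcpy avoids alignment assumptions on the key buffer */
		memcpy(&word, key + i * sizeof(uint32_t), sizeof(word));
		ret = write_reg(i, word);
		if (ret)
			return ret;
	}

	return 0;
}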
409 static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
413 if (!pdata->hw_feat.rss)
417 ret = xgbe_write_rss_hash_key(pdata);
422 ret = xgbe_write_rss_lookup_table(pdata);
427 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
430 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
435 static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
437 if (!pdata->hw_feat.rss)
440 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
445 static void xgbe_config_rss(struct xgbe_prv_data *pdata)
449 if (!pdata->hw_feat.rss)
452 if (pdata->netdev->features & NETIF_F_RXHASH)
453 ret = xgbe_enable_rss(pdata);
455 ret = xgbe_disable_rss(pdata);
458 netdev_err(pdata->netdev,
462 static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
469 if (pdata->prio2q_map[prio] != queue)
473 tc = pdata->ets->prio_tc[prio];
476 if (pdata->pfc->pfc_en & (1 << tc))
483 static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
486 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);
488 netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
489 pdata->vxlan_port);
492 static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
494 if (!pdata->hw_feat.vxn)
498 xgbe_set_vxlan_id(pdata);
501 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);
504 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
505 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);
507 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
510 static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
512 if (!pdata->hw_feat.vxn)
516 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);
519 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);
522 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);
524 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
527 static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
532 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
535 return min_t(unsigned int, pdata->tx_q_count, max_q_count);
538 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
544 for (i = 0; i < pdata->rx_q_count; i++)
545 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
548 q_count = xgbe_get_fc_queue_count(pdata);
551 reg_val = XGMAC_IOREAD(pdata, reg);
553 XGMAC_IOWRITE(pdata, reg, reg_val);
561 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
563 struct ieee_pfc *pfc = pdata->pfc;
564 struct ieee_ets *ets = pdata->ets;
569 for (i = 0; i < pdata->rx_q_count; i++) {
572 if (pdata->rx_rfd[i]) {
575 if (xgbe_is_pfc_queue(pdata, i))
582 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
584 netif_dbg(pdata, drv, pdata->netdev,
590 q_count = xgbe_get_fc_queue_count(pdata);
593 reg_val = XGMAC_IOREAD(pdata, reg);
600 XGMAC_IOWRITE(pdata, reg, reg_val);
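
In xgbe_enable_tx_flow_control the per-queue EHFC bit is only set when a discard threshold exists for the queue (rx_rfd[i] non-zero), and, when both PFC and ETS are configured, only when the queue maps to a PFC-enabled traffic class via xgbe_is_pfc_queue. A sketch of that decision under those assumptions:

static int queue_ehfc(unsigned int rfd, int have_pfc_ets, int is_pfc_queue)
{
	if (!rfd)			/* no thresholds established */
		return 0;
	if (have_pfc_ets)
		return is_pfc_queue;	/* DCB: only PFC queues */
	return 1;			/* plain pause: every queue */
}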
608 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
610 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
615 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
617 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
622 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
624 struct ieee_pfc *pfc = pdata->pfc;
626 if (pdata->tx_pause || (pfc && pfc->pfc_en))
627 xgbe_enable_tx_flow_control(pdata);
629 xgbe_disable_tx_flow_control(pdata);
634 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
636 struct ieee_pfc *pfc = pdata->pfc;
638 if (pdata->rx_pause || (pfc && pfc->pfc_en))
639 xgbe_enable_rx_flow_control(pdata);
641 xgbe_disable_rx_flow_control(pdata);
646 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
648 struct ieee_pfc *pfc = pdata->pfc;
650 xgbe_config_tx_flow_control(pdata);
651 xgbe_config_rx_flow_control(pdata);
653 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
657 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
663 if (pdata->channel_irq_mode)
664 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
665 pdata->channel_irq_mode);
667 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
669 for (i = 0; i < pdata->channel_count; i++) {
670 channel = pdata->channel[i];
699 if (!pdata->per_channel_irq || pdata->channel_irq_mode)
711 if (!pdata->per_channel_irq || pdata->channel_irq_mode)
720 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
725 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
728 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
729 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
732 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
736 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
743 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
746 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
747 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
750 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
753 static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
757 if (!pdata->vdata->ecc_support)
761 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
762 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
772 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
775 static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
779 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
786 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
789 static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
794 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
809 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
812 static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
833 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
834 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
839 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
842 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
845 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
848 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
851 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
854 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
859 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
861 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
866 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
869 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
872 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
875 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
878 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
886 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
891 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
894 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
923 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
931 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
940 XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
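
xgbe_update_vlan_hash_table ORs one bit into a 16-bit hash for every active VLAN ID and writes the result to MAC_VLANHTR. The usual scheme in this hardware's drivers derives the bit index from a CRC-32 over the 12 VID bits: the complemented CRC is bit-reversed and its top 4 bits select one of 16 positions. A hedged standalone sketch of that computation:

#include <stdint.h>

static uint32_t crc32_le_bits(uint16_t vid, int bits)
{
	uint32_t crc = ~0u;	/* little-endian CRC-32, poly 0xEDB88320 */
	int i;

	for (i = 0; i < bits; i++, vid >>= 1)
		crc = (crc >> 1) ^ (((crc ^ vid) & 1) ? 0xedb88320u : 0);

	return crc;
}

static uint16_t vlan_hash_bit(uint16_t vid)
{
	uint32_t crc = ~crc32_le_bits(vid, 12);
	uint32_t rev = 0;
	int i;

	for (i = 0; i < 32; i++)	/* bitrev32 */
		rev |= ((crc >> i) & 1u) << (31 - i);

	return 1u << (rev >> 28);	/* one of 16 table bits */
}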
945 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
950 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
953 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
955 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
959 xgbe_disable_rx_vlan_filtering(pdata);
961 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
962 xgbe_enable_rx_vlan_filtering(pdata);
968 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
973 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
976 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
978 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
983 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
1002 netif_dbg(pdata, drv, pdata->netdev,
1009 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
1011 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
1015 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
1017 struct net_device *netdev = pdata->netdev;
1023 addn_macs = pdata->hw_feat.addn_mac;
1026 xgbe_set_promiscuous_mode(pdata, 1);
1029 xgbe_set_mac_reg(pdata, ha, &mac_reg);
1034 xgbe_set_all_multicast_mode(pdata, 1);
1037 xgbe_set_mac_reg(pdata, ha, &mac_reg);
1045 xgbe_set_mac_reg(pdata, NULL, &mac_reg);
1048 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
1050 struct net_device *netdev = pdata->netdev;
1058 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
1059 hash_table_count = pdata->hw_feat.hash_table_size / 32;
1078 XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
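
xgbe_set_mac_hash_table derives its geometry from the hardware: hash_table_shift = 26 - (hash_table_size >> 7) leaves a 6-bit index for a 64-entry table, 7 bits for 128 and 8 bits for 256, and hash_table_count = size / 32 is the number of 32-bit registers. The index itself is built from a CRC-32 of the 6-byte address; a hedged sketch, with crc32_le() assumed to be the usual little-endian CRC-32 (init ~0, poly 0xEDB88320):

#include <stdint.h>

static void hash_set_addr(uint32_t *table, const uint8_t addr[6],
			  unsigned int hash_table_size)
{
	unsigned int shift = 26 - (hash_table_size >> 7);
	uint32_t crc = ~crc32_le(~0u, addr, 6);
	uint32_t rev = 0, idx;
	int i;

	for (i = 0; i < 32; i++)		/* bitrev32 */
		rev |= ((crc >> i) & 1u) << (31 - i);

	idx = rev >> shift;			/* top bits pick the entry */
	table[idx >> 5] |= 1u << (idx & 0x1f);	/* 32 bits per register */
}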
1083 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
1085 if (pdata->hw_feat.hash_table_size)
1086 xgbe_set_mac_hash_table(pdata);
1088 xgbe_set_mac_addn_addrs(pdata);
1093 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
1101 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
1102 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
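
xgbe_set_mac_address packs the station address into the MACA0 HI/LO register pair. The conventional layout for this MAC puts bytes 0-3 in LO (byte 0 in bits 7:0) and bytes 4-5 in the low half of HI; a sketch of the packing (secondary addresses additionally carry an address-enable bit, not shown):

#include <stdint.h>

static void mac_addr_regs(const uint8_t a[6], uint32_t *hi, uint32_t *lo)
{
	*hi = ((uint32_t)a[5] << 8) | a[4];
	*lo = ((uint32_t)a[3] << 24) | ((uint32_t)a[2] << 16) |
	      ((uint32_t)a[1] << 8) | a[0];
}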
1107 static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
1109 struct net_device *netdev = pdata->netdev;
1115 xgbe_set_promiscuous_mode(pdata, pr_mode);
1116 xgbe_set_all_multicast_mode(pdata, am_mode);
1118 xgbe_add_mac_addresses(pdata);
1123 static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1130 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1133 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1138 static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1145 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1148 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1153 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1163 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1175 index = mmd_address & ~pdata->xpcs_window_mask;
1176 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1178 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1179 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1180 mmd_data = XPCS16_IOREAD(pdata, offset);
1181 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1186 static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1195 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1207 index = mmd_address & ~pdata->xpcs_window_mask;
1208 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1210 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1211 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1212 XPCS16_IOWRITE(pdata, offset, mmd_data);
1213 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
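
The v2 MMD accessors use a windowed XPCS mapping: the flat address is (mdio_mmd << 16) | reg, the bits above xpcs_window_mask select a page written to the window-select register, and the bits inside the mask become an offset into the window; select and access happen under xpcs_lock because the pair must stay consistent. A sketch of the address split, with the window parameters as assumptions:

#include <stdint.h>

struct xpcs_access {
	uint32_t index;		/* value for the window-select register */
	uint32_t offset;	/* in-window offset to read/write */
};

static struct xpcs_access xpcs_split(unsigned int mmd, unsigned int reg,
				     uint32_t window_base, uint32_t window_mask)
{
	uint32_t addr = (mmd << 16) | (reg & 0xffff);

	return (struct xpcs_access){
		.index  = addr & ~window_mask,
		.offset = window_base + (addr & window_mask),
	};
}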
1216 static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1226 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1237 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1238 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1239 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
1240 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1245 static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1254 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1265 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1266 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1267 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
1268 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1271 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1274 switch (pdata->vdata->xpcs_access) {
1276 return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
1280 return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
1284 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1287 switch (pdata->vdata->xpcs_access) {
1289 return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
1293 return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
1320 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
1325 reinit_completion(&pdata->mdio_complete);
1327 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1333 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1335 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1336 netdev_err(pdata->netdev, "mdio write operation timed out\n");
1343 static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
1350 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
1353 static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
1360 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
1363 static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
1368 reinit_completion(&pdata->mdio_complete);
1370 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1375 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1377 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1378 netdev_err(pdata->netdev, "mdio read operation timed out\n");
1382 return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
1385 static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
1392 return xgbe_read_ext_mii_regs(pdata, mdio_sca);
1395 static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
1402 return xgbe_read_ext_mii_regs(pdata, mdio_sca);
1405 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
1408 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
1422 XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
1432 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
1434 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
1439 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
1441 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
1495 static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1499 unsigned int rx_usecs = pdata->rx_usecs;
1500 unsigned int rx_frames = pdata->rx_frames;
1545 struct xgbe_prv_data *pdata = channel->pdata;
1558 xgbe_rx_desc_reset(pdata, rdata, i);
1579 static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
1585 XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
1586 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
1589 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
1593 netdev_err(pdata->netdev,
1597 static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
1603 XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1604 XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1605 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
1608 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
1612 netdev_err(pdata->netdev, "timed out initializing timestamp\n");
1615 static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
1619 nsec = XGMAC_IOREAD(pdata, MAC_STSR);
1621 nsec += XGMAC_IOREAD(pdata, MAC_STNR);
1626 static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
1631 if (pdata->vdata->tx_tstamp_workaround) {
1632 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
1633 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1635 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1636 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
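
xgbe_get_tx_tstamp reads the nanoseconds and seconds snapshot registers, in an order governed by the tx_tstamp_workaround flag, and folds them into a single 64-bit nanosecond value; xgbe_get_tstamp_time does the same for the system time registers. The combination is simply sec * 10^9 + nsec:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t tstamp_to_ns(uint32_t sec, uint32_t nsec)
{
	return (uint64_t)sec * NSEC_PER_SEC + nsec;
}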
1667 static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
1679 XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1686 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
1687 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
1688 xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
1689 xgbe_set_tstamp_time(pdata, 0, 0);
1692 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
1701 struct xgbe_prv_data *pdata = channel->pdata;
1714 if (pdata->tx_usecs && !channel->tx_timer_active) {
1717 jiffies + usecs_to_jiffies(pdata->tx_usecs));
1725 struct xgbe_prv_data *pdata = channel->pdata;
1773 if (!pdata->tx_frames)
1775 else if (tx_packets > pdata->tx_frames)
1777 else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
1788 netif_dbg(pdata, tx_queued, pdata->netdev,
1808 netif_dbg(pdata, tx_queued, pdata->netdev,
1867 pdata->ext_stats.tx_tso_packets += tx_packets;
1886 pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
1925 pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
1926 pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
1939 if (netif_msg_tx_queued(pdata))
1940 xgbe_dump_tx_desc(pdata, ring, start_index,
1948 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1963 struct xgbe_prv_data *pdata = channel->pdata;
1968 struct net_device *netdev = pdata->netdev;
1983 if (netif_msg_rx_status(pdata))
1984 xgbe_dump_rx_desc(pdata, ring, ring->cur);
2012 pdata->ext_stats.rx_split_header_packets++;
2061 pdata->ext_stats.rx_vxlan_packets++;
2076 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
2087 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
2099 pdata->ext_stats.rx_csum_errors++;
2105 pdata->ext_stats.rx_vxlan_csum_errors++;
2112 pdata->ext_stats.rxq_packets[channel->queue_index]++;
2113 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
2216 static int __xgbe_exit(struct xgbe_prv_data *pdata)
2223 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
2227 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
2238 static int xgbe_exit(struct xgbe_prv_data *pdata)
2245 ret = __xgbe_exit(pdata);
2249 return __xgbe_exit(pdata);
2252 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
2256 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
2259 for (i = 0; i < pdata->tx_q_count; i++)
2260 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
2263 for (i = 0; i < pdata->tx_q_count; i++) {
2265 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
2276 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
2280 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
2287 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
2288 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
2289 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
2290 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
2292 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
2295 if (pdata->vdata->tx_desc_prefetch)
2296 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
2297 pdata->vdata->tx_desc_prefetch);
2299 if (pdata->vdata->rx_desc_prefetch)
2300 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
2301 pdata->vdata->rx_desc_prefetch);
2304 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
2306 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
2307 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
2308 if (pdata->awarcr)
2309 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
2312 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
2317 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
2320 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2321 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2323 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
2327 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2330 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
2337 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
2339 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
2341 rfa = pdata->pfc_rfa;
2355 pdata->rx_rfa[queue] = 0;
2356 pdata->rx_rfd[queue] = 0;
2362 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
2363 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
2369 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
2370 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
2391 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
2392 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
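
The comments beside rx_rfa/rx_rfd give the RFA/RFD encoding: 0 means "fifo full minus 1024 bytes" and each step adds 512, so a byte threshold encodes as bytes/512 - 2, presumably what XGMAC_FLOW_CONTROL_VALUE expands to. A sketch under that reading:

static unsigned int flow_control_value(unsigned int bytes)
{
	/* 0 -> full-1024, 1 -> full-1536, n -> full-(1024 + n*512) */
	return (bytes < 1024) ? 0 : (bytes / 512) - 2;
}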
2395 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
2401 for (i = 0; i < pdata->rx_q_count; i++) {
2404 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
2408 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2412 for (i = 0; i < pdata->rx_q_count; i++) {
2413 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
2414 pdata->rx_rfa[i]);
2415 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
2416 pdata->rx_rfd[i]);
2420 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
2423 return min_t(unsigned int, pdata->tx_max_fifo_size,
2424 pdata->hw_feat.tx_fifo_size);
2427 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
2430 return min_t(unsigned int, pdata->rx_max_fifo_size,
2431 pdata->hw_feat.rx_fifo_size);
2480 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
2485 if (pdata->pfc->delay)
2486 return pdata->pfc->delay / 8;
2489 delay = xgbe_get_max_frame(pdata);
2504 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
2509 if (!pdata->pfc->pfc_en)
2513 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2515 if (!xgbe_is_pfc_queue(pdata, i))
2518 pdata->pfcq[i] = 1;
2525 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
2534 q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
2535 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2536 pfc_count = xgbe_get_pfc_queues(pdata);
2550 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
2551 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
2553 if (pdata->pfc_rfa > q_fifo_size) {
2554 addn_fifo = pdata->pfc_rfa - q_fifo_size;
2571 if (!pdata->pfcq[i] || !addn_fifo)
2575 netdev_warn(pdata->netdev,
2596 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
2602 fifo_size = xgbe_get_tx_fifo_size(pdata);
2604 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
2606 for (i = 0; i < pdata->tx_q_count; i++)
2607 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
2609 netif_info(pdata, drv, pdata->netdev,
2611 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2614 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2622 memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
2623 pdata->pfc_rfa = 0;
2625 fifo_size = xgbe_get_rx_fifo_size(pdata);
2626 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2629 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
2631 if (pdata->pfc && pdata->ets)
2632 xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
2636 for (i = 0; i < pdata->rx_q_count; i++)
2637 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
2639 xgbe_calculate_flow_control_threshold(pdata, fifo);
2640 xgbe_config_flow_control_threshold(pdata);
2642 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
2643 netif_info(pdata, drv, pdata->netdev,
2644 "%u Rx hardware queues\n", pdata->rx_q_count);
2645 for (i = 0; i < pdata->rx_q_count; i++)
2646 netif_info(pdata, drv, pdata->netdev,
2650 netif_info(pdata, drv, pdata->netdev,
2652 pdata->rx_q_count,
2657 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
2668 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
2669 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
2671 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
2673 netif_dbg(pdata, drv, pdata->netdev,
2675 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2677 pdata->q2tc_map[queue++] = i;
2681 netif_dbg(pdata, drv, pdata->netdev,
2683 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2685 pdata->q2tc_map[queue++] = i;
2690 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2699 netif_dbg(pdata, drv, pdata->netdev,
2702 pdata->prio2q_map[prio++] = i;
2706 netif_dbg(pdata, drv, pdata->netdev,
2709 pdata->prio2q_map[prio++] = i;
2717 XGMAC_IOWRITE(pdata, reg, reg_val);
2725 for (i = 0; i < pdata->rx_q_count;) {
2728 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
2731 XGMAC_IOWRITE(pdata, reg, reg_val);
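
xgbe_config_queue_mapping spreads the TX queues over the traffic classes: each TC gets qptc = tx_q_count / tc_cnt queues and the first qptc_extra = tx_q_count % tc_cnt classes take one extra, after which RX queues are grouped per priority and the MTL_RQDCM registers are filled MTL_RQDCM_Q_PER_REG queues at a time. A sketch of the even-with-remainder distribution:

#include <stdint.h>

static void map_queues_to_tcs(uint8_t *q2tc, unsigned int tx_q_count,
			      unsigned int tc_cnt)
{
	unsigned int qptc = tx_q_count / tc_cnt;
	unsigned int extra = tx_q_count % tc_cnt;
	unsigned int i, j, queue = 0;

	for (i = 0; i < tc_cnt; i++) {
		for (j = 0; j < qptc; j++)
			q2tc[queue++] = i;
		if (i < extra)		/* leftover queue for this TC */
			q2tc[queue++] = i;
	}
}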
2738 static void xgbe_config_tc(struct xgbe_prv_data *pdata)
2743 netdev_reset_tc(pdata->netdev);
2744 if (!pdata->num_tcs)
2747 netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
2749 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
2750 while ((queue < pdata->tx_q_count) &&
2751 (pdata->q2tc_map[queue] == i))
2754 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
2756 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
2760 if (!pdata->ets)
2764 netdev_set_prio_tc_map(pdata->netdev, prio,
2765 pdata->ets->prio_tc[prio]);
2768 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
2770 struct ieee_ets *ets = pdata->ets;
2781 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
2784 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
2789 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2798 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
2801 reg_val = XGMAC_IOREAD(pdata, reg);
2806 XGMAC_IOWRITE(pdata, reg, reg_val);
2811 netif_dbg(pdata, drv, pdata->netdev,
2813 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2820 netif_dbg(pdata, drv, pdata->netdev,
2822 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2824 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
2830 xgbe_config_tc(pdata);
2833 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
2835 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2837 netif_tx_stop_all_queues(pdata->netdev);
2840 pdata->hw_if.disable_rx(pdata);
2843 xgbe_config_rx_fifo_size(pdata);
2844 xgbe_config_flow_control(pdata);
2846 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2848 pdata->hw_if.enable_rx(pdata);
2851 netif_tx_start_all_queues(pdata->netdev);
2855 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2857 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2860 if (pdata->hw_feat.hash_table_size) {
2861 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2862 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2863 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2867 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2871 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2873 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2876 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2878 xgbe_set_speed(pdata, pdata->phy_speed);
2881 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2883 if (pdata->netdev->features & NETIF_F_RXCSUM)
2884 xgbe_enable_rx_csum(pdata);
2886 xgbe_disable_rx_csum(pdata);
2889 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
2892 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
2893 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
2896 xgbe_update_vlan_hash_table(pdata);
2898 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
2899 xgbe_enable_rx_vlan_filtering(pdata);
2901 xgbe_disable_rx_vlan_filtering(pdata);
2903 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2904 xgbe_enable_rx_vlan_stripping(pdata);
2906 xgbe_disable_rx_vlan_stripping(pdata);
2909 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
2914 if (pdata->vdata->mmc_64bit) {
2943 val = XGMAC_IOREAD(pdata, reg_lo);
2946 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
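
xgbe_mmc_read widens every MMC counter to u64; on parts with 64-bit counters (vdata->mmc_64bit) the high word sits 4 bytes above the low word, and both halves are read and combined. A sketch with a hypothetical mmio_read32() accessor:

#include <stdint.h>

static uint64_t mmc_read64(unsigned int reg_lo)
{
	uint64_t val = mmio_read32(reg_lo);	/* low 32 bits */

	val |= (uint64_t)mmio_read32(reg_lo + 4) << 32;
	return val;
}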
2951 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
2953 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2954 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
2958 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2962 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2966 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2970 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2974 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2978 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2982 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2986 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2990 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2994 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2998 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
3002 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
3006 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
3010 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
3014 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
3018 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
3022 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
3026 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
3029 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
3031 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
3032 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
3036 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
3040 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
3044 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
3048 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
3052 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
3056 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
3060 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
3064 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
3068 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
3072 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
3076 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
3080 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
3084 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
3088 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
3092 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
3096 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
3100 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
3104 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
3108 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
3112 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
3116 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
3120 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
3124 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3127 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
3129 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
3132 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
3135 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
3138 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
3141 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
3144 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
3147 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
3150 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
3153 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
3156 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
3159 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
3162 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
3165 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
3168 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
3171 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
3174 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
3177 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
3180 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
3183 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
3186 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
3189 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
3192 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
3195 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
3198 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
3201 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
3204 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
3207 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
3210 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
3213 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
3216 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
3219 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
3222 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
3225 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
3228 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
3231 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
3234 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
3237 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
3240 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
3243 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
3246 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
3249 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
3252 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
3255 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3258 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
3261 static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
3264 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
3267 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
3270 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
3282 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
3291 netdev_info(pdata->netdev,
3296 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
3303 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
3304 return xgbe_txq_prepare_tx_stop(pdata, queue);
3324 tx_status = XGMAC_IOREAD(pdata, tx_dsr);
3334 netdev_info(pdata->netdev,
3339 static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
3344 for (i = 0; i < pdata->channel_count; i++) {
3345 if (!pdata->channel[i]->tx_ring)
3348 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
3352 for (i = 0; i < pdata->tx_q_count; i++)
3353 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
3357 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3360 static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
3365 for (i = 0; i < pdata->tx_q_count; i++)
3366 xgbe_prepare_tx_stop(pdata, i);
3369 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3372 for (i = 0; i < pdata->tx_q_count; i++)
3373 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
3376 for (i = 0; i < pdata->channel_count; i++) {
3377 if (!pdata->channel[i]->tx_ring)
3380 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
3384 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
3396 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
3405 netdev_info(pdata->netdev,
3410 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
3415 for (i = 0; i < pdata->channel_count; i++) {
3416 if (!pdata->channel[i]->rx_ring)
3419 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
3424 for (i = 0; i < pdata->rx_q_count; i++)
3426 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
3429 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
3430 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
3431 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
3432 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
3435 static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
3440 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
3441 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
3442 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
3443 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
3446 for (i = 0; i < pdata->rx_q_count; i++)
3447 xgbe_prepare_rx_stop(pdata, i);
3450 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
3453 for (i = 0; i < pdata->channel_count; i++) {
3454 if (!pdata->channel[i]->rx_ring)
3457 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
3461 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
3466 for (i = 0; i < pdata->channel_count; i++) {
3467 if (!pdata->channel[i]->tx_ring)
3470 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
3474 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3477 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
3482 for (i = 0; i < pdata->tx_q_count; i++)
3483 xgbe_prepare_tx_stop(pdata, i);
3486 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3489 for (i = 0; i < pdata->channel_count; i++) {
3490 if (!pdata->channel[i]->tx_ring)
3493 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
3497 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
3502 for (i = 0; i < pdata->channel_count; i++) {
3503 if (!pdata->channel[i]->rx_ring)
3506 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
3510 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
3515 for (i = 0; i < pdata->channel_count; i++) {
3516 if (!pdata->channel[i]->rx_ring)
3519 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
3523 static int xgbe_init(struct xgbe_prv_data *pdata)
3525 struct xgbe_desc_if *desc_if = &pdata->desc_if;
3531 ret = xgbe_flush_tx_queues(pdata);
3533 netdev_err(pdata->netdev, "error flushing TX queues\n");
3540 xgbe_config_dma_bus(pdata);
3541 xgbe_config_dma_cache(pdata);
3542 xgbe_config_osp_mode(pdata);
3543 xgbe_config_pbl_val(pdata);
3544 xgbe_config_rx_coalesce(pdata);
3545 xgbe_config_tx_coalesce(pdata);
3546 xgbe_config_rx_buffer_size(pdata);
3547 xgbe_config_tso_mode(pdata);
3548 xgbe_config_sph_mode(pdata);
3549 xgbe_config_rss(pdata);
3550 desc_if->wrapper_tx_desc_init(pdata);
3551 desc_if->wrapper_rx_desc_init(pdata);
3552 xgbe_enable_dma_interrupts(pdata);
3557 xgbe_config_mtl_mode(pdata);
3558 xgbe_config_queue_mapping(pdata);
3559 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
3560 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
3561 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
3562 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
3563 xgbe_config_tx_fifo_size(pdata);
3564 xgbe_config_rx_fifo_size(pdata);
3568 xgbe_config_dcb_tc(pdata);
3569 xgbe_enable_mtl_interrupts(pdata);
3574 xgbe_config_mac_address(pdata);
3575 xgbe_config_rx_mode(pdata);
3576 xgbe_config_jumbo_enable(pdata);
3577 xgbe_config_flow_control(pdata);
3578 xgbe_config_mac_speed(pdata);
3579 xgbe_config_checksum_offload(pdata);
3580 xgbe_config_vlan_support(pdata);
3581 xgbe_config_mmc(pdata);
3582 xgbe_enable_mac_interrupts(pdata);
3587 xgbe_enable_ecc_interrupts(pdata);