Lines matching refs:priv

32 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
34 u32 reg = readl_relaxed(priv->base + offset + off); \
37 static inline void name##_writel(struct bcm_sysport_priv *priv, \
40 writel_relaxed(val, priv->base + offset + off); \
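
Note: the matches at 32-40 come from the driver's register-accessor macro, which stamps out a readl/writel pair per hardware block at a fixed offset from priv->base. A minimal reconstruction sketched from the matched lines; the macro name and the instantiation list below it are taken from the driver but should be treated as assumptions here:

#define BCM_SYSPORT_IO_MACRO(name, offset)                              \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{                                                                       \
        u32 reg = readl_relaxed(priv->base + offset + off);            \
        return reg;                                                     \
}                                                                       \
static inline void name##_writel(struct bcm_sysport_priv *priv,        \
                                 u32 val, u32 off)                      \
{                                                                       \
        writel_relaxed(val, priv->base + offset + off);                 \
}

/* Each block (interrupt controllers, UniMAC, RXCHK, RBUF, TDMA, ...)
 * gets its own accessor pair, e.g.:
 */
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
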
57 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
59 if (priv->is_lite && off >= RDMA_STATUS)
61 return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
64 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
66 if (priv->is_lite && off >= RDMA_STATUS)
68 writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
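
Note: lines 57-68 are the RDMA accessors, open-coded rather than generated by the macro because SYSTEMPORT Lite moves every RDMA register at or above RDMA_STATUS up by one word. A sketch of the read side with the elided adjustment filled in as an assumption:

static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
        /* On SYSTEMPORT Lite, registers from RDMA_STATUS onwards sit
         * 4 bytes further up, so compensate before touching the bus.
         */
        if (priv->is_lite && off >= RDMA_STATUS)
                off += 4;
        return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}
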
71 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
73 if (!priv->is_lite) {
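
Note: lines 71-73 are the helper that translates a logical TDMA_CONTROL bit into the correct position for the full and Lite register layouts. A sketch; the ACB_ALGO pivot point is not visible in the listing and is an assumption:

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
        if (!priv->is_lite) {
                return BIT(bit);
        } else {
                /* The Lite TDMA_CONTROL layout inserts an extra bit,
                 * shifting everything from ACB_ALGO onwards up by one.
                 */
                if (bit >= ACB_ALGO)
                        return BIT(bit + 1);
                else
                        return BIT(bit);
        }
}
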
87 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
90 priv->irq##which##_mask &= ~(mask); \
91 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
93 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
96 intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
97 priv->irq##which##_mask |= (mask); \
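
Note: lines 87-97 belong to the macro generating the interrupt-mask helpers for both INTRL2 instances; it keeps the software shadow priv->irq##which##_mask in sync with the hardware mask-set/mask-clear registers. Reconstructed from the matched lines:

#define BCM_SYSPORT_INTR_L2(which)                                      \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
                                               u32 mask)                \
{                                                                       \
        priv->irq##which##_mask &= ~(mask);                             \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
}                                                                       \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
                                             u32 mask)                  \
{                                                                       \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);       \
        priv->irq##which##_mask |= (mask);                              \
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
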
107 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
122 struct bcm_sysport_priv *priv = netdev_priv(dev);
125 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
126 reg = rxchk_readl(priv, RXCHK_CONTROL);
131 if (priv->rx_chk_en)
139 if (priv->rx_chk_en && priv->crc_fwd)
153 rxchk_writel(priv, reg, RXCHK_CONTROL);
159 struct bcm_sysport_priv *priv = netdev_priv(dev);
165 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
167 reg = tdma_readl(priv, TDMA_CONTROL);
168 if (priv->tsb_en)
169 reg |= tdma_control_bit(priv, TSB_EN);
171 reg &= ~tdma_control_bit(priv, TSB_EN);
177 reg |= tdma_control_bit(priv, SW_BRCM_TAG);
179 reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
180 tdma_writel(priv, reg, TDMA_CONTROL);
184 tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
190 struct bcm_sysport_priv *priv = netdev_priv(dev);
193 ret = clk_prepare_enable(priv->clk);
198 if (!priv->is_lite)
199 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
201 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
207 clk_disable_unprepare(priv->clk);
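
Note: lines 198-201 record whether the MAC forwards the Rx CRC to software: on the full SYSTEMPORT this is CMD_CRC_FWD in UMAC_CMD, on Lite it is inferred from the GIB FCS-strip setting. A sketch of the whole conditional; the GIB_FCS_STRIP / GIB_FCS_STRIP_SHIFT names used to complete the truncated line are assumptions:

        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
                priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
                                   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
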
319 struct bcm_sysport_priv *priv = netdev_priv(dev);
321 return priv->msg_enable;
326 struct bcm_sysport_priv *priv = netdev_priv(dev);
328 priv->msg_enable = enable;
348 struct bcm_sysport_priv *priv = netdev_priv(dev);
356 if (priv->is_lite &&
371 struct bcm_sysport_priv *priv = netdev_priv(dev);
380 if (priv->is_lite &&
406 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
425 if (priv->is_lite)
430 val = umac_readl(priv, UMAC_MIB_START + j + offset);
433 val = rxchk_readl(priv, s->reg_offset);
435 rxchk_writel(priv, 0, s->reg_offset);
438 val = rbuf_readl(priv, s->reg_offset);
440 rbuf_writel(priv, 0, s->reg_offset);
443 if (!priv->is_lite)
446 val = rdma_readl(priv, s->reg_offset);
448 rdma_writel(priv, 0, s->reg_offset);
453 p = (char *)priv + s->stat_offset;
457 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
460 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
468 for (q = 0; q < priv->netdev->num_tx_queues; q++) {
469 ring = &priv->tx_rings[q];
471 start = u64_stats_fetch_begin(&priv->syncp);
474 } while (u64_stats_fetch_retry(&priv->syncp, start));
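
Note: lines 460-474 accumulate per-ring TX counters under priv->syncp using the u64_stats retry-loop idiom, so 64-bit counters read consistently on 32-bit machines. A sketch of the loop body; the ring->bytes / ring->packets field names are assumed:

        for (q = 0; q < priv->netdev->num_tx_queues; q++) {
                ring = &priv->tx_rings[q];
                do {
                        start = u64_stats_fetch_begin(&priv->syncp);
                        bytes = ring->bytes;
                        packets = ring->packets;
                } while (u64_stats_fetch_retry(&priv->syncp, start));

                *tx_bytes += bytes;
                *tx_packets += packets;
        }
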
484 struct bcm_sysport_priv *priv = netdev_priv(dev);
485 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
486 struct u64_stats_sync *syncp = &priv->syncp;
493 bcm_sysport_update_mib_counters(priv);
494 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
509 p = (char *)priv;
511 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
535 ring = &priv->tx_rings[i];
546 struct bcm_sysport_priv *priv = netdev_priv(dev);
549 wol->wolopts = priv->wolopts;
551 if (!(priv->wolopts & WAKE_MAGICSECURE))
554 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
560 struct bcm_sysport_priv *priv = netdev_priv(dev);
561 struct device *kdev = &priv->pdev->dev;
571 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
576 if (priv->wol_irq_disabled)
577 enable_irq_wake(priv->wol_irq);
578 priv->wol_irq_disabled = 0;
582 if (!priv->wol_irq_disabled)
583 disable_irq_wake(priv->wol_irq);
584 priv->wol_irq_disabled = 1;
587 priv->wolopts = wol->wolopts;
592 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
597 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
602 rdma_writel(priv, reg, RDMA_MBDONE_INTR);
608 struct bcm_sysport_priv *priv = ring->priv;
611 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
617 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
625 struct bcm_sysport_priv *priv = netdev_priv(dev);
628 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
633 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
637 ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
647 struct bcm_sysport_priv *priv = netdev_priv(dev);
667 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
669 priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
670 priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
671 usecs = priv->rx_coalesce_usecs;
672 pkts = priv->rx_max_coalesced_frames;
674 if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
675 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
680 priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
683 bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
695 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
698 struct device *kdev = &priv->pdev->dev;
699 struct net_device *ndev = priv->netdev;
704 skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
707 priv->mib.alloc_rx_buff_failed++;
708 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
715 priv->mib.rx_dma_failed++;
717 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
730 dma_desc_set_addr(priv, cb->bd_addr, mapping);
732 netif_dbg(priv, rx_status, ndev, "RX refill\n");
738 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
744 for (i = 0; i < priv->num_rx_bds; i++) {
745 cb = &priv->rx_cbs[i];
746 skb = bcm_sysport_rx_refill(priv, cb);
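
Note: lines 695-750 implement the refill-then-process scheme: a replacement SKB is allocated and DMA-mapped before the current one is taken off the ring, so an allocation failure simply leaves the old buffer in place and the packet is dropped without the ring losing a slot. A condensed sketch of the swap, with the dma_map_single()/dma_unmap_addr_set() usage assumed from the usual pattern:

        skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
                                 GFP_ATOMIC | __GFP_NOWARN);
        if (!skb) {
                priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
                return NULL;
        }

        mapping = dma_map_single(kdev, skb->data, RX_BUF_LENGTH,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
                dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
                return NULL;
        }

        /* Swap: unmap the SKB currently on the ring, install the new one */
        rx_skb = cb->skb;
        if (likely(rx_skb))
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);

        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dma_desc_set_addr(priv, cb->bd_addr, mapping);

        netif_dbg(priv, rx_status, ndev, "RX refill\n");

        /* Hand the previous SKB back to the caller for processing */
        return rx_skb;
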
756 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
759 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
760 struct net_device *ndev = priv->netdev;
770 intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
776 if (!priv->is_lite)
777 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
779 p_index = rdma_readl(priv, RDMA_CONS_INDEX);
782 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
784 netif_dbg(priv, rx_status, ndev,
786 p_index, priv->rx_c_index, to_process);
789 cb = &priv->rx_cbs[priv->rx_read_ptr];
790 skb = bcm_sysport_rx_refill(priv, cb);
799 netif_err(priv, rx_err, ndev, "out of memory!\n");
811 netif_dbg(priv, rx_status, ndev,
813 p_index, priv->rx_c_index, priv->rx_read_ptr,
817 netif_err(priv, rx_status, ndev, "oversized packet\n");
825 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
833 netif_err(priv, rx_err, ndev, "error packet\n");
857 if (priv->crc_fwd) {
865 u64_stats_update_begin(&priv->syncp);
868 u64_stats_update_end(&priv->syncp);
870 napi_gro_receive(&priv->napi, skb);
873 priv->rx_read_ptr++;
875 if (priv->rx_read_ptr == priv->num_rx_bds)
876 priv->rx_read_ptr = 0;
879 priv->dim.packets = processed;
880 priv->dim.bytes = processed_bytes;
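
Note: lines 756-880 are the RX processing loop. The hardware producer index comes from RDMA_PROD_INDEX (or RDMA_CONS_INDEX on Lite, where both share one register), the amount of work is the masked difference against the software consumer index, and the read pointer wraps modulo num_rx_bds. A condensed bookkeeping sketch derived from the matched lines:

        to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

        while (processed < to_process) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = bcm_sysport_rx_refill(priv, cb);
                /* ... validate status, strip Broadcom tag/CRC, then ... */
                napi_gro_receive(&priv->napi, skb);

                processed++;
                priv->rx_read_ptr++;
                if (priv->rx_read_ptr == priv->num_rx_bds)
                        priv->rx_read_ptr = 0;
        }
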
890 struct bcm_sysport_priv *priv = ring->priv;
891 struct device *kdev = &priv->pdev->dev;
910 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
914 struct net_device *ndev = priv->netdev;
922 if (!ring->priv->is_lite)
923 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
925 intrl2_0_writel(ring->priv, BIT(ring->index +
929 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
933 netif_dbg(priv, tx_done, ndev,
950 u64_stats_update_begin(&priv->syncp);
953 u64_stats_update_end(&priv->syncp);
957 netif_dbg(priv, tx_done, ndev,
965 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
972 txq = netdev_get_tx_queue(priv->netdev, ring->index);
975 released = __bcm_sysport_tx_reclaim(priv, ring);
985 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
991 __bcm_sysport_tx_reclaim(priv, ring);
1001 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
1006 if (!ring->priv->is_lite)
1007 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
1009 intrl2_0_mask_clear(ring->priv, BIT(ring->index +
1018 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
1022 for (q = 0; q < priv->netdev->num_tx_queues; q++)
1023 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
1028 struct bcm_sysport_priv *priv =
1033 work_done = bcm_sysport_desc_rx(priv, budget);
1035 priv->rx_c_index += work_done;
1036 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
1042 if (!priv->is_lite)
1043 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
1045 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
1050 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
1053 if (priv->dim.use_dim) {
1054 dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
1055 priv->dim.bytes, &dim_sample);
1056 net_dim(&priv->dim.dim, dim_sample);
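
Note: lines 1028-1056 are the RX NAPI poll: it advances rx_c_index, writes it back to the hardware (shifted into the upper half-word on Lite), and only re-enables the MBDONE interrupt once the budget was not exhausted. A sketch of the completion path; napi_complete_done() is assumed from the standard NAPI contract rather than shown in the listing:

        priv->rx_c_index += work_done;
        priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

        if (!priv->is_lite)
                rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
        else
                rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* re-enable RX interrupt */
                intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
        }
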
1062 static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
1066 reg = umac_readl(priv, UMAC_MPD_CTRL);
1071 umac_writel(priv, reg, UMAC_MPD_CTRL);
1073 if (priv->is_lite)
1078 reg = rbuf_readl(priv, RBUF_CONTROL);
1083 rbuf_writel(priv, reg, RBUF_CONTROL);
1086 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1092 reg = rxchk_readl(priv, RXCHK_CONTROL);
1095 rxchk_writel(priv, reg, RXCHK_CONTROL);
1100 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
1101 rxchk_writel(priv, priv->filters_loc[index] <<
1103 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
1107 mpd_enable_set(priv, false);
1109 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1111 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1114 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1116 netdev_info(priv->netdev,
1120 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1128 struct bcm_sysport_priv *priv =
1133 bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
1141 struct bcm_sysport_priv *priv = netdev_priv(dev);
1145 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1146 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1147 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1149 if (unlikely(priv->irq0_stat == 0)) {
1150 netdev_warn(priv->netdev, "spurious RX interrupt\n");
1154 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1155 priv->dim.event_ctr++;
1156 if (likely(napi_schedule_prep(&priv->napi))) {
1158 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1159 __napi_schedule_irqoff(&priv->napi);
1166 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1167 bcm_sysport_tx_reclaim_all(priv);
1169 if (!priv->is_lite)
1174 if (!(priv->irq0_stat & ring_bit))
1177 txr = &priv->tx_rings[ring];
1180 intrl2_0_mask_set(priv, ring_bit);
1192 struct bcm_sysport_priv *priv = netdev_priv(dev);
1196 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1197 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1198 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1200 if (unlikely(priv->irq1_stat == 0)) {
1201 netdev_warn(priv->netdev, "spurious TX interrupt\n");
1206 if (!(priv->irq1_stat & BIT(ring)))
1209 txr = &priv->tx_rings[ring];
1212 intrl2_1_mask_set(priv, BIT(ring));
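
Note: lines 1141-1233 are the two interrupt handlers. Both follow the same pattern: latch status masked by the current CPU mask, ack it, then mask the source and defer the work to NAPI. A sketch of the RX-side pattern, condensed from the matched lines:

        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
        intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

        if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
                priv->dim.event_ctr++;
                if (likely(napi_schedule_prep(&priv->napi))) {
                        /* disable RX interrupts until NAPI is done */
                        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
                        __napi_schedule_irqoff(&priv->napi);
                }
        }
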
1222 struct bcm_sysport_priv *priv = dev_id;
1224 pm_wakeup_event(&priv->pdev->dev, 0);
1232 struct bcm_sysport_priv *priv = netdev_priv(dev);
1234 disable_irq(priv->irq0);
1235 bcm_sysport_rx_isr(priv->irq0, priv);
1236 enable_irq(priv->irq0);
1238 if (!priv->is_lite) {
1239 disable_irq(priv->irq1);
1240 bcm_sysport_tx_isr(priv->irq1, priv);
1241 enable_irq(priv->irq1);
1249 struct bcm_sysport_priv *priv = netdev_priv(dev);
1262 priv->mib.tx_realloc_tsb_failed++;
1269 priv->mib.tx_realloc_tsb++;
1320 struct bcm_sysport_priv *priv = netdev_priv(dev);
1321 struct device *kdev = &priv->pdev->dev;
1334 ring = &priv->tx_rings[queue];
1346 if (priv->tsb_en) {
1358 priv->mib.tx_dma_failed++;
1359 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1387 spin_lock_irqsave(&priv->desc_lock, desc_flags);
1388 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1389 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
1390 spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
1396 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1418 struct bcm_sysport_priv *priv = netdev_priv(dev);
1423 if (priv->old_link != phydev->link) {
1425 priv->old_link = phydev->link;
1428 if (priv->old_duplex != phydev->duplex) {
1430 priv->old_duplex = phydev->duplex;
1433 if (priv->is_lite)
1457 if (priv->old_pause != phydev->pause) {
1459 priv->old_pause = phydev->pause;
1469 reg = umac_readl(priv, UMAC_CMD);
1474 umac_writel(priv, reg, UMAC_CMD);
1481 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1484 struct bcm_sysport_net_dim *dim = &priv->dim;
1493 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1495 struct bcm_sysport_net_dim *dim = &priv->dim;
1499 usecs = priv->rx_coalesce_usecs;
1500 pkts = priv->rx_max_coalesced_frames;
1509 bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1512 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1515 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1524 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1530 ring->priv = priv;
1531 netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
1540 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1541 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1542 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1543 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1546 reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1554 tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1559 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1561 tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
1564 reg = tdma_readl(priv, TDMA_CONTROL);
1565 reg |= tdma_control_bit(priv, ACB_ALGO);
1566 tdma_writel(priv, reg, TDMA_CONTROL);
1571 reg = tdma_readl(priv, TDMA_CONTROL);
1572 if (priv->is_lite)
1576 reg |= tdma_control_bit(priv, TSB_SWAP0);
1578 reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1579 tdma_writel(priv, reg, TDMA_CONTROL);
1584 tdma_writel(priv, ring->size |
1589 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1591 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1595 netif_dbg(priv, hw, priv->netdev,
1603 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1606 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1610 reg = tdma_readl(priv, TDMA_STATUS);
1612 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1624 bcm_sysport_tx_clean(priv, ring);
1631 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1635 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1641 reg = rdma_readl(priv, RDMA_CONTROL);
1646 rdma_writel(priv, reg, RDMA_CONTROL);
1650 reg = rdma_readl(priv, RDMA_STATUS);
1656 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1662 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1668 reg = tdma_readl(priv, TDMA_CONTROL);
1670 reg |= tdma_control_bit(priv, TDMA_EN);
1672 reg &= ~tdma_control_bit(priv, TDMA_EN);
1673 tdma_writel(priv, reg, TDMA_CONTROL);
1677 reg = tdma_readl(priv, TDMA_STATUS);
1684 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
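
Note: lines 1635-1684 are the enable/disable helpers for the two DMA engines; both flip the *_EN control bit and then poll the status register until the engine reports the matching disabled state, giving up after a bounded number of iterations. A sketch of the RDMA side, with the RDMA_EN/RDMA_DISABLED bit names and the timeout count assumed:

static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
                                  unsigned int enable)
{
        unsigned int timeout = 1000;
        u32 reg;

        reg = rdma_readl(priv, RDMA_CONTROL);
        if (enable)
                reg |= RDMA_EN;
        else
                reg &= ~RDMA_EN;
        rdma_writel(priv, reg, RDMA_CONTROL);

        /* Poll for RDMA disabling/enabling completion */
        do {
                reg = rdma_readl(priv, RDMA_STATUS);
                if (!!(reg & RDMA_DISABLED) == !enable)
                        return 0;
                usleep_range(1000, 2000);
        } while (timeout-- > 0);

        netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

        return -ETIMEDOUT;
}
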
1689 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1697 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1698 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1699 priv->rx_c_index = 0;
1700 priv->rx_read_ptr = 0;
1701 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1703 if (!priv->rx_cbs) {
1704 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1708 for (i = 0; i < priv->num_rx_bds; i++) {
1709 cb = priv->rx_cbs + i;
1710 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1713 ret = bcm_sysport_alloc_rx_bufs(priv);
1715 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1720 reg = rdma_readl(priv, RDMA_STATUS);
1722 rdma_enable_set(priv, 0);
1724 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1725 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1726 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1727 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1728 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1731 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1732 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1733 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1734 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1736 netif_dbg(priv, hw, priv->netdev,
1738 priv->num_rx_bds, priv->rx_bds);
1743 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1750 reg = rdma_readl(priv, RDMA_STATUS);
1752 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1754 for (i = 0; i < priv->num_rx_bds; i++) {
1755 cb = &priv->rx_cbs[i];
1757 dma_unmap_single(&priv->pdev->dev,
1763 kfree(priv->rx_cbs);
1764 priv->rx_cbs = NULL;
1766 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1771 struct bcm_sysport_priv *priv = netdev_priv(dev);
1774 if (priv->is_lite)
1777 reg = umac_readl(priv, UMAC_CMD);
1782 umac_writel(priv, reg, UMAC_CMD);
1789 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1794 if (!priv->is_lite) {
1795 reg = umac_readl(priv, UMAC_CMD);
1800 umac_writel(priv, reg, UMAC_CMD);
1802 reg = gib_readl(priv, GIB_CONTROL);
1807 gib_writel(priv, reg, GIB_CONTROL);
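
Note: lines 1789-1807 select between the UniMAC CMD register and the GIB control register when enabling or disabling RX/TX, which is why callers pass CMD_RX_EN/CMD_TX_EN masks regardless of the variant. A sketch; the trailing settle delay on disable is an assumption:

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
                                   u32 mask, unsigned int enable)
{
        u32 reg;

        if (!priv->is_lite) {
                reg = umac_readl(priv, UMAC_CMD);
                if (enable)
                        reg |= mask;
                else
                        reg &= ~mask;
                umac_writel(priv, reg, UMAC_CMD);
        } else {
                reg = gib_readl(priv, GIB_CONTROL);
                if (enable)
                        reg |= mask;
                else
                        reg &= ~mask;
                gib_writel(priv, reg, GIB_CONTROL);
        }

        /* The MAC stops on a packet boundary; give it time to drain
         * before callers tear the rest of the datapath down.
         */
        if (enable == 0)
                usleep_range(1000, 2000);
}
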
1817 static inline void umac_reset(struct bcm_sysport_priv *priv)
1821 if (priv->is_lite)
1824 reg = umac_readl(priv, UMAC_CMD);
1826 umac_writel(priv, reg, UMAC_CMD);
1828 reg = umac_readl(priv, UMAC_CMD);
1830 umac_writel(priv, reg, UMAC_CMD);
1833 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1840 if (!priv->is_lite) {
1841 umac_writel(priv, mac0, UMAC_MAC0);
1842 umac_writel(priv, mac1, UMAC_MAC1);
1844 gib_writel(priv, mac0, GIB_MAC0);
1845 gib_writel(priv, mac1, GIB_MAC1);
1849 static void topctrl_flush(struct bcm_sysport_priv *priv)
1851 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1852 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1854 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1855 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1860 struct bcm_sysport_priv *priv = netdev_priv(dev);
1874 umac_set_hw_addr(priv, dev->dev_addr);
1882 struct bcm_sysport_priv *priv = netdev_priv(dev);
1883 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1888 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1892 start = u64_stats_fetch_begin(&priv->syncp);
1895 } while (u64_stats_fetch_retry(&priv->syncp, start));
1900 struct bcm_sysport_priv *priv = netdev_priv(dev);
1903 bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1904 bcm_sysport_init_rx_coalesce(priv);
1905 napi_enable(&priv->napi);
1908 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1913 if (!priv->is_lite)
1914 intrl2_1_mask_clear(priv, 0xffffffff);
1916 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1919 static void rbuf_init(struct bcm_sysport_priv *priv)
1923 reg = rbuf_readl(priv, RBUF_CONTROL);
1926 if (priv->is_lite)
1934 rbuf_writel(priv, reg, RBUF_CONTROL);
1937 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1939 intrl2_0_mask_set(priv, 0xffffffff);
1940 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1941 if (!priv->is_lite) {
1942 intrl2_1_mask_set(priv, 0xffffffff);
1943 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1947 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1951 reg = gib_readl(priv, GIB_CONTROL);
1953 if (netdev_uses_dsa(priv->netdev)) {
1959 gib_writel(priv, reg, GIB_CONTROL);
1964 struct bcm_sysport_priv *priv = netdev_priv(dev);
1969 clk_prepare_enable(priv->clk);
1972 umac_reset(priv);
1975 topctrl_flush(priv);
1978 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1981 rbuf_init(priv);
1984 if (!priv->is_lite)
1985 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1987 gib_set_pad_extension(priv);
1995 umac_set_hw_addr(priv, dev->dev_addr);
1997 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1998 0, priv->phy_interface);
2009 priv->old_duplex = -1;
2010 priv->old_link = -1;
2011 priv->old_pause = -1;
2014 bcm_sysport_mask_all_intrs(priv);
2016 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
2022 if (!priv->is_lite) {
2023 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2032 spin_lock_init(&priv->desc_lock);
2034 ret = bcm_sysport_init_tx_ring(priv, i);
2043 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2046 ret = bcm_sysport_init_rx_ring(priv);
2053 ret = rdma_enable_set(priv, 1);
2058 ret = tdma_enable_set(priv, 1);
2063 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2072 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2074 bcm_sysport_fini_rx_ring(priv);
2077 bcm_sysport_fini_tx_ring(priv, i);
2078 if (!priv->is_lite)
2079 free_irq(priv->irq1, dev);
2081 free_irq(priv->irq0, dev);
2085 clk_disable_unprepare(priv->clk);
2091 struct bcm_sysport_priv *priv = netdev_priv(dev);
2095 napi_disable(&priv->napi);
2096 cancel_work_sync(&priv->dim.dim.work);
2100 bcm_sysport_mask_all_intrs(priv);
2105 struct bcm_sysport_priv *priv = netdev_priv(dev);
2112 umac_enable_set(priv, CMD_RX_EN, 0);
2114 ret = tdma_enable_set(priv, 0);
2123 ret = rdma_enable_set(priv, 0);
2130 umac_enable_set(priv, CMD_TX_EN, 0);
2134 bcm_sysport_fini_tx_ring(priv, i);
2135 bcm_sysport_fini_rx_ring(priv);
2137 free_irq(priv->irq0, dev);
2138 if (!priv->is_lite)
2139 free_irq(priv->irq1, dev);
2144 clk_disable_unprepare(priv->clk);
2149 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2155 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2156 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2166 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2172 index = bcm_sysport_rule_find(priv, nfc->fs.location);
2181 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2197 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2205 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2208 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2209 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2211 priv->filters_loc[index] = nfc->fs.location;
2212 set_bit(index, priv->filters);
2217 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2223 index = bcm_sysport_rule_find(priv, location);
2230 clear_bit(index, priv->filters);
2231 priv->filters_loc[index] = 0;
2239 struct bcm_sysport_priv *priv = netdev_priv(dev);
2244 ret = bcm_sysport_rule_get(priv, nfc);
2256 struct bcm_sysport_priv *priv = netdev_priv(dev);
2261 ret = bcm_sysport_rule_set(priv, nfc);
2264 ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2297 struct bcm_sysport_priv *priv = netdev_priv(dev);
2308 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2335 struct bcm_sysport_priv *priv = netdev_priv(dev);
2354 if (priv->is_lite)
2360 if (priv->per_port_num_tx_queues &&
2361 priv->per_port_num_tx_queues != num_tx_queues)
2364 priv->per_port_num_tx_queues = num_tx_queues;
2368 ring = &priv->tx_rings[q];
2379 priv->ring_map[qp + port * num_tx_queues] = ring;
2390 struct bcm_sysport_priv *priv = netdev_priv(dev);
2400 ring = &priv->tx_rings[q];
2410 priv->ring_map[qp + port * num_tx_queues] = NULL;
2421 struct bcm_sysport_priv *priv;
2424 priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2425 if (priv->netdev != dev)
2474 struct bcm_sysport_priv *priv;
2506 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2511 priv = netdev_priv(dev);
2513 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
2514 if (IS_ERR(priv->clk)) {
2515 ret = PTR_ERR(priv->clk);
2520 priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2523 if (!priv->tx_rings) {
2528 priv->is_lite = params->is_lite;
2529 priv->num_rx_desc_words = params->num_rx_desc_words;
2531 priv->irq0 = platform_get_irq(pdev, 0);
2532 if (!priv->is_lite) {
2533 priv->irq1 = platform_get_irq(pdev, 1);
2534 priv->wol_irq = platform_get_irq_optional(pdev, 2);
2536 priv->wol_irq = platform_get_irq_optional(pdev, 1);
2538 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2543 priv->base = devm_platform_ioremap_resource(pdev, 0);
2544 if (IS_ERR(priv->base)) {
2545 ret = PTR_ERR(priv->base);
2549 priv->netdev = dev;
2550 priv->pdev = pdev;
2552 ret = of_get_phy_mode(dn, &priv->phy_interface);
2555 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2567 priv->phy_dn = dn;
2581 netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
2591 priv->wol_irq_disabled = 1;
2592 ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2593 bcm_sysport_wol_isr, 0, dev->name, priv);
2597 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
2598 if (IS_ERR(priv->wol_clk)) {
2599 ret = PTR_ERR(priv->wol_clk);
2610 priv->rx_max_coalesced_frames = 1;
2611 u64_stats_init(&priv->syncp);
2613 priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
2615 ret = register_netdevice_notifier(&priv->netdev_notifier);
2627 clk_prepare_enable(priv->clk);
2629 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2633 priv->is_lite ? " Lite" : "",
2634 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2635 priv->irq0, priv->irq1, txq, rxq);
2637 clk_disable_unprepare(priv->clk);
2642 unregister_netdevice_notifier(&priv->netdev_notifier);
2654 struct bcm_sysport_priv *priv = netdev_priv(dev);
2660 unregister_netdevice_notifier(&priv->netdev_notifier);
2668 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2670 struct net_device *ndev = priv->netdev;
2675 reg = umac_readl(priv, UMAC_MPD_CTRL);
2676 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2679 if (priv->wolopts & WAKE_MAGICSECURE) {
2681 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2683 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2687 umac_writel(priv, reg, UMAC_MPD_CTRL);
2689 if (priv->wolopts & WAKE_FILTER) {
2691 reg = rbuf_readl(priv, RBUF_CONTROL);
2692 if (priv->is_lite)
2696 rbuf_writel(priv, reg, RBUF_CONTROL);
2699 reg = rxchk_readl(priv, RXCHK_CONTROL);
2702 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2707 rxchk_writel(priv, reg, RXCHK_CONTROL);
2712 reg = rbuf_readl(priv, RBUF_STATUS);
2721 mpd_enable_set(priv, false);
2722 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2727 umac_enable_set(priv, CMD_RX_EN, 1);
2729 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2737 struct bcm_sysport_priv *priv = netdev_priv(dev);
2752 umac_enable_set(priv, CMD_RX_EN, 0);
2754 ret = rdma_enable_set(priv, 0);
2761 if (priv->rx_chk_en) {
2762 reg = rxchk_readl(priv, RXCHK_CONTROL);
2764 rxchk_writel(priv, reg, RXCHK_CONTROL);
2768 if (!priv->wolopts)
2769 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2771 ret = tdma_enable_set(priv, 0);
2780 umac_enable_set(priv, CMD_TX_EN, 0);
2782 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2786 bcm_sysport_fini_tx_ring(priv, i);
2787 bcm_sysport_fini_rx_ring(priv);
2790 if (device_may_wakeup(d) && priv->wolopts) {
2791 clk_prepare_enable(priv->wol_clk);
2792 ret = bcm_sysport_suspend_to_wol(priv);
2795 clk_disable_unprepare(priv->clk);
2803 struct bcm_sysport_priv *priv = netdev_priv(dev);
2810 clk_prepare_enable(priv->clk);
2811 if (priv->wolopts)
2812 clk_disable_unprepare(priv->wol_clk);
2814 umac_reset(priv);
2817 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
2822 bcm_sysport_resume_from_wol(priv);
2826 ret = bcm_sysport_init_tx_ring(priv, i);
2835 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2838 ret = bcm_sysport_init_rx_ring(priv);
2845 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2847 ret = rdma_enable_set(priv, 1);
2856 rbuf_init(priv);
2859 if (!priv->is_lite)
2860 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2862 gib_set_pad_extension(priv);
2865 umac_set_hw_addr(priv, dev->dev_addr);
2867 umac_enable_set(priv, CMD_RX_EN, 1);
2870 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2872 umac_enable_set(priv, CMD_TX_EN, 1);
2874 ret = tdma_enable_set(priv, 1);
2889 bcm_sysport_fini_rx_ring(priv);
2892 bcm_sysport_fini_tx_ring(priv, i);
2893 clk_disable_unprepare(priv->clk);