Lines Matching refs:bp (references to the struct macb *bp private-data pointer in the Cadence MACB/GEM Ethernet driver)

56 #define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
57 * (bp)->rx_ring_size)
62 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
63 * (bp)->tx_ring_size)
66 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
128 static unsigned int macb_dma_desc_get_size(struct macb *bp)
133 switch (bp->hw_dma_cap) {
155 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
158 switch (bp->hw_dma_cap) {
174 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
182 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
184 return index & (bp->tx_ring_size - 1);
190 index = macb_tx_ring_wrap(queue->bp, index);
191 index = macb_adj_dma_desc_idx(queue->bp, index);
198 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
205 offset = macb_tx_ring_wrap(queue->bp, index) *
206 macb_dma_desc_get_size(queue->bp);
211 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
213 return index & (bp->rx_ring_size - 1);
218 index = macb_rx_ring_wrap(queue->bp, index);
219 index = macb_adj_dma_desc_idx(queue->bp, index);
225 return queue->rx_buffers + queue->bp->rx_buffer_size *
226 macb_rx_ring_wrap(queue->bp, index);
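
Note: the ring-wrap helpers above (macb_tx_ring_wrap, macb_rx_ring_wrap) reduce a free-running index with a bitwise AND, which only works because tx_ring_size and rx_ring_size are powers of two. A stand-alone sketch of the same masking, with a hypothetical ring size, purely to illustrate the arithmetic:

#include <assert.h>
#include <stdio.h>

/* Same masking trick as macb_tx_ring_wrap()/macb_rx_ring_wrap():
 * only valid when ring_size is a power of two. */
static unsigned int ring_wrap(unsigned int ring_size, unsigned int index)
{
        return index & (ring_size - 1);
}

int main(void)
{
        unsigned int ring_size = 512;   /* hypothetical, must be 2^n */

        assert((ring_size & (ring_size - 1)) == 0);
        printf("%u -> %u\n", 513u, ring_wrap(ring_size, 513));    /* 1 */
        printf("%u -> %u\n", 1023u, ring_wrap(ring_size, 1023));  /* 511 */
        return 0;
}
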
230 static u32 hw_readl_native(struct macb *bp, int offset)
232 return __raw_readl(bp->regs + offset);
235 static void hw_writel_native(struct macb *bp, int offset, u32 value)
237 __raw_writel(value, bp->regs + offset);
240 static u32 hw_readl(struct macb *bp, int offset)
242 return readl_relaxed(bp->regs + offset);
245 static void hw_writel(struct macb *bp, int offset, u32 value)
247 writel_relaxed(value, bp->regs + offset);
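
Note: hw_readl_native/hw_writel_native and hw_readl/hw_writel are not called directly everywhere; the driver stores one pair in bp->macb_reg_readl/bp->macb_reg_writel at probe time (lines 5030-5034 below) and dispatches through those pointers. A user-space mock of that dispatch, with an array standing in for the MMIO window, only to illustrate the indirection (not the driver's actual I/O):

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for the MMIO window behind bp->regs. */
static uint32_t fake_regs[64];

static uint32_t mock_readl(int offset)              { return fake_regs[offset / 4]; }
static void     mock_writel(int offset, uint32_t v) { fake_regs[offset / 4] = v; }

/* One accessor pair per device, chosen once (native-endian vs. relaxed
 * variants in the real driver, two mocks here). */
struct mock_macb {
        uint32_t (*reg_readl)(int offset);
        void     (*reg_writel)(int offset, uint32_t value);
};

int main(void)
{
        struct mock_macb bp = { .reg_readl = mock_readl, .reg_writel = mock_writel };

        bp.reg_writel(0x08, 0xdeadbeefu);
        printf("0x%08x\n", (unsigned int)bp.reg_readl(0x08));
        return 0;
}
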
279 static void macb_set_hwaddr(struct macb *bp)
284 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
285 macb_or_gem_writel(bp, SA1B, bottom);
286 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
287 macb_or_gem_writel(bp, SA1T, top);
289 if (gem_has_ptp(bp)) {
290 gem_writel(bp, RXPTPUNI, bottom);
291 gem_writel(bp, TXPTPUNI, bottom);
295 macb_or_gem_writel(bp, SA2B, 0);
296 macb_or_gem_writel(bp, SA2T, 0);
297 macb_or_gem_writel(bp, SA3B, 0);
298 macb_or_gem_writel(bp, SA3T, 0);
299 macb_or_gem_writel(bp, SA4B, 0);
300 macb_or_gem_writel(bp, SA4T, 0);
303 static void macb_get_hwaddr(struct macb *bp)
312 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
313 top = macb_or_gem_readl(bp, SA1T + i * 8);
323 eth_hw_addr_set(bp->dev, addr);
328 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
329 eth_hw_addr_random(bp->dev);
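
Note: macb_set_hwaddr() above splits the six-byte station address into a 32-bit "bottom" word for SA1B and a 16-bit "top" half-word for SA1T, and macb_get_hwaddr() reassembles the address from the same register pair. A stand-alone sketch of that split on a little-endian host (the driver uses cpu_to_le32/cpu_to_le16 to stay endian-safe; the address below is only an example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };  /* example only */
        uint32_t bottom;
        uint16_t top;
        uint8_t out[6];

        /* pack: first four bytes -> SA1B, last two -> SA1T */
        memcpy(&bottom, addr, 4);
        memcpy(&top, addr + 4, 2);
        printf("SA1B=0x%08x SA1T=0x%04x\n", (unsigned int)bottom, (unsigned int)top);

        /* unpack, roughly what macb_get_hwaddr() does per address slot */
        memcpy(out, &bottom, 4);
        memcpy(out + 4, &top, 2);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;
}
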
332 static int macb_mdio_wait_for_idle(struct macb *bp)
336 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
342 struct macb *bp = bus->priv;
345 status = pm_runtime_resume_and_get(&bp->pdev->dev);
349 status = macb_mdio_wait_for_idle(bp);
353 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
359 status = macb_mdio_wait_for_idle(bp);
363 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
366 pm_runtime_mark_last_busy(&bp->pdev->dev);
367 pm_runtime_put_autosuspend(&bp->pdev->dev);
375 struct macb *bp = bus->priv;
378 status = pm_runtime_get_sync(&bp->pdev->dev);
380 pm_runtime_put_noidle(&bp->pdev->dev);
384 status = macb_mdio_wait_for_idle(bp);
388 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
395 status = macb_mdio_wait_for_idle(bp);
399 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
405 status = macb_mdio_wait_for_idle(bp);
409 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
412 pm_runtime_mark_last_busy(&bp->pdev->dev);
413 pm_runtime_put_autosuspend(&bp->pdev->dev);
421 struct macb *bp = bus->priv;
424 status = pm_runtime_resume_and_get(&bp->pdev->dev);
428 status = macb_mdio_wait_for_idle(bp);
432 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
439 status = macb_mdio_wait_for_idle(bp);
444 pm_runtime_mark_last_busy(&bp->pdev->dev);
445 pm_runtime_put_autosuspend(&bp->pdev->dev);
454 struct macb *bp = bus->priv;
457 status = pm_runtime_get_sync(&bp->pdev->dev);
459 pm_runtime_put_noidle(&bp->pdev->dev);
463 status = macb_mdio_wait_for_idle(bp);
467 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
474 status = macb_mdio_wait_for_idle(bp);
478 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
485 status = macb_mdio_wait_for_idle(bp);
490 pm_runtime_mark_last_busy(&bp->pdev->dev);
491 pm_runtime_put_autosuspend(&bp->pdev->dev);
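
Note: every MDIO transfer above brackets the MAN register access with macb_mdio_wait_for_idle(), which is readx_poll_timeout() spinning on the IDLE bit in NSR. A user-space analogue of that bounded polling loop, with a stubbed status read (the bit position and retry budget here are made up for illustration):

#include <stdio.h>
#include <unistd.h>

#define IDLE_BIT (1u << 2)              /* stand-in for MACB_BIT(IDLE) */

/* Stubbed status register: reports "busy" twice, then idle.
 * A real driver reads NSR over MMIO instead. */
static unsigned int reads;
static unsigned int read_status(void)
{
        return ++reads >= 3 ? IDLE_BIT : 0;
}

/* Bounded poll, loosely modelled on readx_poll_timeout():
 * 0 on success, -1 if the bit never came up within max_polls. */
static int wait_for_idle(unsigned int max_polls)
{
        for (unsigned int i = 0; i < max_polls; i++) {
                if (read_status() & IDLE_BIT)
                        return 0;
                usleep(10);             /* back off between reads */
        }
        return -1;
}

int main(void)
{
        printf("wait_for_idle: %d\n", wait_for_idle(100));
        return 0;
}
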
496 static void macb_init_buffers(struct macb *bp)
501 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
504 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
510 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
519 * @bp: pointer to struct macb
522 static void macb_set_tx_clk(struct macb *bp, int speed)
526 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
530 if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
547 rate_rounded = clk_round_rate(bp->tx_clk, rate);
557 netdev_warn(bp->dev,
561 if (clk_set_rate(bp->tx_clk, rate_rounded))
562 netdev_err(bp->dev, "adjusting tx_clk failed.\n");
569 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
572 config = gem_readl(bp, USX_CONTROL);
577 gem_writel(bp, USX_CONTROL, config);
583 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
590 val = gem_readl(bp, USX_STATUS);
592 val = gem_readl(bp, NCFGR);
603 struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
605 gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
647 struct macb *bp = netdev_priv(ndev);
652 spin_lock_irqsave(&bp->lock, flags);
654 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
655 old_ncr = ncr = macb_or_gem_readl(bp, NCR);
657 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
660 } else if (macb_is_gem(bp)) {
669 } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
670 bp->phy_interface == PHY_INTERFACE_MODE_MII) {
677 macb_or_gem_writel(bp, NCFGR, ctrl);
680 macb_or_gem_writel(bp, NCR, ncr);
686 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
689 old_pcsctrl = gem_readl(bp, PCSCNTRL);
695 gem_writel(bp, PCSCNTRL, pcsctrl);
698 spin_unlock_irqrestore(&bp->lock, flags);
705 struct macb *bp = netdev_priv(ndev);
710 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
711 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
713 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
716 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
717 macb_writel(bp, NCR, ctrl);
729 struct macb *bp = netdev_priv(ndev);
735 spin_lock_irqsave(&bp->lock, flags);
737 ctrl = macb_or_gem_readl(bp, NCFGR);
747 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
749 if (macb_is_gem(bp)) {
762 bp->macbgem_ops.mog_init_rings(bp);
763 macb_init_buffers(bp);
765 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
767 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
770 macb_or_gem_writel(bp, NCFGR, ctrl);
772 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
773 gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
774 gem_readl(bp, HS_MAC_CONFIG)));
776 spin_unlock_irqrestore(&bp->lock, flags);
778 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
779 macb_set_tx_clk(bp, speed);
782 ctrl = macb_readl(bp, NCR);
783 if (gem_has_ptp(bp))
786 macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
795 struct macb *bp = netdev_priv(ndev);
798 return &bp->phylink_usx_pcs;
800 return &bp->phylink_sgmii_pcs;
819 static int macb_phylink_connect(struct macb *bp)
821 struct device_node *dn = bp->pdev->dev.of_node;
822 struct net_device *dev = bp->dev;
827 ret = phylink_of_phy_connect(bp->phylink, dn, 0);
830 phydev = phy_find_first(bp->mii_bus);
837 ret = phylink_connect_phy(bp->phylink, phydev);
845 phylink_start(bp->phylink);
854 struct macb *bp = netdev_priv(ndev);
856 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
862 struct macb *bp = netdev_priv(dev);
864 bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
865 bp->phylink_sgmii_pcs.neg_mode = true;
866 bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
867 bp->phylink_usx_pcs.neg_mode = true;
869 bp->phylink_config.dev = &dev->dev;
870 bp->phylink_config.type = PHYLINK_NETDEV;
871 bp->phylink_config.mac_managed_pm = true;
873 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
874 bp->phylink_config.poll_fixed_state = true;
875 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
878 bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
882 bp->phylink_config.supported_interfaces);
884 bp->phylink_config.supported_interfaces);
887 if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
888 bp->phylink_config.mac_capabilities |= MAC_1000FD;
889 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
890 bp->phylink_config.mac_capabilities |= MAC_1000HD;
893 bp->phylink_config.supported_interfaces);
894 phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
896 if (bp->caps & MACB_CAPS_PCS)
898 bp->phylink_config.supported_interfaces);
900 if (bp->caps & MACB_CAPS_HIGH_SPEED) {
902 bp->phylink_config.supported_interfaces);
903 bp->phylink_config.mac_capabilities |= MAC_10000FD;
907 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
908 bp->phy_interface, &macb_phylink_ops);
909 if (IS_ERR(bp->phylink)) {
911 PTR_ERR(bp->phylink));
912 return PTR_ERR(bp->phylink);
918 static int macb_mdiobus_register(struct macb *bp)
920 struct device_node *child, *np = bp->pdev->dev.of_node;
927 int ret = of_mdiobus_register(bp->mii_bus, child);
934 return mdiobus_register(bp->mii_bus);
948 return of_mdiobus_register(bp->mii_bus, np);
951 return mdiobus_register(bp->mii_bus);
954 static int macb_mii_init(struct macb *bp)
959 macb_writel(bp, NCR, MACB_BIT(MPE));
961 bp->mii_bus = mdiobus_alloc();
962 if (!bp->mii_bus) {
967 bp->mii_bus->name = "MACB_mii_bus";
968 bp->mii_bus->read = &macb_mdio_read_c22;
969 bp->mii_bus->write = &macb_mdio_write_c22;
970 bp->mii_bus->read_c45 = &macb_mdio_read_c45;
971 bp->mii_bus->write_c45 = &macb_mdio_write_c45;
972 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
973 bp->pdev->name, bp->pdev->id);
974 bp->mii_bus->priv = bp;
975 bp->mii_bus->parent = &bp->pdev->dev;
977 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
979 err = macb_mdiobus_register(bp);
983 err = macb_mii_probe(bp->dev);
990 mdiobus_unregister(bp->mii_bus);
992 mdiobus_free(bp->mii_bus);
997 static void macb_update_stats(struct macb *bp)
999 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
1000 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
1006 *p += bp->macb_reg_readl(bp, offset);
1009 static int macb_halt_tx(struct macb *bp)
1014 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
1019 status = macb_readl(bp, TSR);
1029 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
1033 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
1036 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
1047 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
1052 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1053 desc_64 = macb_64b_desc(bp, desc);
1065 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
1071 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
1072 desc_64 = macb_64b_desc(bp, desc);
1078 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
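
Note: macb_set_addr()/macb_get_addr() above write a DMA address into a single 32-bit descriptor word, or split it across the low word and the high word of the extended descriptor when HW_DMA_CAP_64B is set. The split itself is ordinary shift-and-mask arithmetic; a stand-alone sketch with an example bus address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma_addr = 0x0000000123456780ull;      /* example bus address */

        /* 64-bit capable descriptor: low word plus separate high word */
        uint32_t addr_lo = (uint32_t)(dma_addr & 0xffffffffu);
        uint32_t addr_hi = (uint32_t)(dma_addr >> 32);
        printf("lo=0x%08x hi=0x%08x\n", (unsigned int)addr_lo, (unsigned int)addr_hi);

        /* reassembled on the read side, as macb_get_addr() does */
        uint64_t back = ((uint64_t)addr_hi << 32) | addr_lo;
        printf("back=0x%016llx\n", (unsigned long long)back);
        return 0;
}
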
1089 struct macb *bp = queue->bp;
1096 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1097 (unsigned int)(queue - bp->queues),
1107 spin_lock_irqsave(&bp->lock, flags);
1110 netif_tx_stop_all_queues(bp->dev);
1116 if (macb_halt_tx(bp)) {
1117 netdev_err(bp->dev, "BUG: halt tx timed out\n");
1118 macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
1136 macb_tx_unmap(bp, tx_skb, 0);
1146 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1147 macb_tx_ring_wrap(bp, tail),
1149 bp->dev->stats.tx_packets++;
1151 bp->dev->stats.tx_bytes += skb->len;
1160 netdev_err(bp->dev,
1166 macb_tx_unmap(bp, tx_skb, 0);
1171 macb_set_addr(bp, desc, 0);
1180 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1188 macb_writel(bp, TSR, macb_readl(bp, TSR));
1192 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
1195 netif_tx_start_all_queues(bp->dev);
1196 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1198 spin_unlock_irqrestore(&bp->lock, flags);
1234 struct macb *bp = queue->bp;
1235 u16 queue_index = queue - bp->queues;
1270 gem_ptp_do_txstamp(bp, skb, desc);
1272 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1273 macb_tx_ring_wrap(bp, tail),
1275 bp->dev->stats.tx_packets++;
1277 bp->dev->stats.tx_bytes += skb->len;
1283 macb_tx_unmap(bp, tx_skb, budget);
1295 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1297 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1298 netif_wake_subqueue(bp->dev, queue_index);
1309 struct macb *bp = queue->bp;
1313 bp->rx_ring_size) > 0) {
1314 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1323 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1325 netdev_err(bp->dev,
1331 paddr = dma_map_single(&bp->pdev->dev, skb->data,
1332 bp->rx_buffer_size,
1334 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1341 if (entry == bp->rx_ring_size - 1)
1348 macb_set_addr(bp, desc, paddr);
1363 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1391 struct macb *bp = queue->bp;
1403 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1410 addr = macb_get_addr(bp, desc);
1424 netdev_err(bp->dev,
1426 bp->dev->stats.rx_dropped++;
1432 netdev_err(bp->dev,
1434 bp->dev->stats.rx_dropped++;
1440 len = ctrl & bp->rx_frm_len_mask;
1442 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1445 dma_unmap_single(&bp->pdev->dev, addr,
1446 bp->rx_buffer_size, DMA_FROM_DEVICE);
1448 skb->protocol = eth_type_trans(skb, bp->dev);
1450 if (bp->dev->features & NETIF_F_RXCSUM &&
1451 !(bp->dev->flags & IFF_PROMISC) &&
1455 bp->dev->stats.rx_packets++;
1457 bp->dev->stats.rx_bytes += skb->len;
1460 gem_ptp_do_rxstamp(bp, skb, desc);
1463 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1487 struct macb *bp = queue->bp;
1490 len = desc->ctrl & bp->rx_frm_len_mask;
1492 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1493 macb_rx_ring_wrap(bp, first_frag),
1494 macb_rx_ring_wrap(bp, last_frag), len);
1504 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1506 bp->dev->stats.rx_dropped++;
1526 unsigned int frag_len = bp->rx_buffer_size;
1538 offset += bp->rx_buffer_size;
1550 skb->protocol = eth_type_trans(skb, bp->dev);
1552 bp->dev->stats.rx_packets++;
1553 bp->dev->stats.rx_bytes += skb->len;
1554 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1563 struct macb *bp = queue->bp;
1569 for (i = 0; i < bp->rx_ring_size; i++) {
1571 macb_set_addr(bp, desc, addr);
1573 addr += bp->rx_buffer_size;
1582 struct macb *bp = queue->bp;
1634 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1636 spin_lock_irqsave(&bp->lock, flags);
1638 ctrl = macb_readl(bp, NCR);
1639 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1644 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1646 spin_unlock_irqrestore(&bp->lock, flags);
1660 struct macb *bp = queue->bp;
1664 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1676 struct macb *bp = queue->bp;
1679 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1681 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1682 (unsigned int)(queue - bp->queues), work_done, budget);
1685 queue_writel(queue, IER, bp->rx_intr_mask);
1698 queue_writel(queue, IDR, bp->rx_intr_mask);
1699 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1701 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1713 struct macb *bp = queue->bp;
1721 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1722 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1723 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1728 spin_lock_irq(&bp->lock);
1729 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1730 spin_unlock_irq(&bp->lock);
1755 struct macb *bp = queue->bp;
1763 netdev_vdbg(bp->dev, "poll: tx restart\n");
1767 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1768 (unsigned int)(queue - bp->queues), work_done, budget);
1785 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1787 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1797 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
1798 struct net_device *dev = bp->dev;
1803 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1804 queue_writel(queue, IDR, bp->rx_intr_mask |
1808 ctrl = macb_readl(bp, NCR);
1810 macb_writel(bp, NCR, ctrl);
1815 bp->macbgem_ops.mog_init_rings(bp);
1818 macb_init_buffers(bp);
1821 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1823 bp->rx_intr_mask |
1828 macb_writel(bp, NCR, ctrl);
1837 struct macb *bp = queue->bp;
1845 spin_lock(&bp->lock);
1849 macb_writel(bp, WOL, 0);
1850 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1851 (unsigned int)(queue - bp->queues),
1853 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1855 pm_wakeup_event(&bp->pdev->dev, 0);
1858 spin_unlock(&bp->lock);
1866 struct macb *bp = queue->bp;
1874 spin_lock(&bp->lock);
1878 gem_writel(bp, WOL, 0);
1879 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1880 (unsigned int)(queue - bp->queues),
1882 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1884 pm_wakeup_event(&bp->pdev->dev, 0);
1887 spin_unlock(&bp->lock);
1895 struct macb *bp = queue->bp;
1896 struct net_device *dev = bp->dev;
1904 spin_lock(&bp->lock);
1910 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1915 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1916 (unsigned int)(queue - bp->queues),
1919 if (status & bp->rx_intr_mask) {
1926 queue_writel(queue, IDR, bp->rx_intr_mask);
1927 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1931 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1939 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1949 netdev_vdbg(bp->dev, "scheduling TX softirq\n");
1958 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1976 ctrl = macb_readl(bp, NCR);
1977 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1979 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1981 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1987 if (macb_is_gem(bp))
1988 bp->hw_stats.gem.rx_overruns++;
1990 bp->hw_stats.macb.rx_overruns++;
1992 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1997 tasklet_schedule(&bp->hresp_err_tasklet);
2000 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2006 spin_unlock(&bp->lock);
2017 struct macb *bp = netdev_priv(dev);
2023 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2029 static unsigned int macb_tx_map(struct macb *bp,
2061 entry = macb_tx_ring_wrap(bp, tx_head);
2064 mapping = dma_map_single(&bp->pdev->dev,
2067 if (dma_mapping_error(&bp->pdev->dev, mapping))
2081 size = min(len, bp->max_tx_length);
2091 size = min(len, bp->max_tx_length);
2092 entry = macb_tx_ring_wrap(bp, tx_head);
2095 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2097 if (dma_mapping_error(&bp->pdev->dev, mapping))
2115 netdev_err(bp->dev, "BUG! empty skb!\n");
2130 entry = macb_tx_ring_wrap(bp, i);
2152 entry = macb_tx_ring_wrap(bp, i);
2161 if (unlikely(entry == (bp->tx_ring_size - 1)))
2168 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2179 macb_set_addr(bp, desc, tx_skb->mapping);
2192 netdev_err(bp->dev, "TX DMA map failed\n");
2197 macb_tx_unmap(bp, tx_skb, 0);
2310 struct macb *bp = netdev_priv(dev);
2311 struct macb_queue *queue = &bp->queues[queue_index];
2329 (bp->hw_dma_cap & HW_DMA_CAP_PTP))
2343 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2348 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
2351 netdev_vdbg(bp->dev,
2365 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2367 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2371 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
2378 bp->tx_ring_size) < desc_cnt) {
2380 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2387 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2396 spin_lock_irq(&bp->lock);
2397 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2398 spin_unlock_irq(&bp->lock);
2400 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
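
Note: macb_start_xmit() stops the subqueue when CIRC_SPACE() reports fewer free descriptors than the frame needs (line 2378) and the completion path wakes it once CIRC_CNT() drops to MACB_TX_WAKEUP_THRESH, three quarters of the ring (lines 66, 1295-1298). A sketch of that head/tail arithmetic using the CIRC_CNT/CIRC_SPACE definitions from include/linux/circ_buf.h, with a hypothetical ring size:

#include <stdio.h>

/* As defined in include/linux/circ_buf.h; size must be a power of two. */
#define CIRC_CNT(head, tail, size)      (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)    CIRC_CNT((tail), ((head) + 1), (size))

#define TX_RING_SIZE            512                     /* hypothetical */
#define TX_WAKEUP_THRESH        (3 * TX_RING_SIZE / 4)  /* as MACB_TX_WAKEUP_THRESH */

int main(void)
{
        unsigned int head = 700, tail = 300;            /* free-running indices */
        unsigned int used = CIRC_CNT(head, tail, TX_RING_SIZE);
        unsigned int space = CIRC_SPACE(head, tail, TX_RING_SIZE);

        printf("used=%u space=%u\n", used, space);
        printf("stop when space < descriptors needed; wake once used <= %u\n",
               TX_WAKEUP_THRESH);
        return 0;
}
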
2409 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2411 if (!macb_is_gem(bp)) {
2412 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2414 bp->rx_buffer_size = size;
2416 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2417 netdev_dbg(bp->dev,
2420 bp->rx_buffer_size =
2421 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2425 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2426 bp->dev->mtu, bp->rx_buffer_size);
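
Note: on GEM, macb_init_rx_buffer_size() rounds the requested buffer size up to a multiple of RX_BUFFER_MULTIPLE when it is not already aligned (lines 2416-2421). The rounding is plain integer round-up; a small sketch, with the 64-byte multiple assumed only for illustration:

#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of m (integer form of the kernel's roundup()). */
static size_t roundup_to(size_t x, size_t m)
{
        return ((x + m - 1) / m) * m;
}

int main(void)
{
        size_t rx_buffer_multiple = 64;         /* assumed value, for illustration */
        size_t requested = 1536 + 10;           /* arbitrary example size */

        printf("%zu -> %zu\n", requested, roundup_to(requested, rx_buffer_multiple));
        return 0;
}
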
2429 static void gem_free_rx_buffers(struct macb *bp)
2438 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2442 for (i = 0; i < bp->rx_ring_size; i++) {
2449 addr = macb_get_addr(bp, desc);
2451 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2462 static void macb_free_rx_buffers(struct macb *bp)
2464 struct macb_queue *queue = &bp->queues[0];
2467 dma_free_coherent(&bp->pdev->dev,
2468 bp->rx_ring_size * bp->rx_buffer_size,
2474 static void macb_free_consistent(struct macb *bp)
2480 bp->macbgem_ops.mog_free_rx_buffers(bp);
2482 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2486 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2487 dma_free_coherent(&bp->pdev->dev, size,
2492 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2493 dma_free_coherent(&bp->pdev->dev, size,
2500 static int gem_alloc_rx_buffers(struct macb *bp)
2506 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2507 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2512 netdev_dbg(bp->dev,
2514 bp->rx_ring_size, queue->rx_skbuff);
2519 static int macb_alloc_rx_buffers(struct macb *bp)
2521 struct macb_queue *queue = &bp->queues[0];
2524 size = bp->rx_ring_size * bp->rx_buffer_size;
2525 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2530 netdev_dbg(bp->dev,
2536 static int macb_alloc_consistent(struct macb *bp)
2542 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2543 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2544 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2549 netdev_dbg(bp->dev,
2554 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2559 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2560 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2564 netdev_dbg(bp->dev,
2568 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2574 macb_free_consistent(bp);
2578 static void gem_init_rings(struct macb *bp)
2585 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2586 for (i = 0; i < bp->tx_ring_size; i++) {
2588 macb_set_addr(bp, desc, 0);
2603 static void macb_init_rings(struct macb *bp)
2608 macb_init_rx_ring(&bp->queues[0]);
2610 for (i = 0; i < bp->tx_ring_size; i++) {
2611 desc = macb_tx_desc(&bp->queues[0], i);
2612 macb_set_addr(bp, desc, 0);
2615 bp->queues[0].tx_head = 0;
2616 bp->queues[0].tx_tail = 0;
2620 static void macb_reset_hw(struct macb *bp)
2624 u32 ctrl = macb_readl(bp, NCR);
2634 macb_writel(bp, NCR, ctrl);
2637 macb_writel(bp, TSR, -1);
2638 macb_writel(bp, RSR, -1);
2641 gem_writel(bp, PBUFRXCUT, 0);
2644 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2647 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2652 static u32 gem_mdc_clk_div(struct macb *bp)
2655 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2677 static u32 macb_mdc_clk_div(struct macb *bp)
2682 if (macb_is_gem(bp))
2683 return gem_mdc_clk_div(bp);
2685 pclk_hz = clk_get_rate(bp->pclk);
2702 static u32 macb_dbw(struct macb *bp)
2704 if (!macb_is_gem(bp))
2707 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2725 static void macb_configure_dma(struct macb *bp)
2732 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2733 if (macb_is_gem(bp)) {
2734 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2735 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2741 if (bp->dma_burst_length)
2742 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2746 if (bp->native_io)
2751 if (bp->dev->features & NETIF_F_HW_CSUM)
2758 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2762 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2765 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2767 gem_writel(bp, DMACFG, dmacfg);
2771 static void macb_init_hw(struct macb *bp)
2775 macb_reset_hw(bp);
2776 macb_set_hwaddr(bp);
2778 config = macb_mdc_clk_div(bp);
2781 if (bp->caps & MACB_CAPS_JUMBO)
2785 if (bp->dev->flags & IFF_PROMISC)
2787 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2789 if (!(bp->dev->flags & IFF_BROADCAST))
2791 config |= macb_dbw(bp);
2792 macb_writel(bp, NCFGR, config);
2793 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2794 gem_writel(bp, JML, bp->jumbo_max_len);
2795 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2796 if (bp->caps & MACB_CAPS_JUMBO)
2797 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2799 macb_configure_dma(bp);
2802 if (bp->rx_watermark)
2803 gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
2868 struct macb *bp = netdev_priv(dev);
2878 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2879 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2886 struct macb *bp = netdev_priv(dev);
2888 cfg = macb_readl(bp, NCFGR);
2895 if (macb_is_gem(bp))
2902 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2908 macb_or_gem_writel(bp, HRB, -1);
2909 macb_or_gem_writel(bp, HRT, -1);
2917 macb_or_gem_writel(bp, HRB, 0);
2918 macb_or_gem_writel(bp, HRT, 0);
2922 macb_writel(bp, NCFGR, cfg);
2928 struct macb *bp = netdev_priv(dev);
2933 netdev_dbg(bp->dev, "open\n");
2935 err = pm_runtime_resume_and_get(&bp->pdev->dev);
2940 macb_init_rx_buffer_size(bp, bufsz);
2942 err = macb_alloc_consistent(bp);
2949 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2954 macb_init_hw(bp);
2956 err = phy_power_on(bp->sgmii_phy);
2960 err = macb_phylink_connect(bp);
2966 if (bp->ptp_info)
2967 bp->ptp_info->ptp_init(dev);
2972 phy_power_off(bp->sgmii_phy);
2975 macb_reset_hw(bp);
2976 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2980 macb_free_consistent(bp);
2982 pm_runtime_put_sync(&bp->pdev->dev);
2988 struct macb *bp = netdev_priv(dev);
2995 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3000 phylink_stop(bp->phylink);
3001 phylink_disconnect_phy(bp->phylink);
3003 phy_power_off(bp->sgmii_phy);
3005 spin_lock_irqsave(&bp->lock, flags);
3006 macb_reset_hw(bp);
3008 spin_unlock_irqrestore(&bp->lock, flags);
3010 macb_free_consistent(bp);
3012 if (bp->ptp_info)
3013 bp->ptp_info->ptp_remove(dev);
3015 pm_runtime_put(&bp->pdev->dev);
3042 static void gem_update_stats(struct macb *bp)
3048 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
3052 u64 val = bp->macb_reg_readl(bp, offset);
3054 bp->ethtool_stats[i] += val;
3059 val = bp->macb_reg_readl(bp, offset + 4);
3060 bp->ethtool_stats[i] += ((u64)val) << 32;
3066 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3068 bp->ethtool_stats[idx++] = *stat;
3071 static struct net_device_stats *gem_get_stats(struct macb *bp)
3073 struct gem_stats *hwstat = &bp->hw_stats.gem;
3074 struct net_device_stats *nstat = &bp->dev->stats;
3076 if (!netif_running(bp->dev))
3079 gem_update_stats(bp);
3115 struct macb *bp;
3117 bp = netdev_priv(dev);
3118 gem_update_stats(bp);
3119 memcpy(data, &bp->ethtool_stats, sizeof(u64)
3125 struct macb *bp = netdev_priv(dev);
3129 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3138 struct macb *bp = netdev_priv(dev);
3149 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3162 struct macb *bp = netdev_priv(dev);
3163 struct net_device_stats *nstat = &bp->dev->stats;
3164 struct macb_stats *hwstat = &bp->hw_stats.macb;
3166 if (macb_is_gem(bp))
3167 return gem_get_stats(bp);
3170 macb_update_stats(bp);
3215 struct macb *bp = netdev_priv(dev);
3219 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3222 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3223 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3225 regs_buff[0] = macb_readl(bp, NCR);
3226 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
3227 regs_buff[2] = macb_readl(bp, NSR);
3228 regs_buff[3] = macb_readl(bp, TSR);
3229 regs_buff[4] = macb_readl(bp, RBQP);
3230 regs_buff[5] = macb_readl(bp, TBQP);
3231 regs_buff[6] = macb_readl(bp, RSR);
3232 regs_buff[7] = macb_readl(bp, IMR);
3236 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3237 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3239 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3240 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
3241 if (macb_is_gem(bp))
3242 regs_buff[13] = gem_readl(bp, DMACFG);
3247 struct macb *bp = netdev_priv(netdev);
3249 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
3250 phylink_ethtool_get_wol(bp->phylink, wol);
3253 if (bp->wol & MACB_WOL_ENABLED)
3260 struct macb *bp = netdev_priv(netdev);
3264 ret = phylink_ethtool_set_wol(bp->phylink, wol);
3271 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
3276 bp->wol |= MACB_WOL_ENABLED;
3278 bp->wol &= ~MACB_WOL_ENABLED;
3280 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
3288 struct macb *bp = netdev_priv(netdev);
3290 return phylink_ethtool_ksettings_get(bp->phylink, kset);
3296 struct macb *bp = netdev_priv(netdev);
3298 return phylink_ethtool_ksettings_set(bp->phylink, kset);
3306 struct macb *bp = netdev_priv(netdev);
3311 ring->rx_pending = bp->rx_ring_size;
3312 ring->tx_pending = bp->tx_ring_size;
3320 struct macb *bp = netdev_priv(netdev);
3335 if ((new_tx_size == bp->tx_ring_size) &&
3336 (new_rx_size == bp->rx_ring_size)) {
3341 if (netif_running(bp->dev)) {
3343 macb_close(bp->dev);
3346 bp->rx_ring_size = new_rx_size;
3347 bp->tx_ring_size = new_tx_size;
3350 macb_open(bp->dev);
3356 static unsigned int gem_get_tsu_rate(struct macb *bp)
3361 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
3365 else if (!IS_ERR(bp->pclk)) {
3366 tsu_clk = bp->pclk;
3381 struct macb *bp = netdev_priv(dev);
3383 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
3403 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
3422 struct macb *bp = netdev_priv(netdev);
3424 if (bp->ptp_info)
3425 return bp->ptp_info->get_ts_info(netdev, info);
3430 static void gem_enable_flow_filters(struct macb *bp, bool enable)
3432 struct net_device *netdev = bp->dev;
3440 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3442 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3449 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3472 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3476 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3485 if (!macb_is_gem(bp))
3500 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3501 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3514 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3515 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3542 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3543 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3556 gem_writel_n(bp, SCRT2, index, t2_scr);
3562 struct macb *bp = netdev_priv(netdev);
3582 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3585 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3598 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3600 gem_prog_cmp_regs(bp, fs);
3601 bp->rx_fs_list.count++;
3603 gem_enable_flow_filters(bp, 1);
3605 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3609 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3617 struct macb *bp = netdev_priv(netdev);
3622 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3624 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3636 gem_writel_n(bp, SCRT2, fs->location, 0);
3639 bp->rx_fs_list.count--;
3640 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3646 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3653 struct macb *bp = netdev_priv(netdev);
3656 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3668 struct macb *bp = netdev_priv(netdev);
3672 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3678 cmd->data = bp->max_tuples;
3687 struct macb *bp = netdev_priv(netdev);
3692 cmd->data = bp->num_queues;
3695 cmd->rule_cnt = bp->rx_fs_list.count;
3714 struct macb *bp = netdev_priv(netdev);
3719 if ((cmd->fs.location >= bp->max_tuples)
3720 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3771 struct macb *bp = netdev_priv(dev);
3776 return phylink_mii_ioctl(bp->phylink, rq, cmd);
3782 struct macb *bp = netdev_priv(dev);
3787 if (!bp->ptp_info)
3790 return bp->ptp_info->get_hwtst(dev, cfg);
3797 struct macb *bp = netdev_priv(dev);
3802 if (!bp->ptp_info)
3805 return bp->ptp_info->set_hwtst(dev, cfg, extack);
3808 static inline void macb_set_txcsum_feature(struct macb *bp,
3813 if (!macb_is_gem(bp))
3816 val = gem_readl(bp, DMACFG);
3822 gem_writel(bp, DMACFG, val);
3825 static inline void macb_set_rxcsum_feature(struct macb *bp,
3828 struct net_device *netdev = bp->dev;
3831 if (!macb_is_gem(bp))
3834 val = gem_readl(bp, NCFGR);
3840 gem_writel(bp, NCFGR, val);
3843 static inline void macb_set_rxflow_feature(struct macb *bp,
3846 if (!macb_is_gem(bp))
3849 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3855 struct macb *bp = netdev_priv(netdev);
3860 macb_set_txcsum_feature(bp, features);
3864 macb_set_rxcsum_feature(bp, features);
3868 macb_set_rxflow_feature(bp, features);
3873 static void macb_restore_features(struct macb *bp)
3875 struct net_device *netdev = bp->dev;
3880 macb_set_txcsum_feature(bp, features);
3883 macb_set_rxcsum_feature(bp, features);
3886 list_for_each_entry(item, &bp->rx_fs_list.list, list)
3887 gem_prog_cmp_regs(bp, &item->fs);
3889 macb_set_rxflow_feature(bp, features);
3914 static void macb_configure_caps(struct macb *bp,
3920 bp->caps = dt_conf->caps;
3922 if (hw_is_gem(bp->regs, bp->native_io)) {
3923 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3925 dcfg = gem_readl(bp, DCFG1);
3927 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3929 bp->caps |= MACB_CAPS_PCS;
3930 dcfg = gem_readl(bp, DCFG12);
3932 bp->caps |= MACB_CAPS_HIGH_SPEED;
3933 dcfg = gem_readl(bp, DCFG2);
3935 bp->caps |= MACB_CAPS_FIFO_MODE;
3936 if (gem_has_ptp(bp)) {
3937 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3938 dev_err(&bp->pdev->dev,
3942 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3943 bp->ptp_info = &gem_ptp_info;
3949 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
4077 struct macb *bp = netdev_priv(dev);
4082 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
4083 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
4090 if (!(bp->queue_mask & (1 << hw_q)))
4093 queue = &bp->queues[q];
4094 queue->bp = bp;
4107 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4121 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
4150 if (macb_is_gem(bp)) {
4151 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4152 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4153 bp->macbgem_ops.mog_init_rings = gem_init_rings;
4154 bp->macbgem_ops.mog_rx = gem_rx;
4157 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4158 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4159 bp->macbgem_ops.mog_init_rings = macb_init_rings;
4160 bp->macbgem_ops.mog_rx = macb_rx;
4170 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
4174 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4176 if (bp->caps & MACB_CAPS_SG_DISABLED)
4184 reg = gem_readl(bp, DCFG8);
4185 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
4187 INIT_LIST_HEAD(&bp->rx_fs_list.list);
4188 if (bp->max_tuples > 0) {
4194 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
4198 bp->rx_fs_list.count = 0;
4199 spin_lock_init(&bp->rx_fs_lock);
4201 bp->max_tuples = 0;
4204 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4206 if (phy_interface_mode_is_rgmii(bp->phy_interface))
4207 val = bp->usrio->rgmii;
4208 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4209 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4210 val = bp->usrio->rmii;
4211 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4212 val = bp->usrio->mii;
4214 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4215 val |= bp->usrio->refclk;
4217 macb_or_gem_writel(bp, USRIO, val);
4221 val = macb_mdc_clk_div(bp);
4222 val |= macb_dbw(bp);
4223 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4225 macb_writel(bp, NCFGR, val);
4595 struct macb *bp = netdev_priv(dev);
4598 bp->queues[0].bp = bp;
4608 macb_writel(bp, NCR, 0);
4610 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
4725 struct macb *bp = netdev_priv(dev);
4728 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4730 bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
4732 if (IS_ERR(bp->sgmii_phy))
4733 return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
4736 ret = phy_init(bp->sgmii_phy);
4765 phy_exit(bp->sgmii_phy);
4773 phy_exit(bp->sgmii_phy);
4984 struct macb *bp;
5014 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
5024 bp = netdev_priv(dev);
5025 bp->pdev = pdev;
5026 bp->dev = dev;
5027 bp->regs = mem;
5028 bp->native_io = native_io;
5030 bp->macb_reg_readl = hw_readl_native;
5031 bp->macb_reg_writel = hw_writel_native;
5033 bp->macb_reg_readl = hw_readl;
5034 bp->macb_reg_writel = hw_writel;
5036 bp->num_queues = num_queues;
5037 bp->queue_mask = queue_mask;
5039 bp->dma_burst_length = macb_config->dma_burst_length;
5040 bp->pclk = pclk;
5041 bp->hclk = hclk;
5042 bp->tx_clk = tx_clk;
5043 bp->rx_clk = rx_clk;
5044 bp->tsu_clk = tsu_clk;
5046 bp->jumbo_max_len = macb_config->jumbo_max_len;
5048 if (!hw_is_gem(bp->regs, bp->native_io))
5049 bp->max_tx_length = MACB_MAX_TX_LEN;
5051 bp->max_tx_length = macb_config->max_tx_length;
5053 bp->max_tx_length = GEM_MAX_TX_LEN;
5055 bp->wol = 0;
5057 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
5058 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
5060 bp->usrio = macb_config->usrio;
5065 if (GEM_BFEXT(PBUF_CUTTHRU, gem_readl(bp, DCFG6))) {
5066 err = of_property_read_u32(bp->pdev->dev.of_node,
5068 &bp->rx_watermark);
5074 wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
5075 if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
5076 dev_info(&bp->pdev->dev, "Invalid watermark value\n");
5077 bp->rx_watermark = 0;
5081 spin_lock_init(&bp->lock);
5084 macb_configure_caps(bp, macb_config);
5087 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
5089 bp->hw_dma_cap |= HW_DMA_CAP_64B;
5102 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
5103 dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
5107 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
5108 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
5110 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
5111 macb_dma_desc_get_size(bp);
5113 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
5115 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
5116 macb_dma_desc_get_size(bp);
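
Note: when MACB_CAPS_BD_RD_PREFETCH is set, the RXBD_RDBUFF/TXBD_RDBUFF fields of DCFG10 encode how many descriptors the controller prefetches; the driver converts that to bytes as (2 << (val - 1)) * descriptor_size and adds it to the ring sizes (lines 2543 and 2559 above). A quick evaluation of that formula with an assumed 16-byte descriptor:

#include <stdio.h>

int main(void)
{
        unsigned int desc_size = 16;    /* assumed macb_dma_desc_get_size() result */

        for (unsigned int val = 1; val <= 4; val++)
                printf("val=%u -> %u descriptors, %u extra ring bytes\n",
                       val, 2u << (val - 1), (2u << (val - 1)) * desc_size);
        return 0;
}
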
5119 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
5120 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
5121 bp->rx_intr_mask |= MACB_BIT(RXUBR);
5123 err = of_get_ethdev_address(np, bp->dev);
5127 macb_get_hwaddr(bp);
5132 bp->phy_interface = PHY_INTERFACE_MODE_MII;
5134 bp->phy_interface = interface;
5141 err = macb_mii_init(bp);
5153 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
5156 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
5159 pm_runtime_mark_last_busy(&bp->pdev->dev);
5160 pm_runtime_put_autosuspend(&bp->pdev->dev);
5165 mdiobus_unregister(bp->mii_bus);
5166 mdiobus_free(bp->mii_bus);
5169 phy_exit(bp->sgmii_phy);
5186 struct macb *bp;
5191 bp = netdev_priv(dev);
5192 phy_exit(bp->sgmii_phy);
5193 mdiobus_unregister(bp->mii_bus);
5194 mdiobus_free(bp->mii_bus);
5197 tasklet_kill(&bp->hresp_err_tasklet);
5201 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
5202 bp->rx_clk, bp->tsu_clk);
5205 phylink_destroy(bp->phylink);
5213 struct macb *bp = netdev_priv(netdev);
5219 if (!device_may_wakeup(&bp->dev->dev))
5220 phy_exit(bp->sgmii_phy);
5225 if (bp->wol & MACB_WOL_ENABLED) {
5226 spin_lock_irqsave(&bp->lock, flags);
5228 macb_writel(bp, TSR, -1);
5229 macb_writel(bp, RSR, -1);
5230 for (q = 0, queue = bp->queues; q < bp->num_queues;
5235 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5241 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5242 if (macb_is_gem(bp)) {
5243 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
5244 IRQF_SHARED, netdev->name, bp->queues);
5248 bp->queues[0].irq, err);
5249 spin_unlock_irqrestore(&bp->lock, flags);
5252 queue_writel(bp->queues, IER, GEM_BIT(WOL));
5253 gem_writel(bp, WOL, MACB_BIT(MAG));
5255 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
5256 IRQF_SHARED, netdev->name, bp->queues);
5260 bp->queues[0].irq, err);
5261 spin_unlock_irqrestore(&bp->lock, flags);
5264 queue_writel(bp->queues, IER, MACB_BIT(WOL));
5265 macb_writel(bp, WOL, MACB_BIT(MAG));
5267 spin_unlock_irqrestore(&bp->lock, flags);
5269 enable_irq_wake(bp->queues[0].irq);
5273 for (q = 0, queue = bp->queues; q < bp->num_queues;
5279 if (!(bp->wol & MACB_WOL_ENABLED)) {
5281 phylink_stop(bp->phylink);
5283 spin_lock_irqsave(&bp->lock, flags);
5284 macb_reset_hw(bp);
5285 spin_unlock_irqrestore(&bp->lock, flags);
5288 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5289 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
5292 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
5294 if (bp->ptp_info)
5295 bp->ptp_info->ptp_remove(netdev);
5305 struct macb *bp = netdev_priv(netdev);
5311 if (!device_may_wakeup(&bp->dev->dev))
5312 phy_init(bp->sgmii_phy);
5320 if (bp->wol & MACB_WOL_ENABLED) {
5321 spin_lock_irqsave(&bp->lock, flags);
5323 if (macb_is_gem(bp)) {
5324 queue_writel(bp->queues, IDR, GEM_BIT(WOL));
5325 gem_writel(bp, WOL, 0);
5327 queue_writel(bp->queues, IDR, MACB_BIT(WOL));
5328 macb_writel(bp, WOL, 0);
5331 queue_readl(bp->queues, ISR);
5332 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
5333 queue_writel(bp->queues, ISR, -1);
5335 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
5336 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
5337 IRQF_SHARED, netdev->name, bp->queues);
5341 bp->queues[0].irq, err);
5342 spin_unlock_irqrestore(&bp->lock, flags);
5345 spin_unlock_irqrestore(&bp->lock, flags);
5347 disable_irq_wake(bp->queues[0].irq);
5353 phylink_stop(bp->phylink);
5357 for (q = 0, queue = bp->queues; q < bp->num_queues;
5364 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
5366 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
5367 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
5369 macb_writel(bp, NCR, MACB_BIT(MPE));
5370 macb_init_hw(bp);
5372 macb_restore_features(bp);
5375 phylink_start(bp->phylink);
5379 if (bp->ptp_info)
5380 bp->ptp_info->ptp_init(netdev);
5388 struct macb *bp = netdev_priv(netdev);
5391 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
5392 else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
5393 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
5401 struct macb *bp = netdev_priv(netdev);
5404 clk_prepare_enable(bp->pclk);
5405 clk_prepare_enable(bp->hclk);
5406 clk_prepare_enable(bp->tx_clk);
5407 clk_prepare_enable(bp->rx_clk);
5408 clk_prepare_enable(bp->tsu_clk);
5409 } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
5410 clk_prepare_enable(bp->tsu_clk);