Lines Matching defs:alx (drivers/net/ethernet/atheros/alx/main.c)

47 #include "alx.h"
51 static const char alx_drv_name[] = "alx";
71 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
73 struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
80 if (++next == alx->rx_ringsz)
96 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
103 dma = dma_map_single(&alx->hw.pdev->dev,
104 skb->data, alx->rxbuf_size,
106 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
120 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
125 if (++next == alx->rx_ringsz)
135 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
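
The matches at lines 80 and 125 show the refill loop's wrap pattern, and line 135 shows the final producer index being published to the RFD producer register with a 16-bit MMIO write. A minimal stand-alone model of that index arithmetic follows; the ring size mirrors the rx_ringsz default at line 1070, everything else is illustrative.

    #include <stdio.h>

    /* Stand-alone model of the producer-index wrap at lines 80/125: the
     * cursor is bumped and reset at ring end, avoiding a modulo on the
     * hot path.  The driver then publishes the final index with a
     * 16-bit write to ALX_RFD_PIDX (line 135). */
    int main(void)
    {
        unsigned int rx_ringsz = 512;
        unsigned int next = 510;

        for (int i = 0; i < 4; i++) {
            printf("fill slot %u\n", next);
            if (++next == rx_ringsz)    /* wrap, as in the driver */
                next = 0;
        }
        return 0;   /* prints 510, 511, 0, 1 */
    }
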
141 static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
146 if (r_idx >= alx->num_txq)
147 r_idx = r_idx % alx->num_txq;
149 return alx->qnapi[r_idx]->txq;
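
alx_tx_queue_mapping() (lines 141-149) folds an out-of-range queue index into [0, num_txq) before indexing qnapi. A tiny runnable model of that fold:

    #include <stdio.h>

    /* Stand-alone model of the index fold at lines 146-147: the skb's
     * recorded queue is reduced modulo num_txq only when it is out of
     * range, so the common in-range case skips the division entirely. */
    static unsigned int tx_queue_index(unsigned int r_idx, unsigned int num_txq)
    {
        if (r_idx >= num_txq)
            r_idx = r_idx % num_txq;
        return r_idx;
    }

    int main(void)
    {
        printf("%u %u\n", tx_queue_index(1, 4), tx_queue_index(9, 4));
        return 0;   /* prints "1 1" */
    }
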
166 struct alx_priv *alx;
172 alx = netdev_priv(txq->netdev);
176 hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
199 if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
206 static void alx_schedule_link_check(struct alx_priv *alx)
208 schedule_work(&alx->link_check_wk);
211 static void alx_schedule_reset(struct alx_priv *alx)
213 schedule_work(&alx->reset_wk);
218 struct alx_priv *alx;
225 alx = netdev_priv(rxq->netdev);
237 alx_schedule_reset(alx);
263 if (alx->dev->features & NETIF_F_RXCSUM &&
287 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
291 alx_refill_rx_ring(alx, GFP_ATOMIC);
299 struct alx_priv *alx = np->alx;
300 struct alx_hw *hw = &alx->hw;
316 if (alx->hw.pdev->msix_enabled) {
319 spin_lock_irqsave(&alx->irq_lock, flags);
320 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
321 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
322 spin_unlock_irqrestore(&alx->irq_lock, flags);
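
Lines 316-322 are the tail of alx_poll(): once the budget is not exhausted, the per-queue interrupt sources are unmasked again, either via the per-vector MSI-X mask or, on the shared-IRQ path, by restoring the Q0 bits in IMR under irq_lock. A hedged sketch of that tail; "work", "budget", and the MSI-X unmask helper are assumptions, not verbatim source.

    /* Hedged sketch of the alx_poll() completion path implied by
     * lines 316-322. */
    if (work == budget)
        return budget;          /* budget exhausted: stay scheduled */

    napi_complete_done(&np->napi, work);

    if (alx->hw.pdev->msix_enabled) {
        alx_mask_msix(hw, np->vec_idx, false);  /* assumed per-vector unmask */
    } else {
        spin_lock_irqsave(&alx->irq_lock, flags);
        alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
        alx_write_mem32(hw, ALX_IMR, alx->int_mask);
        spin_unlock_irqrestore(&alx->irq_lock, flags);
    }
    return work;
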
330 static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
332 struct alx_hw *hw = &alx->hw;
335 netif_warn(alx, hw, alx->dev,
337 alx_schedule_reset(alx);
342 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
349 alx->int_mask &= ~ALX_ISR_PHY;
350 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
351 alx_schedule_link_check(alx);
357 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
359 struct alx_hw *hw = &alx->hw;
361 spin_lock(&alx->irq_lock);
365 intr &= alx->int_mask;
367 if (alx_intr_handle_misc(alx, intr))
371 napi_schedule(&alx->qnapi[0]->napi);
373 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
374 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
380 spin_unlock(&alx->irq_lock);
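
Lines 357-380 outline the shared interrupt core: acknowledge under irq_lock, drop unarmed bits, let the misc handler claim fatal/alert/PHY events, and otherwise hand Q0 work to NAPI with the queue sources masked until poll completion. A hedged reconstruction of the whole function; the ISR ack/re-enable writes and the out: label are assumptions filled in around the listed lines.

    /* Hedged reconstruction of alx_intr_handle() from the matches at
     * lines 357-380. */
    static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
    {
        struct alx_hw *hw = &alx->hw;

        spin_lock(&alx->irq_lock);

        /* assumed: ack, keeping interrupts gated while we run */
        alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
        intr &= alx->int_mask;

        if (alx_intr_handle_misc(alx, intr))
            goto out;

        if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
            napi_schedule(&alx->qnapi[0]->napi);
            /* mask queue sources; alx_poll() restores them */
            alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
            alx_write_mem32(hw, ALX_IMR, alx->int_mask);
        }

        /* assumed: clear the disable bit so new interrupts can fire */
        alx_write_mem32(hw, ALX_ISR, 0);

    out:
        spin_unlock(&alx->irq_lock);
        return IRQ_HANDLED;
    }
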
387 struct alx_hw *hw = &np->alx->hw;
401 struct alx_priv *alx = data;
402 struct alx_hw *hw = &alx->hw;
410 intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
412 if (alx_intr_handle_misc(alx, intr))
426 struct alx_priv *alx = data;
428 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
433 struct alx_priv *alx = data;
434 struct alx_hw *hw = &alx->hw;
439 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
442 return alx_intr_handle(alx, intr);
450 static void alx_init_ring_ptrs(struct alx_priv *alx)
452 struct alx_hw *hw = &alx->hw;
453 u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
457 for (i = 0; i < alx->num_napi; i++) {
458 np = alx->qnapi[i];
477 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
480 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
481 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
482 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
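
Line 453 takes only the high 32 bits of the descriptor block's DMA address: the hardware is programmed with one shared high half plus per-ring low halves, which is why every ring is carved out of the single dma_alloc_coherent() block at lines 662-664. A stand-alone illustration of the split (the address is just an example):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-alone model of the address split at line 453: one shared
     * high-32 word plus per-ring low-32 bases implies all rings must
     * sit in the same 4 GiB window, which one coherent block
     * guarantees. */
    int main(void)
    {
        uint64_t dma = 0x0000000123456000ULL;
        uint32_t addr_hi = (uint32_t)(dma >> 32);
        uint32_t addr_lo = (uint32_t)dma;

        printf("hi=0x%08x lo=0x%08x\n", addr_hi, addr_lo);
        return 0;
    }
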
533 static void alx_free_buffers(struct alx_priv *alx)
537 for (i = 0; i < alx->num_txq; i++)
538 if (alx->qnapi[i] && alx->qnapi[i]->txq)
539 alx_free_txring_buf(alx->qnapi[i]->txq);
541 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
542 alx_free_rxring_buf(alx->qnapi[0]->rxq);
545 static int alx_reinit_rings(struct alx_priv *alx)
547 alx_free_buffers(alx);
549 alx_init_ring_ptrs(alx);
551 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
570 struct alx_priv *alx = netdev_priv(netdev);
571 struct alx_hw *hw = &alx->hw;
599 struct alx_priv *alx = netdev_priv(netdev);
600 struct alx_hw *hw = &alx->hw;
616 static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
623 txq->tpd = alx->descmem.virt + offset;
624 txq->tpd_dma = alx->descmem.dma + offset;
630 static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
637 rxq->rrd = alx->descmem.virt + offset;
638 rxq->rrd_dma = alx->descmem.dma + offset;
641 rxq->rfd = alx->descmem.virt + offset;
642 rxq->rfd_dma = alx->descmem.dma + offset;
648 static int alx_alloc_rings(struct alx_priv *alx)
658 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
659 alx->num_txq +
660 sizeof(struct alx_rrd) * alx->rx_ringsz +
661 sizeof(struct alx_rfd) * alx->rx_ringsz;
662 alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
663 alx->descmem.size,
664 &alx->descmem.dma, GFP_KERNEL);
665 if (!alx->descmem.virt)
672 for (i = 0; i < alx->num_txq; i++) {
673 offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
675 netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
680 offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
682 netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
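
Lines 658-664 size one coherent block for all TPD rings plus the RRD and RFD rings, and alx_alloc_tx_ring()/alx_alloc_rx_ring() (lines 616-642) carve it up by a running offset. A runnable model of that layout arithmetic; the descriptor sizes here are placeholders, not the hardware's.

    #include <stdio.h>

    /* Stand-alone model of the descmem carve-up at lines 658-680: TPD
     * rings first (one per TX queue), then one RRD ring, then one RFD
     * ring, each addressed by a running byte offset into one block.
     * Descriptor sizes are illustrative placeholders. */
    int main(void)
    {
        unsigned int tpd = 16, rrd = 16, rfd = 8;   /* assumed sizes */
        unsigned int tx_ringsz = 256, rx_ringsz = 512, num_txq = 4;
        unsigned int offset = 0;

        for (unsigned int i = 0; i < num_txq; i++) {
            printf("txq%u: tpd @ %u\n", i, offset);
            offset += tpd * tx_ringsz;
        }
        printf("rrd @ %u\n", offset);
        offset += rrd * rx_ringsz;
        printf("rfd @ %u\n", offset);
        offset += rfd * rx_ringsz;
        printf("total = %u bytes\n", offset);
        return 0;
    }
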
689 static void alx_free_rings(struct alx_priv *alx)
693 alx_free_buffers(alx);
695 for (i = 0; i < alx->num_txq; i++)
696 if (alx->qnapi[i] && alx->qnapi[i]->txq)
697 kfree(alx->qnapi[i]->txq->bufs);
699 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
700 kfree(alx->qnapi[0]->rxq->bufs);
702 if (alx->descmem.virt)
703 dma_free_coherent(&alx->hw.pdev->dev,
704 alx->descmem.size,
705 alx->descmem.virt,
706 alx->descmem.dma);
709 static void alx_free_napis(struct alx_priv *alx)
714 for (i = 0; i < alx->num_napi; i++) {
715 np = alx->qnapi[i];
723 alx->qnapi[i] = NULL;
738 static int alx_alloc_napis(struct alx_priv *alx)
745 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
748 for (i = 0; i < alx->num_napi; i++) {
753 np->alx = alx;
754 netif_napi_add(alx->dev, &np->napi, alx_poll);
755 alx->qnapi[i] = np;
759 for (i = 0; i < alx->num_txq; i++) {
760 np = alx->qnapi[i];
769 txq->count = alx->tx_ringsz;
770 txq->netdev = alx->dev;
771 txq->dev = &alx->hw.pdev->dev;
773 alx->int_mask |= tx_vect_mask[i];
777 np = alx->qnapi[0];
783 rxq->np = alx->qnapi[0];
785 rxq->count = alx->rx_ringsz;
786 rxq->netdev = alx->dev;
787 rxq->dev = &alx->hw.pdev->dev;
789 alx->int_mask |= rx_vect_mask[0];
794 netdev_err(alx->dev, "error allocating internal structures\n");
795 alx_free_napis(alx);
806 static void alx_config_vector_mapping(struct alx_priv *alx)
808 struct alx_hw *hw = &alx->hw;
812 if (alx->hw.pdev->msix_enabled) {
814 for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
829 static int alx_enable_msix(struct alx_priv *alx)
837 err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
840 netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
844 alx->num_vec = num_vec;
845 alx->num_napi = num_vec - 1;
846 alx->num_txq = num_txq;
847 alx->num_rxq = num_rxq;
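
alx_enable_msix() (lines 829-847) asks pci_alloc_irq_vectors() for an exact count (min == max, so all-or-nothing) and then derives the bookkeeping: vector 0 carries misc interrupts only, so num_napi is num_vec - 1. A hedged sketch; how num_txq/num_vec are sized is not in the listing, so that part is an assumption.

    /* Hedged sketch of alx_enable_msix() around lines 829-847.  The
     * vector sizing (one per TX queue plus one misc vector) is an
     * assumption. */
    num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES); /* assumed */
    num_rxq = 1;
    num_vec = max_t(int, num_txq, num_rxq) + 1;     /* +1 misc, assumed */

    err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
                                PCI_IRQ_MSIX);      /* exact count only */
    if (err < 0) {
        netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
        return err;
    }

    alx->num_vec = num_vec;
    alx->num_napi = num_vec - 1;    /* vector 0 is misc-only */
    alx->num_txq = num_txq;
    alx->num_rxq = num_rxq;
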
852 static int alx_request_msix(struct alx_priv *alx)
854 struct net_device *netdev = alx->dev;
857 err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
858 0, netdev->name, alx);
862 for (i = 0; i < alx->num_napi; i++) {
863 struct alx_napi *np = alx->qnapi[i];
880 err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
888 free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);
892 free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
893 alx->qnapi[i]);
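
Lines 888-893 are the error unwind of alx_request_msix(): on a request_irq() failure for a ring vector, the misc vector (0) is freed first, then every ring vector that had already been requested, reusing a running free_vector counter. A hedged sketch of that tail; the "vector--" bound is an assumption about how many ring IRQs had been requested before the failure.

    /* Hedged sketch of the alx_request_msix() unwind at lines 888-893. */
    out_free:
        free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);

        vector--;   /* assumed: the failing vector was never requested */
        for (i = 0; i < vector; i++)
            free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
                     alx->qnapi[i]);

        return err;
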
899 static int alx_init_intr(struct alx_priv *alx)
903 ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
908 alx->num_vec = 1;
909 alx->num_napi = 1;
910 alx->num_txq = 1;
911 alx->num_rxq = 1;
915 static void alx_irq_enable(struct alx_priv *alx)
917 struct alx_hw *hw = &alx->hw;
922 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
925 if (alx->hw.pdev->msix_enabled) {
927 for (i = 0; i < alx->num_vec; i++)
932 static void alx_irq_disable(struct alx_priv *alx)
934 struct alx_hw *hw = &alx->hw;
941 if (alx->hw.pdev->msix_enabled) {
942 for (i = 0; i < alx->num_vec; i++) {
944 synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
947 synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
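
alx_irq_disable() (lines 932-947) masks the hardware first and then synchronize_irq()s every vector (or just vector 0 on the shared path), so no handler can still be running when teardown proceeds. A hedged sketch; the quiescing register writes and the per-vector mask helper are assumptions filled in around the listed lines.

    /* Hedged sketch of alx_irq_disable() around lines 932-947. */
    alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);  /* assumed: gate new irqs */
    alx_write_mem32(hw, ALX_IMR, 0);            /* assumed: mask all sources */

    if (alx->hw.pdev->msix_enabled) {
        for (i = 0; i < alx->num_vec; i++) {
            alx_mask_msix(hw, i, true);         /* assumed helper */
            synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
        }
    } else {
        synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
    }
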
951 static int alx_realloc_resources(struct alx_priv *alx)
955 alx_free_rings(alx);
956 alx_free_napis(alx);
957 pci_free_irq_vectors(alx->hw.pdev);
959 err = alx_init_intr(alx);
963 err = alx_alloc_napis(alx);
967 err = alx_alloc_rings(alx);
974 static int alx_request_irq(struct alx_priv *alx)
976 struct pci_dev *pdev = alx->hw.pdev;
977 struct alx_hw *hw = &alx->hw;
983 if (alx->hw.pdev->msix_enabled) {
985 err = alx_request_msix(alx);
990 err = alx_realloc_resources(alx);
995 if (alx->hw.pdev->msi_enabled) {
999 alx->dev->name, alx);
1004 pci_free_irq_vectors(alx->hw.pdev);
1009 alx->dev->name, alx);
1012 alx_config_vector_mapping(alx);
1014 netdev_err(alx->dev, "IRQ registration failed!\n");
1018 static void alx_free_irq(struct alx_priv *alx)
1020 struct pci_dev *pdev = alx->hw.pdev;
1023 free_irq(pci_irq_vector(pdev, 0), alx);
1024 if (alx->hw.pdev->msix_enabled) {
1025 for (i = 0; i < alx->num_napi; i++)
1026 free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
1032 static int alx_identify_hw(struct alx_priv *alx)
1034 struct alx_hw *hw = &alx->hw;
1045 static int alx_init_sw(struct alx_priv *alx)
1047 struct pci_dev *pdev = alx->hw.pdev;
1048 struct alx_hw *hw = &alx->hw;
1051 err = alx_identify_hw(alx);
1057 alx->hw.lnk_patch =
1064 hw->mtu = alx->dev->mtu;
1065 alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
1067 alx->dev->min_mtu = 34;
1068 alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
1069 alx->tx_ringsz = 256;
1070 alx->rx_ringsz = 512;
1072 alx->int_mask = ALX_ISR_MISC;
1074 hw->ith_tpd = alx->tx_ringsz / 3;
1093 mutex_init(&alx->mtx);
1108 static void alx_netif_stop(struct alx_priv *alx)
1112 netif_trans_update(alx->dev);
1113 if (netif_carrier_ok(alx->dev)) {
1114 netif_carrier_off(alx->dev);
1115 netif_tx_disable(alx->dev);
1116 for (i = 0; i < alx->num_napi; i++)
1117 napi_disable(&alx->qnapi[i]->napi);
1121 static void alx_halt(struct alx_priv *alx)
1123 struct alx_hw *hw = &alx->hw;
1125 lockdep_assert_held(&alx->mtx);
1127 alx_netif_stop(alx);
1135 alx_irq_disable(alx);
1136 alx_free_buffers(alx);
1139 static void alx_configure(struct alx_priv *alx)
1141 struct alx_hw *hw = &alx->hw;
1145 __alx_set_rx_mode(alx->dev);
1150 static void alx_activate(struct alx_priv *alx)
1152 lockdep_assert_held(&alx->mtx);
1155 alx_reinit_rings(alx);
1156 alx_configure(alx);
1159 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
1161 alx_irq_enable(alx);
1163 alx_schedule_link_check(alx);
1166 static void alx_reinit(struct alx_priv *alx)
1168 lockdep_assert_held(&alx->mtx);
1170 alx_halt(alx);
1171 alx_activate(alx);
1176 struct alx_priv *alx = netdev_priv(netdev);
1180 alx->hw.mtu = mtu;
1181 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
1184 mutex_lock(&alx->mtx);
1185 alx_reinit(alx);
1186 mutex_unlock(&alx->mtx);
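
Lines 1176-1186 show the MTU-change path: record the new MTU in both the netdev and the hw struct, bump rxbuf_size to at least the default, and do a full reinit (halt plus activate) under mtx if the interface is running. A hedged reconstruction; the max_frame computation and the netif_running() guard are assumptions filled in around the listed lines.

    /* Hedged reconstruction of the MTU-change path, lines 1176-1186. */
    static int alx_change_mtu(struct net_device *netdev, int mtu)
    {
        struct alx_priv *alx = netdev_priv(netdev);
        int max_frame = ALX_MAX_FRAME_LEN(mtu);     /* assumed */

        netdev->mtu = mtu;
        alx->hw.mtu = mtu;
        alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);

        if (netif_running(netdev)) {                /* assumed guard */
            mutex_lock(&alx->mtx);
            alx_reinit(alx);    /* full halt + activate */
            mutex_unlock(&alx->mtx);
        }
        return 0;
    }
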
1191 static void alx_netif_start(struct alx_priv *alx)
1195 netif_tx_wake_all_queues(alx->dev);
1196 for (i = 0; i < alx->num_napi; i++)
1197 napi_enable(&alx->qnapi[i]->napi);
1198 netif_carrier_on(alx->dev);
1201 static int __alx_open(struct alx_priv *alx, bool resume)
1205 err = alx_enable_msix(alx);
1207 err = alx_init_intr(alx);
1213 netif_carrier_off(alx->dev);
1215 err = alx_alloc_napis(alx);
1219 err = alx_alloc_rings(alx);
1223 alx_configure(alx);
1225 err = alx_request_irq(alx);
1233 alx_reinit_rings(alx);
1235 netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
1236 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
1239 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
1241 alx_irq_enable(alx);
1244 netif_tx_start_all_queues(alx->dev);
1246 alx_schedule_link_check(alx);
1250 alx_free_rings(alx);
1251 alx_free_napis(alx);
1253 pci_free_irq_vectors(alx->hw.pdev);
1257 static void __alx_stop(struct alx_priv *alx)
1259 lockdep_assert_held(&alx->mtx);
1261 alx_free_irq(alx);
1263 cancel_work_sync(&alx->link_check_wk);
1264 cancel_work_sync(&alx->reset_wk);
1266 alx_halt(alx);
1267 alx_free_rings(alx);
1268 alx_free_napis(alx);
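
Lines 1257-1268 fix the teardown order in __alx_stop(), and the order matters: IRQs go first so no handler can requeue work, the workers are flushed next so neither can touch the hardware or the rings, and only then is the device halted and its memory released. A reconstruction assembled from the listed lines:

    /* Reconstruction of __alx_stop() from lines 1257-1268. */
    static void __alx_stop(struct alx_priv *alx)
    {
        lockdep_assert_held(&alx->mtx);

        alx_free_irq(alx);      /* no handler can run past this point */

        cancel_work_sync(&alx->link_check_wk);
        cancel_work_sync(&alx->reset_wk);

        alx_halt(alx);
        alx_free_rings(alx);
        alx_free_napis(alx);
    }
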
1289 static void alx_check_link(struct alx_priv *alx)
1291 struct alx_hw *hw = &alx->hw;
1296 lockdep_assert_held(&alx->mtx);
1308 spin_lock_irqsave(&alx->irq_lock, flags);
1309 alx->int_mask |= ALX_ISR_PHY;
1310 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
1311 spin_unlock_irqrestore(&alx->irq_lock, flags);
1317 netif_info(alx, link, alx->dev,
1324 alx_netif_start(alx);
1327 alx_netif_stop(alx);
1328 netif_info(alx, link, alx->dev, "Link Down\n");
1332 alx_irq_disable(alx);
1335 err = alx_reinit_rings(alx);
1338 alx_configure(alx);
1341 alx_irq_enable(alx);
1347 alx_schedule_reset(alx);
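
Lines 1289-1347 sketch out alx_check_link(): re-arm the PHY interrupt under irq_lock (it was masked by alx_intr_handle_misc at lines 349-350), then bring the interface up or down, scheduling a reset if anything fails. A condensed, hedged sketch of that flow; the PHY-read helper and the speed-change bookkeeping are best-effort assumptions.

    /* Hedged sketch of the alx_check_link() flow, lines 1289-1347. */
    old_speed = hw->link_speed;                 /* assumed field use */
    err = alx_read_phy_link(hw);                /* assumed helper */
    if (err < 0)
        goto reset;

    spin_lock_irqsave(&alx->irq_lock, flags);
    alx->int_mask |= ALX_ISR_PHY;               /* re-arm PHY interrupt */
    alx_write_mem32(hw, ALX_IMR, alx->int_mask);
    spin_unlock_irqrestore(&alx->irq_lock, flags);

    if (old_speed == hw->link_speed)
        return;                                 /* nothing changed */

    if (hw->link_speed != SPEED_UNKNOWN) {
        netif_info(alx, link, alx->dev, "link up\n");
        alx_netif_start(alx);                   /* wake queues, enable NAPI */
    } else {
        alx_netif_stop(alx);
        netif_info(alx, link, alx->dev, "Link Down\n");
        alx_irq_disable(alx);
        if (alx_reinit_rings(alx))              /* re-prime for next link-up */
            goto reset;
        alx_configure(alx);
        alx_irq_enable(alx);
    }
    return;

    reset:
        alx_schedule_reset(alx);
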
1352 struct alx_priv *alx = netdev_priv(netdev);
1355 mutex_lock(&alx->mtx);
1356 ret = __alx_open(alx, false);
1357 mutex_unlock(&alx->mtx);
1364 struct alx_priv *alx = netdev_priv(netdev);
1366 mutex_lock(&alx->mtx);
1367 __alx_stop(alx);
1368 mutex_unlock(&alx->mtx);
1375 struct alx_priv *alx;
1377 alx = container_of(work, struct alx_priv, link_check_wk);
1379 mutex_lock(&alx->mtx);
1380 alx_check_link(alx);
1381 mutex_unlock(&alx->mtx);
1386 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1388 mutex_lock(&alx->mtx);
1389 alx_reinit(alx);
1390 mutex_unlock(&alx->mtx);
1534 struct alx_priv *alx;
1538 alx = netdev_priv(txq->netdev);
1561 alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
1576 struct alx_priv *alx = netdev_priv(netdev);
1577 return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
1582 struct alx_priv *alx = netdev_priv(dev);
1584 alx_schedule_reset(alx);
1590 struct alx_priv *alx = netdev_priv(netdev);
1591 struct alx_hw *hw = &alx->hw;
1611 struct alx_priv *alx = netdev_priv(netdev);
1612 struct alx_hw *hw = &alx->hw;
1625 struct alx_priv *alx = netdev_priv(netdev);
1630 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
1636 struct alx_priv *alx = netdev_priv(netdev);
1639 if (alx->hw.pdev->msix_enabled) {
1640 alx_intr_msix_misc(0, alx);
1641 for (i = 0; i < alx->num_txq; i++)
1642 alx_intr_msix_ring(0, alx->qnapi[i]);
1643 } else if (alx->hw.pdev->msi_enabled)
1644 alx_intr_msi(0, alx);
1646 alx_intr_legacy(0, alx);
1653 struct alx_priv *alx = netdev_priv(dev);
1654 struct alx_hw_stats *hw_stats = &alx->hw.stats;
1656 spin_lock(&alx->stats_lock);
1658 alx_update_hw_stats(&alx->hw);
1694 spin_unlock(&alx->stats_lock);
1717 struct alx_priv *alx;
1726 /* The alx chip can DMA to 64-bit addresses, but it uses a single
1756 netdev = alloc_etherdev_mqs(sizeof(*alx),
1764 alx = netdev_priv(netdev);
1765 spin_lock_init(&alx->hw.mdio_lock);
1766 spin_lock_init(&alx->irq_lock);
1767 spin_lock_init(&alx->stats_lock);
1768 alx->dev = netdev;
1769 alx->hw.pdev = pdev;
1770 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
1772 hw = &alx->hw;
1773 pci_set_drvdata(pdev, alx);
1790 err = alx_init_sw(alx);
1796 mutex_lock(&alx->mtx);
1854 mutex_unlock(&alx->mtx);
1856 INIT_WORK(&alx->link_check_wk, alx_link_check);
1857 INIT_WORK(&alx->reset_wk, alx_reset);
1873 mutex_unlock(&alx->mtx);
1887 struct alx_priv *alx = pci_get_drvdata(pdev);
1888 struct alx_hw *hw = &alx->hw;
1893 unregister_netdev(alx->dev);
1899 mutex_destroy(&alx->mtx);
1901 free_netdev(alx->dev);
1906 struct alx_priv *alx = dev_get_drvdata(dev);
1908 if (!netif_running(alx->dev))
1912 netif_device_detach(alx->dev);
1914 mutex_lock(&alx->mtx);
1915 __alx_stop(alx);
1916 mutex_unlock(&alx->mtx);
1924 struct alx_priv *alx = dev_get_drvdata(dev);
1925 struct alx_hw *hw = &alx->hw;
1929 mutex_lock(&alx->mtx);
1932 if (!netif_running(alx->dev)) {
1937 err = __alx_open(alx, true);
1941 netif_device_attach(alx->dev);
1944 mutex_unlock(&alx->mtx);
1954 struct alx_priv *alx = pci_get_drvdata(pdev);
1955 struct net_device *netdev = alx->dev;
1960 mutex_lock(&alx->mtx);
1964 alx_halt(alx);
1972 mutex_unlock(&alx->mtx);
1979 struct alx_priv *alx = pci_get_drvdata(pdev);
1980 struct alx_hw *hw = &alx->hw;
1985 mutex_lock(&alx->mtx);
1998 mutex_unlock(&alx->mtx);
2005 struct alx_priv *alx = pci_get_drvdata(pdev);
2006 struct net_device *netdev = alx->dev;
2010 mutex_lock(&alx->mtx);
2013 alx_activate(alx);
2017 mutex_unlock(&alx->mtx);