Lines matching refs:np

322 static void free_rxtx_rings(struct netdev_private *np);
326 static void free_ringdesc(struct netdev_private *np);
353 struct netdev_private *np;
373 dev = alloc_etherdev(sizeof(*np));
393 np = netdev_priv(dev);
394 np->pci_dev = pdev;
395 np->chip_id = chip_idx;
396 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
397 spin_lock_init(&np->lock);
398 np->mii_if.dev = dev;
399 np->mii_if.mdio_read = mdio_read;
400 np->mii_if.mdio_write = mdio_write;
401 np->base_addr = ioaddr;
411 np->mii_if.full_duplex = 1;
418 np->mii_if.full_duplex = 1;
420 if (np->mii_if.full_duplex)
421 np->mii_if.force_media = 1;
435 if (np->drv_flags & CanHaveMII) {
440 np->phys[phy_idx++] = phy;
441 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
442 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
446 np->mii, phy, mii_status,
447 np->mii_if.advertising);
450 np->mii_cnt = phy_idx;
451 np->mii_if.phy_id = np->phys[0];
556 struct netdev_private *np = netdev_priv(dev);
557 void __iomem *mdio_addr = np->base_addr + MIICtrl;
586 struct netdev_private *np = netdev_priv(dev);
587 void __iomem *mdio_addr = np->base_addr + MIICtrl;
591 if (location == 4 && phy_id == np->phys[0])
592 np->mii_if.advertising = value;
618 struct netdev_private *np = netdev_priv(dev);
619 void __iomem *ioaddr = np->base_addr;
620 const int irq = np->pci_dev->irq;
637 spin_lock_irq(&np->lock);
640 spin_unlock_irq(&np->lock);
647 timer_setup(&np->timer, netdev_timer, 0);
648 np->timer.expires = jiffies + 1*HZ;
649 add_timer(&np->timer);
660 struct netdev_private *np = netdev_priv(dev);
664 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
667 return np->csr6;
669 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
675 np->phys[0]);
678 return np->csr6;
684 np->phys[0]);
688 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
697 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
702 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
703 negotiated = mii_reg & np->mii_if.advertising;
708 duplex |= np->mii_if.force_media;
710 result = np->csr6 & ~0x20000200;
715 if (result != np->csr6 && debug)
719 np->phys[0]);
726 struct netdev_private *np = netdev_priv(dev);
727 void __iomem *ioaddr = np->base_addr;
732 if (new==np->csr6)
735 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
757 np->csr6 = new;
759 iowrite32(np->csr6, ioaddr + NetworkConfig);
761 np->mii_if.full_duplex = 1;
766 struct netdev_private *np = from_timer(np, t, timer);
767 struct net_device *dev = pci_get_drvdata(np->pci_dev);
768 void __iomem *ioaddr = np->base_addr;
774 spin_lock_irq(&np->lock);
776 spin_unlock_irq(&np->lock);
777 np->timer.expires = jiffies + 10*HZ;
778 add_timer(&np->timer);
783 struct netdev_private *np = netdev_priv(dev);
786 np->rx_head_desc = &np->rx_ring[0];
787 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
791 np->rx_ring[i].length = np->rx_buf_sz;
792 np->rx_ring[i].status = 0;
793 np->rx_skbuff[i] = NULL;
796 np->rx_ring[i-1].length |= DescEndRing;
800 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
801 np->rx_skbuff[i] = skb;
804 np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
805 np->rx_buf_sz,
808 np->rx_ring[i].buffer1 = np->rx_addr[i];
809 np->rx_ring[i].status = DescOwned;
812 np->cur_rx = 0;
813 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
817 np->tx_skbuff[i] = NULL;
818 np->tx_ring[i].status = 0;
820 np->tx_full = 0;
821 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
823 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
824 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
825 np->base_addr + TxRingPtr);
829 static void free_rxtx_rings(struct netdev_private* np)
834 np->rx_ring[i].status = 0;
835 if (np->rx_skbuff[i]) {
836 dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
837 np->rx_skbuff[i]->len,
839 dev_kfree_skb(np->rx_skbuff[i]);
841 np->rx_skbuff[i] = NULL;
844 if (np->tx_skbuff[i]) {
845 dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
846 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
847 dev_kfree_skb(np->tx_skbuff[i]);
849 np->tx_skbuff[i] = NULL;
855 struct netdev_private *np = netdev_priv(dev);
856 void __iomem *ioaddr = np->base_addr;
899 np->csr6 = 0;
913 struct netdev_private *np = netdev_priv(dev);
914 void __iomem *ioaddr = np->base_addr;
915 const int irq = np->pci_dev->irq;
922 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
924 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
926 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
928 printk(KERN_CONT " %08x", np->tx_ring[i].status);
932 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
936 spin_lock_irq(&np->lock);
943 iowrite32(1, np->base_addr+PCIBusCfg);
946 free_rxtx_rings(np);
949 spin_unlock_irq(&np->lock);
954 np->stats.tx_errors++;
960 struct netdev_private *np = netdev_priv(dev);
962 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
964 np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
967 &np->ring_dma_addr, GFP_KERNEL);
968 if(!np->rx_ring)
974 static void free_ringdesc(struct netdev_private *np)
976 dma_free_coherent(&np->pci_dev->dev,
979 np->rx_ring, np->ring_dma_addr);
985 struct netdev_private *np = netdev_priv(dev);
992 entry = np->cur_tx % TX_RING_SIZE;
994 np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
996 np->tx_skbuff[entry] = skb;
998 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1000 np->tx_ring[entry].length = DescWholePkt | skb->len;
1004 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1005 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1008 np->tx_ring[entry].length |= DescEndRing;
1012 * increasing np->cur_tx and setting DescOwned:
1013 * - if np->cur_tx is increased first the interrupt
1018 * since the np->cur_tx was not yet increased.
1020 spin_lock_irq(&np->lock);
1021 np->cur_tx++;
1024 np->tx_ring[entry].status = DescOwned;
1026 iowrite32(0, np->base_addr + TxStartDemand);
1027 np->tx_q_bytes += skb->len;
1030 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1031 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1034 np->tx_full = 1;
1036 spin_unlock_irq(&np->lock);
1040 np->cur_tx, entry);
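The comment excerpted around source lines 1012-1018 describes the ordering hazard in start_tx: if np->cur_tx is advanced before DescOwned is set, the interrupt handler can treat the still-unowned descriptor as already transmitted; if DescOwned is set first, the NIC may complete the packet before cur_tx is advanced and the completion path would ignore it. The driver closes the window by doing both updates under np->lock (lines 1020-1024), which the interrupt handler also takes. Below is a minimal sketch of that pattern, with hypothetical names (my_priv, my_queue_tx, my_tx_complete, MY_OWNED) standing in for the driver's real structures; it illustrates the locking discipline only, not the actual winbond-840 descriptors.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define MY_TX_RING_SIZE 16
    #define MY_OWNED        0x80000000u     /* hypothetical "NIC owns descriptor" bit */

    struct my_priv {
            spinlock_t lock;
            unsigned int cur_tx;            /* producer index, advanced when queuing */
            unsigned int dirty_tx;          /* consumer index, advanced on completion */
            u32 status[MY_TX_RING_SIZE];    /* stands in for the hardware Tx descriptors */
    };

    static void my_queue_tx(struct my_priv *p, unsigned int entry)
    {
            /* Advance the index and hand the descriptor to the NIC under the
             * same lock the completion path takes, so the interrupt handler
             * can never observe one update without the other. */
            spin_lock_irq(&p->lock);
            p->cur_tx++;
            p->status[entry] = MY_OWNED;
            spin_unlock_irq(&p->lock);
    }

    static void my_tx_complete(struct my_priv *p)   /* runs from the interrupt handler */
    {
            spin_lock(&p->lock);
            while (p->cur_tx - p->dirty_tx > 0) {
                    unsigned int entry = p->dirty_tx % MY_TX_RING_SIZE;

                    if (p->status[entry] & MY_OWNED)
                            break;          /* NIC has not finished this descriptor yet */
                    p->dirty_tx++;          /* reclaim the completed descriptor */
            }
            spin_unlock(&p->lock);
    }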
1047 struct netdev_private *np = netdev_priv(dev);
1048 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1049 int entry = np->dirty_tx % TX_RING_SIZE;
1050 int tx_status = np->tx_ring[entry].status;
1060 np->stats.tx_errors++;
1061 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1062 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1063 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1064 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1065 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1066 np->stats.tx_heartbeat_errors++;
1073 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1074 np->stats.collisions += (tx_status >> 3) & 15;
1075 np->stats.tx_packets++;
1078 dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
1079 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
1080 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1081 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1082 np->tx_skbuff[entry] = NULL;
1084 if (np->tx_full &&
1085 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1086 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1088 np->tx_full = 0;
1099 struct netdev_private *np = netdev_priv(dev);
1100 void __iomem *ioaddr = np->base_addr;
1126 np->cur_tx != np->dirty_tx) {
1127 spin_lock(&np->lock);
1129 spin_unlock(&np->lock);
1143 spin_lock(&np->lock);
1148 spin_unlock(&np->lock);
1163 struct netdev_private *np = netdev_priv(dev);
1164 int entry = np->cur_rx % RX_RING_SIZE;
1165 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1169 entry, np->rx_ring[entry].status);
1174 struct w840_rx_desc *desc = np->rx_head_desc;
1188 np->cur_rx, status);
1189 np->stats.rx_length_errors++;
1196 np->stats.rx_errors++; /* end of a packet.*/
1197 if (status & 0x0890) np->stats.rx_length_errors++;
1198 if (status & 0x004C) np->stats.rx_frame_errors++;
1199 if (status & 0x0002) np->stats.rx_crc_errors++;
1216 dma_sync_single_for_cpu(&np->pci_dev->dev,
1217 np->rx_addr[entry],
1218 np->rx_skbuff[entry]->len,
1220 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1222 dma_sync_single_for_device(&np->pci_dev->dev,
1223 np->rx_addr[entry],
1224 np->rx_skbuff[entry]->len,
1227 dma_unmap_single(&np->pci_dev->dev,
1228 np->rx_addr[entry],
1229 np->rx_skbuff[entry]->len,
1231 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1232 np->rx_skbuff[entry] = NULL;
1244 np->stats.rx_packets++;
1245 np->stats.rx_bytes += pkt_len;
1247 entry = (++np->cur_rx) % RX_RING_SIZE;
1248 np->rx_head_desc = &np->rx_ring[entry];
1252 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1254 entry = np->dirty_rx % RX_RING_SIZE;
1255 if (np->rx_skbuff[entry] == NULL) {
1256 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1257 np->rx_skbuff[entry] = skb;
1260 np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
1262 np->rx_buf_sz,
1264 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1267 np->rx_ring[entry].status = DescOwned;
1275 struct netdev_private *np = netdev_priv(dev);
1276 void __iomem *ioaddr = np->base_addr;
1282 spin_lock(&np->lock);
1290 new = np->csr6 + 0x4000;
1292 new = (np->csr6 >> 14)&0x7f;
1297 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1303 np->stats.rx_errors++;
1310 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1312 spin_unlock(&np->lock);
1317 struct netdev_private *np = netdev_priv(dev);
1318 void __iomem *ioaddr = np->base_addr;
1321 spin_lock_irq(&np->lock);
1323 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1324 spin_unlock_irq(&np->lock);
1326 return &np->stats;
1332 struct netdev_private *np = netdev_priv(dev);
1333 void __iomem *ioaddr = np->base_addr;
1366 struct netdev_private *np = netdev_priv(dev);
1368 spin_lock_irq(&np->lock);
1369 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1370 spin_unlock_irq(&np->lock);
1375 struct netdev_private *np = netdev_priv(dev);
1378 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1384 struct netdev_private *np = netdev_priv(dev);
1386 spin_lock_irq(&np->lock);
1387 mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1388 spin_unlock_irq(&np->lock);
1396 struct netdev_private *np = netdev_priv(dev);
1399 spin_lock_irq(&np->lock);
1400 rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1401 spin_unlock_irq(&np->lock);
1408 struct netdev_private *np = netdev_priv(dev);
1409 return mii_nway_restart(&np->mii_if);
1414 struct netdev_private *np = netdev_priv(dev);
1415 return mii_link_ok(&np->mii_if);
1441 struct netdev_private *np = netdev_priv(dev);
1449 spin_lock_irq(&np->lock);
1451 spin_unlock_irq(&np->lock);
1455 spin_lock_irq(&np->lock);
1457 spin_unlock_irq(&np->lock);
1466 struct netdev_private *np = netdev_priv(dev);
1467 void __iomem *ioaddr = np->base_addr;
1476 np->cur_tx, np->dirty_tx,
1477 np->cur_rx, np->dirty_rx);
1481 spin_lock_irq(&np->lock);
1485 spin_unlock_irq(&np->lock);
1487 free_irq(np->pci_dev->irq, dev);
1492 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1498 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1501 i, np->tx_ring[i].length,
1502 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1503 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1506 i, np->rx_ring[i].length,
1507 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1512 del_timer_sync(&np->timer);
1514 free_rxtx_rings(np);
1515 free_ringdesc(np);
1525 struct netdev_private *np = netdev_priv(dev);
1527 pci_iounmap(pdev, np->base_addr);
1537 * spin_lock_irq(np->lock), doesn't touch hw if not present
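The fragment above comes from the driver's suspend/resume synchronization comment: get_stats serializes only on np->lock and must not touch the hardware once the device has been detached. Below is a minimal sketch of that guard under those assumptions, with hypothetical names (my_priv, my_get_stats, MY_RX_MISSED) rather than the driver's real register map.

    #include <linux/netdevice.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_RX_MISSED 0x40               /* hypothetical missed-frame counter offset */

    struct my_priv {
            spinlock_t lock;
            struct net_device_stats stats;
            void __iomem *base_addr;
    };

    static struct net_device_stats *my_get_stats(struct net_device *dev)
    {
            struct my_priv *p = netdev_priv(dev);

            spin_lock_irq(&p->lock);
            /* Read the MMIO counter only while the interface is up and the
             * device is present; after detach the access would be invalid. */
            if (netif_running(dev) && netif_device_present(dev))
                    p->stats.rx_missed_errors +=
                            ioread32(p->base_addr + MY_RX_MISSED) & 0xffff;
            spin_unlock_irq(&p->lock);

            return &p->stats;
    }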
1558 struct netdev_private *np = netdev_priv(dev);
1559 void __iomem *ioaddr = np->base_addr;
1563 del_timer_sync(&np->timer);
1565 spin_lock_irq(&np->lock);
1569 spin_unlock_irq(&np->lock);
1571 synchronize_irq(np->pci_dev->irq);
1574 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1578 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1582 free_rxtx_rings(np);
1593 struct netdev_private *np = netdev_priv(dev);
1599 spin_lock_irq(&np->lock);
1600 iowrite32(1, np->base_addr+PCIBusCfg);
1601 ioread32(np->base_addr+PCIBusCfg);
1606 spin_unlock_irq(&np->lock);
1610 mod_timer(&np->timer, jiffies + 1*HZ);