Lines matching refs:rp (the struct rhine_private * driver-private pointer) in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/

434 	struct rhine_private *rp = netdev_priv(dev);
435 void __iomem *ioaddr = rp->base;
440 if (rp->quirks & rqStatusWBRace)
451 struct rhine_private *rp = netdev_priv(dev);
452 void __iomem *ioaddr = rp->base;
455 if (rp->quirks & rqWOL) {
465 if (rp->quirks & rq6patterns)
470 if (rp->quirks & rq6patterns)
475 if (rp->quirks & rq6patterns)
507 struct rhine_private *rp = netdev_priv(dev);
508 void __iomem *ioaddr = rp->base;
518 if (rp->quirks & rqForceReset)
552 struct rhine_private *rp = netdev_priv(dev);
553 void __iomem *ioaddr = rp->base;
564 enable_mmio(pioaddr, rp->quirks);
568 if (rp->quirks & rqWOL)
584 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
585 struct net_device *dev = rp->dev;
586 void __iomem *ioaddr = rp->base;
605 struct rhine_private *rp = netdev_priv(dev);
611 if (rp->quirks & rqRhineI)
638 struct rhine_private *rp;
717 rp = netdev_priv(dev);
718 rp->dev = dev;
719 rp->quirks = quirks;
720 rp->pioaddr = pioaddr;
721 rp->pdev = pdev;
754 rp->base = ioaddr;
776 spin_lock_init(&rp->lock);
777 INIT_WORK(&rp->reset_task, rhine_reset_task);
779 rp->mii_if.dev = dev;
780 rp->mii_if.mdio_read = mdio_read;
781 rp->mii_if.mdio_write = mdio_write;
782 rp->mii_if.phy_id_mask = 0x1f;
783 rp->mii_if.reg_num_mask = 0x1f;
790 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
792 if (rp->quirks & rqRhineI)
817 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
821 mii_status, rp->mii_if.advertising,
832 rp->mii_if.phy_id = phy_id;
851 struct rhine_private *rp = netdev_priv(dev);
855 ring = pci_alloc_consistent(rp->pdev,
863 if (rp->quirks & rqRhineI) {
864 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
866 &rp->tx_bufs_dma);
867 if (rp->tx_bufs == NULL) {
868 pci_free_consistent(rp->pdev,
876 rp->rx_ring = ring;
877 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
878 rp->rx_ring_dma = ring_dma;
879 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
886 struct rhine_private *rp = netdev_priv(dev);
888 pci_free_consistent(rp->pdev,
891 rp->rx_ring, rp->rx_ring_dma);
892 rp->tx_ring = NULL;
894 if (rp->tx_bufs)
895 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
896 rp->tx_bufs, rp->tx_bufs_dma);
898 rp->tx_bufs = NULL;
904 struct rhine_private *rp = netdev_priv(dev);
908 rp->dirty_rx = rp->cur_rx = 0;
910 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
911 rp->rx_head_desc = &rp->rx_ring[0];
912 next = rp->rx_ring_dma;
916 rp->rx_ring[i].rx_status = 0;
917 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
919 rp->rx_ring[i].next_desc = cpu_to_le32(next);
920 rp->rx_skbuff[i] = NULL;
923 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
927 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
928 rp->rx_skbuff[i] = skb;
933 rp->rx_skbuff_dma[i] =
934 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
937 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
938 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
940 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
945 struct rhine_private *rp = netdev_priv(dev);
950 rp->rx_ring[i].rx_status = 0;
951 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
952 if (rp->rx_skbuff[i]) {
953 pci_unmap_single(rp->pdev,
954 rp->rx_skbuff_dma[i],
955 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
956 dev_kfree_skb(rp->rx_skbuff[i]);
958 rp->rx_skbuff[i] = NULL;
964 struct rhine_private *rp = netdev_priv(dev);
968 rp->dirty_tx = rp->cur_tx = 0;
969 next = rp->tx_ring_dma;
971 rp->tx_skbuff[i] = NULL;
972 rp->tx_ring[i].tx_status = 0;
973 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
975 rp->tx_ring[i].next_desc = cpu_to_le32(next);
976 if (rp->quirks & rqRhineI)
977 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
979 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
985 struct rhine_private *rp = netdev_priv(dev);
989 rp->tx_ring[i].tx_status = 0;
990 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
991 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
992 if (rp->tx_skbuff[i]) {
993 if (rp->tx_skbuff_dma[i]) {
994 pci_unmap_single(rp->pdev,
995 rp->tx_skbuff_dma[i],
996 rp->tx_skbuff[i]->len,
999 dev_kfree_skb(rp->tx_skbuff[i]);
1001 rp->tx_skbuff[i] = NULL;
1002 rp->tx_buf[i] = NULL;
1008 struct rhine_private *rp = netdev_priv(dev);
1009 void __iomem *ioaddr = rp->base;
1011 mii_check_media(&rp->mii_if, debug, init_media);
1013 if (rp->mii_if.full_duplex)
1021 rp->mii_if.force_media, netif_carrier_ok(dev));
1042 struct rhine_private *rp = netdev_priv(dev);
1043 void __iomem *ioaddr = rp->base;
1053 rp->tx_thresh = 0x20;
1054 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1056 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1057 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1061 napi_enable(&rp->napi);
1114 struct rhine_private *rp = netdev_priv(dev);
1115 void __iomem *ioaddr = rp->base;
1118 rhine_disable_linkmon(ioaddr, rp->quirks);
1133 struct rhine_private *rp = netdev_priv(dev);
1134 void __iomem *ioaddr = rp->base;
1136 rhine_disable_linkmon(ioaddr, rp->quirks);
1150 struct rhine_private *rp = netdev_priv(dev);
1151 void __iomem *ioaddr = rp->base;
1154 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1161 dev->name, rp->pdev->irq);
1165 free_irq(rp->pdev->irq, dev);
1176 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1185 struct rhine_private *rp = container_of(work, struct rhine_private,
1187 struct net_device *dev = rp->dev;
1190 disable_irq(rp->pdev->irq);
1192 napi_disable(&rp->napi);
1194 spin_lock_bh(&rp->lock);
1206 spin_unlock_bh(&rp->lock);
1207 enable_irq(rp->pdev->irq);
1216 struct rhine_private *rp = netdev_priv(dev);
1217 void __iomem *ioaddr = rp->base;
1222 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1224 schedule_work(&rp->reset_task);
1230 struct rhine_private *rp = netdev_priv(dev);
1231 void __iomem *ioaddr = rp->base;
1239 entry = rp->cur_tx % TX_RING_SIZE;
1244 rp->tx_skbuff[entry] = skb;
1246 if ((rp->quirks & rqRhineI) &&
1252 rp->tx_skbuff[entry] = NULL;
1258 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1260 memset(rp->tx_buf[entry] + skb->len, 0,
1262 rp->tx_skbuff_dma[entry] = 0;
1263 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1264 (rp->tx_buf[entry] -
1265 rp->tx_bufs));
1267 rp->tx_skbuff_dma[entry] =
1268 pci_map_single(rp->pdev, skb->data, skb->len,
1270 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1273 rp->tx_ring[entry].desc_length =
1277 spin_lock_irqsave(&rp->lock, flags);
1279 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1282 rp->cur_tx++;
1291 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1294 spin_unlock_irqrestore(&rp->lock, flags);
1298 dev->name, rp->cur_tx-1, entry);
1308 struct rhine_private *rp = netdev_priv(dev);
1309 void __iomem *ioaddr = rp->base;
1334 napi_schedule(&rp->napi);
1374 struct rhine_private *rp = netdev_priv(dev);
1375 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1377 spin_lock(&rp->lock);
1380 while (rp->dirty_tx != rp->cur_tx) {
1381 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1401 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1404 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1409 if (rp->quirks & rqRhineI)
1417 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1421 if (rp->tx_skbuff_dma[entry]) {
1422 pci_unmap_single(rp->pdev,
1423 rp->tx_skbuff_dma[entry],
1424 rp->tx_skbuff[entry]->len,
1427 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1428 rp->tx_skbuff[entry] = NULL;
1429 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1431 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1434 spin_unlock(&rp->lock);
1440 struct rhine_private *rp = netdev_priv(dev);
1442 int entry = rp->cur_rx % RX_RING_SIZE;
1447 le32_to_cpu(rp->rx_head_desc->rx_status));
1452 struct rx_desc *desc = rp->rx_head_desc;
1472 rp->rx_head_desc, &rp->rx_ring[entry]);
1489 spin_lock(&rp->lock);
1491 spin_unlock(&rp->lock);
1504 pci_dma_sync_single_for_cpu(rp->pdev,
1505 rp->rx_skbuff_dma[entry],
1506 rp->rx_buf_sz,
1510 rp->rx_skbuff[entry]->data,
1513 pci_dma_sync_single_for_device(rp->pdev,
1514 rp->rx_skbuff_dma[entry],
1515 rp->rx_buf_sz,
1518 skb = rp->rx_skbuff[entry];
1525 rp->rx_skbuff[entry] = NULL;
1527 pci_unmap_single(rp->pdev,
1528 rp->rx_skbuff_dma[entry],
1529 rp->rx_buf_sz,
1537 entry = (++rp->cur_rx) % RX_RING_SIZE;
1538 rp->rx_head_desc = &rp->rx_ring[entry];
1542 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1544 entry = rp->dirty_rx % RX_RING_SIZE;
1545 if (rp->rx_skbuff[entry] == NULL) {
1546 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1547 rp->rx_skbuff[entry] = skb;
1551 rp->rx_skbuff_dma[entry] =
1552 pci_map_single(rp->pdev, skb->data,
1553 rp->rx_buf_sz,
1555 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1557 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1577 struct rhine_private *rp = netdev_priv(dev);
1578 void __iomem *ioaddr = rp->base;
1579 int entry = rp->dirty_tx % TX_RING_SIZE;
1591 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1612 struct rhine_private *rp = netdev_priv(dev);
1613 void __iomem *ioaddr = rp->base;
1615 spin_lock(&rp->lock);
1630 if (rp->tx_thresh < 0xE0)
1631 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1635 dev->name, rp->tx_thresh);
1645 if (rp->tx_thresh < 0xE0) {
1646 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1651 dev->name, rp->tx_thresh);
1665 spin_unlock(&rp->lock);
1670 struct rhine_private *rp = netdev_priv(dev);
1671 void __iomem *ioaddr = rp->base;
1674 spin_lock_irqsave(&rp->lock, flags);
1678 spin_unlock_irqrestore(&rp->lock, flags);
1685 struct rhine_private *rp = netdev_priv(dev);
1686 void __iomem *ioaddr = rp->base;
1713 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1718 struct rhine_private *rp = netdev_priv(dev);
1722 strcpy(info->bus_info, pci_name(rp->pdev));
1727 struct rhine_private *rp = netdev_priv(dev);
1730 spin_lock_irq(&rp->lock);
1731 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1732 spin_unlock_irq(&rp->lock);
1739 struct rhine_private *rp = netdev_priv(dev);
1742 spin_lock_irq(&rp->lock);
1743 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1744 spin_unlock_irq(&rp->lock);
1745 rhine_set_carrier(&rp->mii_if);
1752 struct rhine_private *rp = netdev_priv(dev);
1754 return mii_nway_restart(&rp->mii_if);
1759 struct rhine_private *rp = netdev_priv(dev);
1761 return mii_link_ok(&rp->mii_if);
1776 struct rhine_private *rp = netdev_priv(dev);
1778 if (!(rp->quirks & rqWOL))
1781 spin_lock_irq(&rp->lock);
1784 wol->wolopts = rp->wolopts;
1785 spin_unlock_irq(&rp->lock);
1790 struct rhine_private *rp = netdev_priv(dev);
1794 if (!(rp->quirks & rqWOL))
1800 spin_lock_irq(&rp->lock);
1801 rp->wolopts = wol->wolopts;
1802 spin_unlock_irq(&rp->lock);
1821 struct rhine_private *rp = netdev_priv(dev);
1827 spin_lock_irq(&rp->lock);
1828 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1829 spin_unlock_irq(&rp->lock);
1830 rhine_set_carrier(&rp->mii_if);
1837 struct rhine_private *rp = netdev_priv(dev);
1838 void __iomem *ioaddr = rp->base;
1840 napi_disable(&rp->napi);
1841 cancel_work_sync(&rp->reset_task);
1844 spin_lock_irq(&rp->lock);
1852 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1860 spin_unlock_irq(&rp->lock);
1862 free_irq(rp->pdev->irq, dev);
1874 struct rhine_private *rp = netdev_priv(dev);
1878 pci_iounmap(pdev, rp->base);
1889 struct rhine_private *rp = netdev_priv(dev);
1890 void __iomem *ioaddr = rp->base;
1892 if (!(rp->quirks & rqWOL))
1898 if (rp->quirks & rq6patterns)
1901 if (rp->wolopts & WAKE_MAGIC) {
1910 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1913 if (rp->wolopts & WAKE_PHY)
1916 if (rp->wolopts & WAKE_UCAST)
1919 if (rp->wolopts) {
1937 struct rhine_private *rp = netdev_priv(dev);
1943 napi_disable(&rp->napi);
1948 spin_lock_irqsave(&rp->lock, flags);
1950 spin_unlock_irqrestore(&rp->lock, flags);
1959 struct rhine_private *rp = netdev_priv(dev);
1976 spin_lock_irqsave(&rp->lock, flags);
1978 enable_mmio(rp->pioaddr, rp->quirks);
1986 spin_unlock_irqrestore(&rp->lock, flags);
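
For orientation, the pattern these matches share: each function first recovers the driver-private state rp from the net_device with netdev_priv(dev), then reaches the chip registers through rp->base and serializes ring/register access with rp->lock. The sketch below illustrates only that access pattern; the trimmed struct and the EXAMPLE_REG offset are placeholders for illustration, not the driver's actual definitions.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/io.h>

/* Trimmed to the fields visible in the listing above; not the full layout. */
struct rhine_private_sketch {
	void __iomem *base;     /* mapped registers (rp->base) */
	spinlock_t lock;        /* guards rings and registers (rp->lock) */
	u32 quirks;             /* chip-revision quirk flags (rp->quirks) */
	struct pci_dev *pdev;   /* owning PCI device (rp->pdev) */
};

#define EXAMPLE_REG 0x00        /* placeholder offset, not a real register */

static void example_poke(struct net_device *dev)
{
	/* netdev_priv() returns the private area allocated with the netdev */
	struct rhine_private_sketch *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&rp->lock, flags);
	val = ioread8(ioaddr + EXAMPLE_REG);    /* read-modify-write under lock */
	iowrite8(val | 0x01, ioaddr + EXAMPLE_REG);
	spin_unlock_irqrestore(&rp->lock, flags);
}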