Lines matching refs: np (the fealnx driver's struct netdev_private instance)

475 struct netdev_private *np;
536 np = netdev_priv(dev);
537 np->mem = ioaddr;
538 spin_lock_init(&np->lock);
539 np->pci_dev = pdev;
540 np->flags = skel_netdrv_tbl[chip_id].flags;
542 np->mii.dev = dev;
543 np->mii.mdio_read = mdio_read;
544 np->mii.mdio_write = mdio_write;
545 np->mii.phy_id_mask = 0x1f;
546 np->mii.reg_num_mask = 0x1f;
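
The five assignments above (lines 542-546) wire the device into the kernel's generic MII helper layer (struct mii_if_info, include/linux/mii.h), which is what lets the ethtool and ioctl paths near the end of this listing reuse mii_ethtool_get_link_ksettings(), mii_nway_restart() and friends. A minimal sketch of the callback shapes the helpers expect; the register-access bodies are placeholders (not this driver's actual MDIO protocol) and wire_up_mii() is a hypothetical wrapper for illustration:

	#include <linux/mii.h>
	#include <linux/netdevice.h>

	/* Prototypes are fixed by struct mii_if_info: */
	static int mdio_read(struct net_device *dev, int phy_id, int location)
	{
		return 0xffff;		/* placeholder register access */
	}

	static void mdio_write(struct net_device *dev, int phy_id, int location,
			       int val)
	{
		/* placeholder register access */
	}

	static void wire_up_mii(struct netdev_private *np, struct net_device *dev)
	{
		np->mii.dev = dev;		/* back-pointer the helpers use */
		np->mii.mdio_read = mdio_read;
		np->mii.mdio_write = mdio_write;
		np->mii.phy_id_mask = 0x1f;	/* 5-bit PHY address */
		np->mii.reg_num_mask = 0x1f;	/* 5-bit register number */
	}
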
554 np->rx_ring = ring_space;
555 np->rx_ring_dma = ring_dma;
563 np->tx_ring = ring_space;
564 np->tx_ring_dma = ring_dma;
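
The ring_space/ring_dma pairs stored at lines 554-555 and 563-564 are the classic output of dma_alloc_coherent(): one CPU-visible pointer plus one bus address for the NIC, sized by the same RX_TOTAL_SIZE/TX_TOTAL_SIZE constants freed at lines 663-667. A sketch of the allocation; the error-label names are assumptions:

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_free_dev;		/* label name assumed */
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_free_rx;		/* unwinds via lines 666-667 */
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

Because the rings live in coherent memory, descriptor updates need no dma_sync_* calls; only the packet buffers, mapped later with dma_map_single(), do.
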
567 if (np->flags == HAS_MII_XCVR) {
570 for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
575 np->phys[phy_idx++] = phy;
583 data = mdio_read(dev, np->phys[0], 2);
585 np->PHYType = SeeqPHY;
587 np->PHYType = AhdocPHY;
589 np->PHYType = MarvellPHY;
591 np->PHYType = Myson981;
593 np->PHYType = LevelOnePHY;
595 np->PHYType = OtherPHY;
600 np->mii_cnt = phy_idx;
606 np->phys[0] = 32;
610 np->PHYType = MysonPHY;
612 np->PHYType = OtherPHY;
614 np->mii.phy_id = np->phys[0];
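
Lines 570-600 scan the 31 possible MII addresses and record responders in np->phys[]; line 583 then reads register 2 (MII_PHYSID2) of the first PHY found to classify it into the PHYType values above. The scan presumably follows the conventional probe idiom sketched here; the probed register and the 0x0000/0xffff "nobody home" test are the usual convention, not confirmed by the listing:

	for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); phy++) {
		int mii_status = mdio_read(dev, phy, MII_BMSR);

		/* all-ones (no transceiver) and all-zeros both mean no PHY */
		if (mii_status != 0xffff && mii_status != 0x0000)
			np->phys[phy_idx++] = phy;
	}
	np->mii_cnt = phy_idx;
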
622 np->mii.full_duplex = 1;
623 np->default_port = option & 15;
627 np->mii.full_duplex = full_duplex[card_idx];
629 if (np->mii.full_duplex) {
632 // if (np->PHYType==MarvellPHY)
633 if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
636 data = mdio_read(dev, np->phys[0], 9);
638 mdio_write(dev, np->phys[0], 9, data);
641 if (np->flags == HAS_MII_XCVR)
642 mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
645 np->mii.force_media = 1;
663 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
664 np->tx_ring_dma);
666 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
667 np->rx_ring_dma);
683 struct netdev_private *np = netdev_priv(dev);
685 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
686 np->tx_ring_dma);
687 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
688 np->rx_ring_dma);
690 pci_iounmap(pdev, np->mem);
749 struct netdev_private *np = netdev_priv(dev);
750 void __iomem *miiport = np->mem + MANAGEMENT;
788 struct netdev_private *np = netdev_priv(dev);
789 void __iomem *miiport = np->mem + MANAGEMENT;
820 struct netdev_private *np = netdev_priv(dev);
821 void __iomem *ioaddr = np->mem;
822 const int irq = np->pci_dev->irq;
837 iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
838 iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
857 np->bcrvalue = 0x10; /* little-endian, 8 burst length */
859 np->bcrvalue |= 0x04; /* big-endian */
864 np->crvalue = 0xa00;
867 np->crvalue = 0xe00; /* rx 128 burst length */
872 // np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
873 np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
874 if (np->pci_dev->device == 0x891) {
875 np->bcrvalue |= 0x200; /* set PROG bit */
876 np->crvalue |= CR_W_ENH; /* set enhanced bit */
877 np->imrvalue |= ETI;
879 iowrite32(np->bcrvalue, ioaddr + BCR);
882 dev->if_port = np->default_port;
886 // np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
887 np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
888 np->mii.full_duplex = np->mii.force_media;
890 if (np->linkok)
898 iowrite32(np->imrvalue, ioaddr + IMR);
904 timer_setup(&np->timer, netdev_timer, 0);
905 np->timer.expires = RUN_AT(3 * HZ);
908 add_timer(&np->timer);
910 timer_setup(&np->reset_timer, reset_timer, 0);
911 np->reset_timer_armed = 0;
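
Lines 904-911 arm the two driver timers with the modern timer_setup() API; the matching callbacks at lines 1077-1078 and 1166-1167 recover the private struct with from_timer(), which is container_of() specialized for timer callbacks. The pattern, sketched; RUN_AT is this driver's own macro, conventionally defined as (jiffies + (x)):

	static void netdev_timer(struct timer_list *t)
	{
		/* map the timer_list back to the netdev_private embedding it */
		struct netdev_private *np = from_timer(np, t, timer);
		struct net_device *dev = np->mii.dev;

		/* ... poll link state, then re-arm (compare line 1106) ... */
		np->timer.expires = RUN_AT(10 * HZ);
		add_timer(&np->timer);
	}

	/* in netdev_open(): */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = RUN_AT(3 * HZ);	/* first poll after ~3 s */
	add_timer(&np->timer);
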
921 struct netdev_private *np = netdev_priv(dev);
924 np->linkok = 0;
926 if (np->PHYType == MysonPHY) {
928 if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
929 np->linkok = 1;
936 if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
937 np->linkok = 1;
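
getlinkstatus() has two paths: the 3-in-1 MysonPHY case reads the MAC's own BMCRSR register (line 928), while external PHYs poll MII_BMSR (line 936). One detail worth noting for the MII path: BMSR's link bit is latched-low per 802.3, so a single read can still report a stale link drop. The usual idiom reads the register twice, as sketched; whether this driver relies on its polling loop instead is not visible in the listing:

	/* BMSR.LSTATUS is latched-low: read once to clear the latch,
	 * then read again for the current state. */
	mdio_read(dev, np->phys[0], MII_BMSR);
	if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS)
		np->linkok = 1;
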
948 struct netdev_private *np = netdev_priv(dev);
950 if (np->PHYType == MysonPHY) { /* 3-in-1 case */
951 if (ioread32(np->mem + TCRRCR) & CR_R_FD)
952 np->duplexmode = 2; /* full duplex */
954 np->duplexmode = 1; /* half duplex */
955 if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
956 np->line_speed = 1; /* 10M */
958 np->line_speed = 2; /* 100M */
960 if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
963 data = mdio_read(dev, np->phys[0], MIIRegister18);
965 np->line_speed = 2; /* 100M */
967 np->line_speed = 1; /* 10M */
969 np->duplexmode = 2; /* full duplex mode */
971 np->duplexmode = 1; /* half duplex mode */
972 } else if (np->PHYType == AhdocPHY) {
975 data = mdio_read(dev, np->phys[0], DiagnosticReg);
977 np->line_speed = 2; /* 100M */
979 np->line_speed = 1; /* 10M */
981 np->duplexmode = 2; /* full duplex mode */
983 np->duplexmode = 1; /* half duplex mode */
986 else if (np->PHYType == MarvellPHY) {
989 data = mdio_read(dev, np->phys[0], SpecificReg);
991 np->duplexmode = 2; /* full duplex mode */
993 np->duplexmode = 1; /* half duplex mode */
996 np->line_speed = 3; /* 1000M */
998 np->line_speed = 2; /* 100M */
1000 np->line_speed = 1; /* 10M */
1004 else if (np->PHYType == Myson981) {
1007 data = mdio_read(dev, np->phys[0], StatusRegister);
1010 np->line_speed = 2;
1012 np->line_speed = 1;
1015 np->duplexmode = 2;
1017 np->duplexmode = 1;
1021 else if (np->PHYType == LevelOnePHY) {
1024 data = mdio_read(dev, np->phys[0], SpecificReg);
1026 np->duplexmode = 2; /* full duplex mode */
1028 np->duplexmode = 1; /* half duplex mode */
1031 np->line_speed = 3; /* 1000M */
1033 np->line_speed = 2; /* 100M */
1035 np->line_speed = 1; /* 10M */
1037 np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
1038 if (np->line_speed == 1)
1039 np->crvalue |= CR_W_PS10;
1040 else if (np->line_speed == 3)
1041 np->crvalue |= CR_W_PS1000;
1042 if (np->duplexmode == 2)
1043 np->crvalue |= CR_W_FD;
1051 struct netdev_private *np = netdev_priv(dev);
1054 while (np->really_rx_count != RX_RING_SIZE) {
1057 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1061 while (np->lack_rxbuf->skbuff)
1062 np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
1064 np->lack_rxbuf->skbuff = skb;
1065 np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
1067 np->rx_buf_sz,
1069 np->lack_rxbuf->status = RXOWN;
1070 ++np->really_rx_count;
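
allocate_rx_buffers() refills descriptors whose skbuff was consumed by netdev_rx(): np->lack_rxbuf remembers where the first hole is, and ownership (RXOWN) is handed to the NIC only after the buffer is mapped. Condensed from the lines above, with the allocation-failure behavior assumed (give up and let a later pass retry):

	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

		if (!skb)
			break;		/* assumed: retry on a later pass */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
							skb->data, np->rx_buf_sz,
							DMA_FROM_DEVICE);
		np->lack_rxbuf->status = RXOWN;	/* hand to the NIC last */
		++np->really_rx_count;
	}
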
1077 struct netdev_private *np = from_timer(np, t, timer);
1078 struct net_device *dev = np->mii.dev;
1079 void __iomem *ioaddr = np->mem;
1080 int old_crvalue = np->crvalue;
1081 unsigned int old_linkok = np->linkok;
1089 spin_lock_irqsave(&np->lock, flags);
1091 if (np->flags == HAS_MII_XCVR) {
1093 if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
1095 if (np->crvalue != old_crvalue) {
1096 stop_nic_rxtx(ioaddr, np->crvalue);
1097 iowrite32(np->crvalue, ioaddr + TCRRCR);
1104 spin_unlock_irqrestore(&np->lock, flags);
1106 np->timer.expires = RUN_AT(10 * HZ);
1107 add_timer(&np->timer);
1115 struct netdev_private *np = netdev_priv(dev);
1116 void __iomem *ioaddr = np->mem;
1141 struct netdev_private *np = netdev_priv(dev);
1142 void __iomem *ioaddr = np->mem;
1146 iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
1148 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1151 iowrite32(np->bcrvalue, ioaddr + BCR);
1154 __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
1158 iowrite32(np->imrvalue, ioaddr + IMR);
1166 struct netdev_private *np = from_timer(np, t, reset_timer);
1167 struct net_device *dev = np->mii.dev;
1172 spin_lock_irqsave(&np->lock, flags);
1173 np->crvalue = np->crvalue_sv;
1174 np->imrvalue = np->imrvalue_sv;
1182 np->reset_timer_armed = 0;
1184 spin_unlock_irqrestore(&np->lock, flags);
1190 struct netdev_private *np = netdev_priv(dev);
1191 void __iomem *ioaddr = np->mem;
1200 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
1203 (unsigned int) np->rx_ring[i].status);
1205 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
1207 printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
1211 spin_lock_irqsave(&np->lock, flags);
1217 spin_unlock_irqrestore(&np->lock, flags);
1228 struct netdev_private *np = netdev_priv(dev);
1232 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1233 np->cur_rx = &np->rx_ring[0];
1234 np->lack_rxbuf = np->rx_ring;
1235 np->really_rx_count = 0;
1239 np->rx_ring[i].status = 0;
1240 np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
1241 np->rx_ring[i].next_desc = np->rx_ring_dma +
1243 np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
1244 np->rx_ring[i].skbuff = NULL;
1248 np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
1249 np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
1253 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1256 np->lack_rxbuf = &np->rx_ring[i];
1260 ++np->really_rx_count;
1261 np->rx_ring[i].skbuff = skb;
1262 np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
1264 np->rx_buf_sz,
1266 np->rx_ring[i].status = RXOWN;
1267 np->rx_ring[i].control |= RXIC;
1271 np->cur_tx = &np->tx_ring[0];
1272 np->cur_tx_copy = &np->tx_ring[0];
1273 np->really_tx_count = 0;
1274 np->free_tx_count = TX_RING_SIZE;
1277 np->tx_ring[i].status = 0;
1278 /* do we need np->tx_ring[i].control = XXX; ?? */
1279 np->tx_ring[i].next_desc = np->tx_ring_dma +
1281 np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
1282 np->tx_ring[i].skbuff = NULL;
1286 np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
1287 np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
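
init_ring() (lines 1233-1287) links every descriptor twice: next_desc holds the bus address the NIC follows, next_desc_logical the kernel pointer the driver follows, and the last entry of each ring points back to the first, so neither side translates addresses per packet. The descriptor layout implied by the fields referenced throughout this listing, sketched; field order, widths, and any padding members are assumptions:

	struct fealnx_desc {
		s32 status;			/* RXOWN/TXOWN plus error bits */
		s32 control;			/* sizes, TXFD/TXLD, RXIC/TXIC */
		u32 buffer;			/* bus address of skb data */
		u32 next_desc;			/* bus address of next descriptor */
		struct fealnx_desc *next_desc_logical;	/* kernel pointer to same */
		struct sk_buff *skbuff;		/* owning skb, NULL when empty */
	};
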
1293 struct netdev_private *np = netdev_priv(dev);
1296 spin_lock_irqsave(&np->lock, flags);
1298 np->cur_tx_copy->skbuff = skb;
1303 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
1305 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1306 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1307 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1309 if (np->pci_dev->device == 0x891)
1310 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1311 np->cur_tx_copy->status = TXOWN;
1312 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1313 --np->free_tx_count;
1319 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
1322 np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
1323 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1324 np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
1327 next = np->cur_tx_copy->next_desc_logical;
1333 if (np->pci_dev->device == 0x891)
1334 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1340 np->cur_tx_copy->status = TXOWN;
1342 np->cur_tx_copy = next->next_desc_logical;
1343 np->free_tx_count -= 2;
1345 np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
1348 np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1349 np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
1350 np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
1352 if (np->pci_dev->device == 0x891)
1353 np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1354 np->cur_tx_copy->status = TXOWN;
1355 np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1356 --np->free_tx_count;
1360 if (np->free_tx_count < 2)
1362 ++np->really_tx_count;
1363 iowrite32(0, np->mem + TXPDR);
1365 spin_unlock_irqrestore(&np->lock, flags);
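
start_tx() has three cases: a frame that fits one descriptor (lines 1303-1313), a frame split across two because it exceeds BPT bytes (lines 1319-1343), and the single-descriptor tail path (lines 1345-1356). The detail that matters is ordering in the split case, condensed here: the second (TXLD) descriptor is made ready before TXOWN goes up on the first (TXFD) one, so the NIC never chases a half-built chain. A memory barrier between the two stores would be the textbook addition; whether this hardware needs one is outside the listing.

	next = np->cur_tx_copy->next_desc_logical;
	next->skbuff = skb;
	next->control = TXIC | TXLD | CRCEnable | PADEnable;	/* tail fragment */
	next->status = TXOWN;			/* second half ready first ... */
	np->cur_tx_copy->status = TXOWN;	/* ... then the head goes live */
	np->cur_tx_copy = next->next_desc_logical;
	np->free_tx_count -= 2;
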
1374 struct netdev_private *np = netdev_priv(dev);
1379 np->cur_tx = &np->tx_ring[0];
1380 np->cur_tx_copy = &np->tx_ring[0];
1381 np->really_tx_count = 0;
1382 np->free_tx_count = TX_RING_SIZE;
1385 cur = &np->tx_ring[i];
1387 dma_unmap_single(&np->pci_dev->dev, cur->buffer,
1395 cur->next_desc = np->tx_ring_dma +
1397 cur->next_desc_logical = &np->tx_ring[i + 1];
1400 np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
1401 np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
1408 struct netdev_private *np = netdev_priv(dev);
1409 struct fealnx_desc *cur = np->cur_rx;
1420 iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1421 np->mem + RXLBA);
1430 struct netdev_private *np = netdev_priv(dev);
1431 void __iomem *ioaddr = np->mem;
1436 spin_lock(&np->lock);
1450 if (!(intr_status & np->imrvalue))
1481 stop_nic_rx(ioaddr, np->crvalue);
1483 iowrite32(np->crvalue, ioaddr + TCRRCR);
1487 while (np->really_tx_count) {
1488 long tx_status = np->cur_tx->status;
1489 long tx_control = np->cur_tx->control;
1494 next = np->cur_tx->next_desc_logical;
1502 if (!(np->crvalue & CR_W_ENH)) {
1513 if ((tx_status & HF) && np->mii.full_duplex == 0)
1531 dma_unmap_single(&np->pci_dev->dev,
1532 np->cur_tx->buffer,
1533 np->cur_tx->skbuff->len,
1535 dev_consume_skb_irq(np->cur_tx->skbuff);
1536 np->cur_tx->skbuff = NULL;
1537 --np->really_tx_count;
1538 if (np->cur_tx->control & TXLD) {
1539 np->cur_tx = np->cur_tx->next_desc_logical;
1540 ++np->free_tx_count;
1542 np->cur_tx = np->cur_tx->next_desc_logical;
1543 np->cur_tx = np->cur_tx->next_desc_logical;
1544 np->free_tx_count += 2;
1549 if (num_tx && np->free_tx_count >= 2)
1553 if (np->crvalue & CR_W_ENH) {
1568 if (!np->reset_timer_armed) {
1569 np->reset_timer_armed = 1;
1570 np->reset_timer.expires = RUN_AT(HZ/2);
1571 add_timer(&np->reset_timer);
1576 np->crvalue_sv = np->crvalue;
1577 np->imrvalue_sv = np->imrvalue;
1578 np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
1579 np->imrvalue = 0;
1598 iowrite32(np->imrvalue, ioaddr + IMR);
1600 spin_unlock(&np->lock);
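
The interrupt handler (lines 1430-1450) opens with the standard shared-IRQ sequence: take the device spinlock, read the interrupt source register, and give up immediately if none of the bits the driver unmasked (np->imrvalue) are set, since on a shared line the event may belong to another device. A condensed sketch; the acknowledge-by-writeback detail and the dev_instance name are assumptions:

	static irqreturn_t intr_handler(int irq, void *dev_instance)
	{
		struct net_device *dev = dev_instance;
		struct netdev_private *np = netdev_priv(dev);
		void __iomem *ioaddr = np->mem;
		u32 intr_status;

		spin_lock(&np->lock);
		intr_status = ioread32(ioaddr + ISR);
		iowrite32(intr_status, ioaddr + ISR);	/* assumed write-1-to-clear ack */
		if (!(intr_status & np->imrvalue)) {
			spin_unlock(&np->lock);
			return IRQ_NONE;	/* not ours on a shared line */
		}

		/* ... service TX completions and RX here ... */

		iowrite32(np->imrvalue, ioaddr + IMR);	/* re-enable, as at line 1598 */
		spin_unlock(&np->lock);
		return IRQ_HANDLED;
	}
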
1610 struct netdev_private *np = netdev_priv(dev);
1611 void __iomem *ioaddr = np->mem;
1614 while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
1615 s32 rx_status = np->cur_rx->status;
1617 if (np->really_rx_count == 0)
1646 cur = np->cur_rx;
1647 while (desno <= np->really_rx_count) {
1655 if (desno > np->really_rx_count)
1667 if (!np->cur_rx->skbuff) {
1672 np->cur_rx->status = RXOWN;
1673 np->cur_rx = np->cur_rx->next_desc_logical;
1677 stop_nic_rx(ioaddr, np->crvalue);
1679 iowrite32(np->crvalue, ioaddr + TCRRCR);
1700 dma_sync_single_for_cpu(&np->pci_dev->dev,
1701 np->cur_rx->buffer,
1702 np->rx_buf_sz,
1708 np->cur_rx->skbuff->data, pkt_len);
1711 skb_put_data(skb, np->cur_rx->skbuff->data,
1714 dma_sync_single_for_device(&np->pci_dev->dev,
1715 np->cur_rx->buffer,
1716 np->rx_buf_sz,
1719 dma_unmap_single(&np->pci_dev->dev,
1720 np->cur_rx->buffer,
1721 np->rx_buf_sz,
1723 skb_put(skb = np->cur_rx->skbuff, pkt_len);
1724 np->cur_rx->skbuff = NULL;
1725 --np->really_rx_count;
1733 np->cur_rx = np->cur_rx->next_desc_logical;
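
Lines 1700-1725 are the classic copybreak receive path: a small packet is copied into a fresh skb so the mapped ring buffer can be recycled (sync for CPU, copy, sync back for the device), while a large packet is unmapped and handed up whole, leaving a hole for allocate_rx_buffers() to refill. Sketched below; the rx_copybreak threshold name is assumed, as the listing does not show it:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		dma_sync_single_for_cpu(&np->pci_dev->dev, np->cur_rx->buffer,
					np->rx_buf_sz, DMA_FROM_DEVICE);
		skb_put_data(skb, np->cur_rx->skbuff->data, pkt_len);
		dma_sync_single_for_device(&np->pci_dev->dev, np->cur_rx->buffer,
					   np->rx_buf_sz, DMA_FROM_DEVICE);
		/* the mapped ring buffer is recycled to the NIC untouched */
	} else {
		dma_unmap_single(&np->pci_dev->dev, np->cur_rx->buffer,
				 np->rx_buf_sz, DMA_FROM_DEVICE);
		skb_put(skb = np->cur_rx->skbuff, pkt_len);	/* hand the ring skb up */
		np->cur_rx->skbuff = NULL;	/* slot now needs a refill */
		--np->really_rx_count;		/* allocate_rx_buffers() tops up */
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
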
1745 struct netdev_private *np = netdev_priv(dev);
1746 void __iomem *ioaddr = np->mem;
1774 struct netdev_private *np = netdev_priv(dev);
1775 void __iomem *ioaddr = np->mem;
1799 stop_nic_rxtx(ioaddr, np->crvalue);
1803 np->crvalue &= ~CR_W_RXMODEMASK;
1804 np->crvalue |= rx_mode;
1805 iowrite32(np->crvalue, ioaddr + TCRRCR);
1810 struct netdev_private *np = netdev_priv(dev);
1813 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1819 struct netdev_private *np = netdev_priv(dev);
1821 spin_lock_irq(&np->lock);
1822 mii_ethtool_get_link_ksettings(&np->mii, cmd);
1823 spin_unlock_irq(&np->lock);
1831 struct netdev_private *np = netdev_priv(dev);
1834 spin_lock_irq(&np->lock);
1835 rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1836 spin_unlock_irq(&np->lock);
1843 struct netdev_private *np = netdev_priv(dev);
1844 return mii_nway_restart(&np->mii);
1849 struct netdev_private *np = netdev_priv(dev);
1850 return mii_link_ok(&np->mii);
1875 struct netdev_private *np = netdev_priv(dev);
1881 spin_lock_irq(&np->lock);
1882 rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
1883 spin_unlock_irq(&np->lock);
1891 struct netdev_private *np = netdev_priv(dev);
1892 void __iomem *ioaddr = np->mem;
1903 del_timer_sync(&np->timer);
1904 del_timer_sync(&np->reset_timer);
1906 free_irq(np->pci_dev->irq, dev);
1910 struct sk_buff *skb = np->rx_ring[i].skbuff;
1912 np->rx_ring[i].status = 0;
1914 dma_unmap_single(&np->pci_dev->dev,
1915 np->rx_ring[i].buffer, np->rx_buf_sz,
1918 np->rx_ring[i].skbuff = NULL;
1923 struct sk_buff *skb = np->tx_ring[i].skbuff;
1926 dma_unmap_single(&np->pci_dev->dev,
1927 np->tx_ring[i].buffer, skb->len,
1930 np->tx_ring[i].skbuff = NULL;