Lines matching refs: vp

584 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
660 static void window_set(struct vortex_private *vp, int window)
662 if (window != vp->window) {
663 iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
664 vp->window = window;
670 window_read ## size(struct vortex_private *vp, int window, int addr) \
674 spin_lock_irqsave(&vp->window_lock, flags); \
675 window_set(vp, window); \
676 ret = ioread ## size(vp->ioaddr + addr); \
677 spin_unlock_irqrestore(&vp->window_lock, flags); \
681 window_write ## size(struct vortex_private *vp, u ## size value, \
685 spin_lock_irqsave(&vp->window_lock, flags); \
686 window_set(vp, window); \
687 iowrite ## size(value, vp->ioaddr + addr); \
688 spin_unlock_irqrestore(&vp->window_lock, flags); \
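The two size-parameterised macros above (lines 670-688) generate the locked, bank-switching register accessors used throughout the vortex driver: take window_lock, select the register window if it is not already current, do the MMIO access, and drop the lock. A minimal sketch of what the 16-bit pair expands to, assuming only the vortex_private fields visible in these matches (ioaddr, window, window_lock) plus the SelectWindow command and EL3_CMD offset shown at line 663:

static u16 window_read16_sketch(struct vortex_private *vp, int window, int addr)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&vp->window_lock, flags);
	if (window != vp->window) {		/* window_set(): switch register bank */
		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
		vp->window = window;
	}
	ret = ioread16(vp->ioaddr + addr);
	spin_unlock_irqrestore(&vp->window_lock, flags);
	return ret;
}

static void window_write16_sketch(struct vortex_private *vp, u16 value,
				  int window, int addr)
{
	unsigned long flags;

	spin_lock_irqsave(&vp->window_lock, flags);
	if (window != vp->window) {
		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
		vp->window = window;
	}
	iowrite16(value, vp->ioaddr + addr);
	spin_unlock_irqrestore(&vp->window_lock, flags);
}

Caching the current bank in vp->window (initialised to -1 at line 1170) lets window_set() skip the SelectWindow command when the right window is already mapped.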
700 #define VORTEX_PCI(vp) \
701 ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
709 #define VORTEX_EISA(vp) \
710 ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
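VORTEX_PCI() and VORTEX_EISA() recover the bus-specific device from the generic vp->gendev and return NULL when the card sits on the other bus, so every PCI-only operation has to be guarded. A minimal sketch of the pattern used in vortex_up() (lines 1555-1559); the function name and return convention here are illustrative only:

static int vortex_power_up_sketch(struct vortex_private *vp)
{
	struct pci_dev *pdev = VORTEX_PCI(vp);	/* NULL for EISA cards */
	int err = 0;

	if (pdev) {
		pci_set_power_state(pdev, PCI_D0);	/* go active */
		if (vp->pm_state_valid)
			pci_restore_state(pdev);
		err = pci_enable_device(pdev);
	}
	return err;
}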
758 static void mdio_sync(struct vortex_private *vp, int bits);
760 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
930 struct vortex_private *vp;
941 vp = netdev_priv(dev);
942 ioaddr = vp->ioaddr;
1091 struct vortex_private *vp;
1119 dev = alloc_etherdev(sizeof(*vp));
1125 vp = netdev_priv(dev);
1148 vp->enable_wol = 1;
1164 vp->ioaddr = ioaddr;
1165 vp->large_frames = mtu > 1500;
1166 vp->drv_flags = vci->drv_flags;
1167 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1168 vp->io_size = vci->io_size;
1169 vp->card_idx = card_idx;
1170 vp->window = -1;
1200 spin_lock_init(&vp->lock);
1201 spin_lock_init(&vp->mii_lock);
1202 spin_lock_init(&vp->window_lock);
1203 vp->gendev = gendev;
1204 vp->mii.dev = dev;
1205 vp->mii.mdio_read = mdio_read;
1206 vp->mii.mdio_write = mdio_write;
1207 vp->mii.phy_id_mask = 0x1f;
1208 vp->mii.reg_num_mask = 0x1f;
1211 vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1213 &vp->rx_ring_dma, GFP_KERNEL);
1215 if (!vp->rx_ring)
1218 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1219 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
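Lines 1211-1219 allocate both descriptor rings from a single coherent DMA block: RX_RING_SIZE boom_rx_desc entries first, with the Tx ring laid out immediately behind them, so one dma_free_coherent() (lines 1479 and 3287) releases everything. A sketch of the allocation; the Tx-ring size term sits on an elided line and the error label is assumed:

vp->rx_ring = dma_alloc_coherent(gendev,
			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
			&vp->rx_ring_dma, GFP_KERNEL);
if (!vp->rx_ring)
	goto free_device;			/* assumed probe error label */

/* The Tx descriptors live directly behind the Rx ring in the same block,
 * so both the CPU pointer and the bus address are plain offsets. */
vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;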
1228 vp->media_override = 7;
1230 vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
1231 if (vp->media_override != 7)
1232 vp->medialock = 1;
1233 vp->full_duplex = (option & 0x200) ? 1 : 0;
1234 vp->bus_master = (option & 16) ? 1 : 0;
1238 vp->full_duplex = 1;
1240 vp->enable_wol = 1;
1244 vp->full_duplex = 1;
1246 vp->flow_ctrl = 1;
1248 vp->enable_wol = 1;
1251 vp->mii.force_media = vp->full_duplex;
1252 vp->options = option;
1266 window_write16(vp, base + i, 0, Wn0EepromCmd);
1270 if ((window_read16(vp, 0, Wn0EepromCmd) &
1274 eeprom[i] = window_read16(vp, 0, Wn0EepromData);
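The probe reads the configuration EEPROM through window 0 one 16-bit word at a time (lines 1266-1274): write the word index to Wn0EepromCmd, poll until the controller's busy bit clears, then fetch the word from Wn0EepromData. A sketch of that loop; the 162 us step and the 0x8000 busy mask fall on elided lines and are assumptions here:

for (i = 0; i < 0x40; i++) {
	int timer;

	window_write16(vp, base + i, 0, Wn0EepromCmd);
	/* An EEPROM read takes a few hundred microseconds; poll the busy bit. */
	for (timer = 10; timer >= 0; timer--) {
		udelay(162);
		if ((window_read16(vp, 0, Wn0EepromCmd) & 0x8000) == 0)
			break;
	}
	eeprom[i] = window_read16(vp, 0, Wn0EepromData);
}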
1300 window_write8(vp, dev->dev_addr[i], 2, i);
1309 step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
1320 vp->cb_fn_base = pci_iomap(pdev, 2, 0);
1321 if (!vp->cb_fn_base) {
1330 vp->cb_fn_base);
1333 n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1334 if (vp->drv_flags & INVERT_LED_PWR)
1336 if (vp->drv_flags & INVERT_MII_PWR)
1338 window_write16(vp, n, 2, Wn2_ResetOptions);
1339 if (vp->drv_flags & WNO_XCVR_PWR) {
1340 window_write16(vp, 0x0800, 0, 0);
1345 vp->info1 = eeprom[13];
1346 vp->info2 = eeprom[15];
1347 vp->capabilities = eeprom[16];
1349 if (vp->info1 & 0x8000) {
1350 vp->full_duplex = 1;
1358 vp->available_media = window_read16(vp, 3, Wn3_Options);
1359 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1360 vp->available_media = 0x40;
1361 config = window_read32(vp, 3, Wn3_Config);
1364 config, window_read16(vp, 3, Wn3_Options));
1373 vp->default_media = XCVR(config);
1374 if (vp->default_media == XCVR_NWAY)
1375 vp->has_nway = 1;
1376 vp->autoselect = AUTOSELECT(config);
1379 if (vp->media_override != 7) {
1381 print_name, vp->media_override,
1382 media_tbl[vp->media_override].name);
1383 dev->if_port = vp->media_override;
1385 dev->if_port = vp->default_media;
1387 if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
1391 if (vp->drv_flags & EXTRA_PREAMBLE)
1393 mdio_sync(vp, 32);
1410 vp->phys[phy_idx++] = phyx;
1422 vp->phys[0] = 24;
1424 vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
1425 if (vp->full_duplex) {
1427 vp->advertising &= ~0x02A0;
1428 mdio_write(dev, vp->phys[0], 4, vp->advertising);
1431 vp->mii.phy_id = vp->phys[0];
1434 if (vp->capabilities & CapBusMaster) {
1435 vp->full_bus_master_tx = 1;
1438 (vp->info2 & 1) ? "early" : "whole-frame" );
1440 vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1441 vp->bus_master = 0; /* AKPM: vortex only */
1445 if (vp->full_bus_master_tx) {
1449 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1467 vp->pm_state_valid = 1;
1479 vp->rx_ring, vp->rx_ring_dma);
1490 struct vortex_private *vp = netdev_priv(dev);
1491 void __iomem *ioaddr = vp->ioaddr;
1517 struct vortex_private *vp = netdev_priv(dev);
1520 dev->name, (vp->full_duplex) ? "full" : "half");
1523 window_write16(vp,
1524 ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1525 (vp->large_frames ? 0x40 : 0) |
1526 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1533 struct vortex_private *vp = netdev_priv(dev);
1539 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1540 vp->full_duplex = vp->mii.full_duplex;
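vortex_set_duplex() (the block at lines 1517-1526) programs the window-3 MAC control register from the negotiated state: 0x20 selects full duplex, 0x40 enables large (VLAN-sized) frames, and flow control is turned on only when both ends advertised pause; vortex_check_media() re-runs it via mii_check_media() whenever the MII duplex changes (lines 1539-1540). A sketch, with the flow-control bit value and the log line approximated since they fall on elided lines:

static void vortex_set_duplex_sketch(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	pr_info("%s: setting %s-duplex.\n",
		dev->name, vp->full_duplex ? "full" : "half");

	/* MAC control: full-duplex bit, large-frame bit, and flow control
	 * only if we and the link partner both advertised it. */
	window_write16(vp,
		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
		       (vp->large_frames ? 0x40 : 0) |
		       ((vp->full_duplex && vp->flow_ctrl &&
			 vp->partner_flow_ctrl) ? 0x100 : 0),
		       3, Wn3_MAC_Ctrl);
}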
1550 struct vortex_private *vp = netdev_priv(dev);
1551 void __iomem *ioaddr = vp->ioaddr;
1555 if (VORTEX_PCI(vp)) {
1556 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
1557 if (vp->pm_state_valid)
1558 pci_restore_state(VORTEX_PCI(vp));
1559 err = pci_enable_device(VORTEX_PCI(vp));
1567 config = window_read32(vp, 3, Wn3_Config);
1569 if (vp->media_override != 7) {
1571 dev->name, vp->media_override,
1572 media_tbl[vp->media_override].name);
1573 dev->if_port = vp->media_override;
1574 } else if (vp->autoselect) {
1575 if (vp->has_nway) {
1583 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1590 dev->if_port = vp->default_media;
1596 timer_setup(&vp->timer, vortex_timer, 0);
1597 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1603 vp->full_duplex = vp->mii.force_media;
1607 window_write32(vp, config, 3, Wn3_Config);
1610 mdio_read(dev, vp->phys[0], MII_BMSR);
1611 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1612 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1613 vp->mii.full_duplex = vp->full_duplex;
1631 dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
1636 window_write8(vp, dev->dev_addr[i], 2, i);
1638 window_write16(vp, 0, 2, i);
1640 if (vp->cb_fn_base) {
1641 unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1642 if (vp->drv_flags & INVERT_LED_PWR)
1644 if (vp->drv_flags & INVERT_MII_PWR)
1646 window_write16(vp, n, 2, Wn2_ResetOptions);
1653 window_write16(vp,
1654 (window_read16(vp, 4, Wn4_Media) &
1663 window_read8(vp, 6, i);
1664 window_read16(vp, 6, 10);
1665 window_read16(vp, 6, 12);
1667 window_read8(vp, 4, 12);
1669 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1671 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1672 vp->cur_rx = 0;
1676 iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
1678 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1679 vp->cur_tx = vp->dirty_tx = 0;
1680 if (vp->drv_flags & IS_BOOMERANG)
1684 vp->rx_ring[i].status = 0;
1686 vp->tx_skbuff[i] = NULL;
1698 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1699 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1700 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1701 (vp->bus_master ? DMADone : 0);
1702 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1703 (vp->full_bus_master_rx ? 0 : RxComplete) |
1705 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1706 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1710 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1711 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
1712 iowrite32(0x8000, vp->cb_fn_base + 4);
1722 struct vortex_private *vp = netdev_priv(dev);
1733 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1738 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1739 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1740 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1744 vp->rx_skbuff[i] = skb;
1749 dma = dma_map_single(vp->gendev, skb->data,
1751 if (dma_mapping_error(vp->gendev, dma))
1753 vp->rx_ring[i].addr = cpu_to_le32(dma);
1761 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1770 if (vp->rx_skbuff[i]) {
1771 dev_kfree_skb(vp->rx_skbuff[i]);
1772 vp->rx_skbuff[i] = NULL;
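vortex_open() fills the Boomerang Rx ring (lines 1733-1761): each descriptor is chained to the next, given a cleared status word and a PKT_BUF_SZ fragment length, and paired with a freshly allocated, DMA-mapped skb; the last descriptor wraps back to the ring base, and a partially built ring is torn down again at lines 1770-1772. A sketch of the per-entry work as a fragment of that function, with the skb allocation call and alignment reserve assumed:

for (i = 0; i < RX_RING_SIZE; i++) {
	struct sk_buff *skb;
	dma_addr_t dma;

	vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma +
				sizeof(struct boom_rx_desc) * (i + 1));
	vp->rx_ring[i].status = 0;		/* clear complete bit */
	vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

	skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, GFP_KERNEL);
	vp->rx_skbuff[i] = skb;
	if (!skb)
		break;				/* partial ring handled below */
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */

	dma = dma_map_single(vp->gendev, skb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
	if (dma_mapping_error(vp->gendev, dma))
		break;
	vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE)
	goto free_partial_ring;			/* assumed label: free the skbs already attached */
/* Wrap the ring: the last descriptor points back at the first. */
vp->rx_ring[i - 1].next = cpu_to_le32(vp->rx_ring_dma);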
1786 struct vortex_private *vp = from_timer(vp, t, timer);
1787 struct net_device *dev = vp->mii.dev;
1788 void __iomem *ioaddr = vp->ioaddr;
1799 media_status = window_read16(vp, 4, Wn4_Media);
1832 if (vp->medialock)
1838 spin_lock_irq(&vp->lock);
1842 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1844 dev->if_port = vp->default_media;
1854 window_write16(vp,
1859 config = window_read32(vp, 3, Wn3_Config);
1861 window_write32(vp, config, 3, Wn3_Config);
1869 spin_unlock_irq(&vp->lock);
1877 mod_timer(&vp->timer, RUN_AT(next_tick));
1878 if (vp->deferred)
1884 struct vortex_private *vp = netdev_priv(dev);
1885 void __iomem *ioaddr = vp->ioaddr;
1891 window_read16(vp, 4, Wn4_NetDiag),
1892 window_read16(vp, 4, Wn4_Media),
1894 window_read16(vp, 4, Wn4_FIFODiag));
1912 if (vp->full_bus_master_tx) {
1914 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
1915 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1917 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
1921 if (vp->drv_flags & IS_BOOMERANG)
1941 struct vortex_private *vp = netdev_priv(dev);
1942 void __iomem *ioaddr = vp->ioaddr;
1965 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
1969 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1992 (window_read16(vp, 5, 10) & ~StatsFull),
1994 vp->intr_enable &= ~StatsFull;
1999 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
2000 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
2004 fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
2008 if (vp->full_bus_master_tx) {
2037 if (!vp->full_bus_master_tx)
2045 struct vortex_private *vp = netdev_priv(dev);
2046 void __iomem *ioaddr = vp->ioaddr;
2051 if (vp->bus_master) {
2054 vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
2056 if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
2062 spin_lock_irq(&vp->window_lock);
2063 window_set(vp, 7);
2064 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
2066 spin_unlock_irq(&vp->window_lock);
2067 vp->tx_skb = skb;
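The original Vortex chips have only a single-buffer bus-master Tx engine, so vortex_start_xmit() (lines 2051-2067) maps the whole packet, hands the NIC its bus address and length through window 7, and starts the download DMA; completion is signalled by the DMADone interrupt enabled at lines 1701/1705, and the matching unmap uses the same dword-rounded length (line 2307). A sketch as a fragment of that function; Wn7_MasterLen, StartDMADown and the drop path are on elided lines and are assumptions here:

if (vp->bus_master) {
	unsigned int len = (skb->len + 3) & ~3;		/* dword-align the DMA length */

	vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_irq(&vp->window_lock);
	window_set(vp, 7);
	iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
	iowrite16(len, ioaddr + Wn7_MasterLen);
	spin_unlock_irq(&vp->window_lock);
	vp->tx_skb = skb;
	iowrite16(StartDMADown, ioaddr + EL3_CMD);
}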
2113 struct vortex_private *vp = netdev_priv(dev);
2114 void __iomem *ioaddr = vp->ioaddr;
2116 int entry = vp->cur_tx % TX_RING_SIZE;
2118 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2125 dev->name, vp->cur_tx);
2134 if (vp->handling_irq)
2137 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2145 vp->tx_skbuff[entry] = skb;
2147 vp->tx_ring[entry].next = 0;
2150 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2152 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2155 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
2157 if (dma_mapping_error(vp->gendev, dma_addr))
2160 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2161 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2165 dma_addr = dma_map_single(vp->gendev, skb->data,
2167 if (dma_mapping_error(vp->gendev, dma_addr))
2170 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2171 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2176 dma_addr = skb_frag_dma_map(vp->gendev, frag,
2180 if (dma_mapping_error(vp->gendev, dma_addr)) {
2182 dma_unmap_page(vp->gendev,
2183 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2184 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2187 dma_unmap_single(vp->gendev,
2188 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2189 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2195 vp->tx_ring[entry].frag[i+1].addr =
2199 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
2201 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
2205 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
2206 if (dma_mapping_error(vp->gendev, dma_addr))
2208 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2209 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2210 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2213 spin_lock_irqsave(&vp->lock, flags);
2216 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2218 iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2219 vp->queued_packet++;
2222 vp->cur_tx++;
2225 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2237 spin_unlock_irqrestore(&vp->lock, flags);
2241 dev_err(vp->gendev, "Error mapping dma buffer\n");
2256 struct vortex_private *vp = netdev_priv(dev);
2263 ioaddr = vp->ioaddr;
2275 status |= vp->deferred;
2276 vp->deferred = 0;
2286 spin_lock(&vp->window_lock);
2287 window_set(vp, 7);
2307 dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
2309 bytes_compl += vp->tx_skb->len;
2310 dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2330 spin_unlock(&vp->window_lock);
2332 spin_lock(&vp->window_lock);
2333 window_set(vp, 7);
2341 vp->deferred |= status;
2342 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2344 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2347 mod_timer(&vp->timer, jiffies + 1*HZ);
2355 spin_unlock(&vp->window_lock);
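When one interrupt invocation exhausts its work budget, the handler does not drop the remaining event sources: lines 2341-2347 (and the same pattern in boomerang_interrupt at 2477-2483) fold them into vp->deferred, mask them out of the interrupt enable, acknowledge them, and arm the media timer, which checks vp->deferred (line 1878) and restores the full mask later. A sketch of that throttling step as a fragment of the handler; the budget variable and log text are approximated:

if (--work_done < 0) {
	pr_warn("%s: Too much work in interrupt, status %4.4x.\n",
		dev->name, status);
	/* Disable all pending interrupts and remember them in ->deferred... */
	vp->deferred |= status;
	iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
		  ioaddr + EL3_CMD);
	iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
	/* ...and let the timer re-enable them after a 1 s cool-down. */
	mod_timer(&vp->timer, jiffies + 1*HZ);
	break;
}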
2372 struct vortex_private *vp = netdev_priv(dev);
2379 ioaddr = vp->ioaddr;
2381 vp->handling_irq = 1;
2399 status |= vp->deferred;
2400 vp->deferred = 0;
2418 unsigned int dirty_tx = vp->dirty_tx;
2421 while (vp->cur_tx - dirty_tx > 0) {
2425 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2428 if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
2432 if (vp->tx_skbuff[entry]) {
2433 struct sk_buff *skb = vp->tx_skbuff[entry];
2436 dma_unmap_single(vp->gendev,
2437 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2438 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2442 dma_unmap_page(vp->gendev,
2443 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2444 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2447 dma_unmap_single(vp->gendev,
2448 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
2453 vp->tx_skbuff[entry] = NULL;
2460 vp->dirty_tx = dirty_tx;
2461 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2477 vp->deferred |= status;
2478 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2480 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2483 mod_timer(&vp->timer, jiffies + 1*HZ);
2488 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
2489 iowrite32(0x8000, vp->cb_fn_base + 4);
2498 vp->handling_irq = 0;
2506 struct vortex_private *vp = netdev_priv(dev);
2510 spin_lock_irqsave(&vp->lock, flags);
2512 if (vp->full_bus_master_rx)
2517 spin_unlock_irqrestore(&vp->lock, flags);
2524 struct vortex_private *vp = netdev_priv(dev);
2525 void __iomem *ioaddr = vp->ioaddr;
2555 if (vp->bus_master &&
2557 dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
2564 dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
2593 struct vortex_private *vp = netdev_priv(dev);
2594 int entry = vp->cur_rx % RX_RING_SIZE;
2595 void __iomem *ioaddr = vp->ioaddr;
2602 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2620 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2631 dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2633 skb_put_data(skb, vp->rx_skbuff[entry]->data,
2635 dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2636 vp->rx_copy++;
2647 newdma = dma_map_single(vp->gendev, newskb->data,
2649 if (dma_mapping_error(vp->gendev, newdma)) {
2656 skb = vp->rx_skbuff[entry];
2657 vp->rx_skbuff[entry] = newskb;
2658 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2660 dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2661 vp->rx_nocopy++;
2670 vp->rx_csumhits++;
2678 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2680 entry = (++vp->cur_rx) % RX_RING_SIZE;
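boomerang_rx() picks one of two delivery strategies per completed descriptor (lines 2620-2661): short frames are copied out of the still-mapped ring buffer into a small new skb (vp->rx_copy++), while long frames allocate a replacement buffer for the ring slot and hand the original skb straight up the stack (vp->rx_nocopy++); either way the descriptor ends up with a valid DMA address before its complete bit is cleared at line 2678. A sketch of that decision as a fragment of the loop; the rx_copybreak threshold, the allocation calls and the reuse label are assumptions:

if (pkt_len < rx_copybreak &&
    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
	skb_reserve(skb, 2);			/* align the IP header */
	/* Copy out of the ring buffer, leaving it mapped for reuse. */
	dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
	skb_put_data(skb, vp->rx_skbuff[entry]->data, pkt_len);
	dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
	vp->rx_copy++;
} else {
	/* Pass the filled skb up the stack and refill the slot. */
	struct sk_buff *newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
	dma_addr_t newdma;

	if (!newskb) {
		dev->stats.rx_dropped++;
		goto clear_complete;		/* assumed label: recycle the old skb */
	}
	newdma = dma_map_single(vp->gendev, newskb->data,
				PKT_BUF_SZ, DMA_FROM_DEVICE);
	if (dma_mapping_error(vp->gendev, newdma)) {
		dev->stats.rx_dropped++;
		consume_skb(newskb);
		goto clear_complete;
	}
	skb = vp->rx_skbuff[entry];
	vp->rx_skbuff[entry] = newskb;
	vp->rx_ring[entry].addr = cpu_to_le32(newdma);
	dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
	vp->rx_nocopy++;
}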
2688 struct vortex_private *vp = netdev_priv(dev);
2689 void __iomem *ioaddr = vp->ioaddr;
2694 del_timer_sync(&vp->timer);
2713 if (vp->full_bus_master_rx)
2715 if (vp->full_bus_master_tx)
2718 if (final_down && VORTEX_PCI(vp)) {
2719 vp->pm_state_valid = 1;
2720 pci_save_state(VORTEX_PCI(vp));
2728 struct vortex_private *vp = netdev_priv(dev);
2729 void __iomem *ioaddr = vp->ioaddr;
2740 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2744 if (vp->rx_csumhits &&
2745 (vp->drv_flags & HAS_HWCKSM) == 0 &&
2746 (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
2754 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2756 if (vp->rx_skbuff[i]) {
2757 dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
2759 dev_kfree_skb(vp->rx_skbuff[i]);
2760 vp->rx_skbuff[i] = NULL;
2763 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2765 if (vp->tx_skbuff[i]) {
2766 struct sk_buff *skb = vp->tx_skbuff[i];
2771 dma_unmap_single(vp->gendev,
2772 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2773 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2776 dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
2779 vp->tx_skbuff[i] = NULL;
2791 struct vortex_private *vp = netdev_priv(dev);
2792 void __iomem *ioaddr = vp->ioaddr;
2794 if (vp->full_bus_master_tx) {
2799 vp->full_bus_master_tx,
2800 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2801 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2804 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2810 length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
2812 length = le32_to_cpu(vp->tx_ring[i].length);
2815 i, &vp->tx_ring[i], length,
2816 le32_to_cpu(vp->tx_ring[i].status));
2826 struct vortex_private *vp = netdev_priv(dev);
2827 void __iomem *ioaddr = vp->ioaddr;
2831 spin_lock_irqsave (&vp->lock, flags);
2833 spin_unlock_irqrestore (&vp->lock, flags);
2847 struct vortex_private *vp = netdev_priv(dev);
2851 dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
2852 dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
2853 dev->stats.tx_window_errors += window_read8(vp, 6, 4);
2854 dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
2855 dev->stats.tx_packets += window_read8(vp, 6, 6);
2856 dev->stats.tx_packets += (window_read8(vp, 6, 9) &
2858 /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
2862 dev->stats.rx_bytes += window_read16(vp, 6, 10);
2863 dev->stats.tx_bytes += window_read16(vp, 6, 12);
2865 vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
2866 vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
2867 vp->xstats.tx_deferred += window_read8(vp, 6, 8);
2868 vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
2870 dev->stats.collisions = vp->xstats.tx_multiple_collisions
2871 + vp->xstats.tx_single_collisions
2872 + vp->xstats.tx_max_collisions;
2875 u8 up = window_read8(vp, 4, 13);
2883 struct vortex_private *vp = netdev_priv(dev);
2885 return mii_nway_restart(&vp->mii);
2891 struct vortex_private *vp = netdev_priv(dev);
2893 mii_ethtool_get_link_ksettings(&vp->mii, cmd);
2901 struct vortex_private *vp = netdev_priv(dev);
2903 return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
2929 struct vortex_private *vp = netdev_priv(dev);
2930 void __iomem *ioaddr = vp->ioaddr;
2933 spin_lock_irqsave(&vp->lock, flags);
2935 spin_unlock_irqrestore(&vp->lock, flags);
2937 data[0] = vp->xstats.tx_deferred;
2938 data[1] = vp->xstats.tx_max_collisions;
2939 data[2] = vp->xstats.tx_multiple_collisions;
2940 data[3] = vp->xstats.tx_single_collisions;
2941 data[4] = vp->xstats.rx_bad_ssd;
2960 struct vortex_private *vp = netdev_priv(dev);
2963 if (VORTEX_PCI(vp)) {
2964 strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
2967 if (VORTEX_EISA(vp))
2968 strscpy(info->bus_info, dev_name(vp->gendev),
2978 struct vortex_private *vp = netdev_priv(dev);
2980 if (!VORTEX_PCI(vp))
2986 if (vp->enable_wol)
2992 struct vortex_private *vp = netdev_priv(dev);
2994 if (!VORTEX_PCI(vp))
3001 vp->enable_wol = 1;
3003 vp->enable_wol = 0;
3032 struct vortex_private *vp = netdev_priv(dev);
3035 if(VORTEX_PCI(vp))
3036 state = VORTEX_PCI(vp)->current_state;
3041 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
3042 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
3044 pci_set_power_state(VORTEX_PCI(vp), state);
3056 struct vortex_private *vp = netdev_priv(dev);
3057 void __iomem *ioaddr = vp->ioaddr;
3082 struct vortex_private *vp = netdev_priv(dev);
3085 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
3093 window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
3097 window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
3101 vp->large_frames = dev->mtu > 1500 || enable;
3103 mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
3104 if (vp->large_frames)
3108 window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
3128 static void mdio_delay(struct vortex_private *vp)
3130 window_read32(vp, 4, Wn4_PhysicalMgmt);
3142 static void mdio_sync(struct vortex_private *vp, int bits)
3146 window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3147 mdio_delay(vp);
3148 window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3150 mdio_delay(vp);
3157 struct vortex_private *vp = netdev_priv(dev);
3161 spin_lock_bh(&vp->mii_lock);
3164 mdio_sync(vp, 32);
3169 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3170 mdio_delay(vp);
3171 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3173 mdio_delay(vp);
3177 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3178 mdio_delay(vp);
3180 ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
3182 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3184 mdio_delay(vp);
3187 spin_unlock_bh(&vp->mii_lock);
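mdio_read()/mdio_write() bit-bang the MII management frame through the PhysicalMgmt register in window 4: mdio_sync() clocks out 32 one-bits to resynchronise the PHY, each payload bit is presented and then latched by raising MDIO_SHIFT_CLK, and mdio_delay() is simply a dummy register read that enforces the minimum clock period (line 3130). A sketch of the read side under vp->mii_lock; the frame opcode, the MDIO_DATA_WRITE0/MDIO_DATA_READ bits, the mii_preamble_required flag and the cycle counts fall on elided lines and are assumptions here:

static int mdio_read_sketch(struct net_device *dev, int phy_id, int location)
{
	struct vortex_private *vp = netdev_priv(dev);
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;	/* start + read opcode */
	unsigned int retval = 0;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);		/* 32 set bits to resync the PHY */

	/* Shift the read command out, MSB first. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ?
				MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;

		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Clock in the turnaround and 16 data bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		retval = (retval << 1) |
			 ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			   MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* A failed read leaves the turnaround bit set; report all-ones then. */
	return (retval & 0x20000) ? 0xffff : (retval >> 1) & 0xffff;
}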
3194 struct vortex_private *vp = netdev_priv(dev);
3198 spin_lock_bh(&vp->mii_lock);
3201 mdio_sync(vp, 32);
3206 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3207 mdio_delay(vp);
3208 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3210 mdio_delay(vp);
3214 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3215 mdio_delay(vp);
3216 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3218 mdio_delay(vp);
3221 spin_unlock_bh(&vp->mii_lock);
3228 struct vortex_private *vp = netdev_priv(dev);
3229 void __iomem *ioaddr = vp->ioaddr;
3231 device_set_wakeup_enable(vp->gendev, vp->enable_wol);
3233 if (vp->enable_wol) {
3235 window_write16(vp, 2, 7, 0x0c);
3240 if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
3241 pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
3243 vp->enable_wol = 0;
3247 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3251 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3259 struct vortex_private *vp;
3266 vp = netdev_priv(dev);
3268 if (vp->cb_fn_base)
3269 pci_iounmap(pdev, vp->cb_fn_base);
3274 if (vp->pm_state_valid)
3279 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3280 vp->ioaddr + EL3_CMD);
3282 pci_iounmap(pdev, vp->ioaddr);
3287 vp->rx_ring, vp->rx_ring_dma);