Lines Matching defs:vp (in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/net/)

573 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
647 #define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
655 #define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
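
These matches are from the 3Com EtherLink XL driver (3c59x.c): vortex_private is the netdev private area returned by netdev_priv(), and its gendev field is a back-pointer to the generic struct device, which lets one driver serve PCI, EISA, and CardBus cards. DEVICE_PCI, used by the macro on line 647, is defined along the following lines in 2.6-era sources when CONFIG_PCI is enabled; this is a sketch of the pattern, not verbatim source:

    #include <linux/device.h>
    #include <linux/pci.h>

    /* Convert the stored generic device back to a pci_dev, but only if
     * it actually sits on the PCI bus; for EISA/CardBus cards the
     * conversion must yield NULL so VORTEX_PCI(vp) stays NULL-safe. */
    #define DEVICE_PCI(dev) \
            (((dev)->bus == &pci_bus_type) ? to_pci_dev(dev) : NULL)
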
705 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
784 struct vortex_private *vp = netdev_priv(dev);
787 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
815 struct vortex_private *vp = netdev_priv(dev);
818 if (dev && vp) {
828 if (request_irq(dev->irq, vp->full_bus_master_rx ?
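
Lines 787, 828, and 1663 make the same capability-based dispatch: boards with full bus-master Rx (the "Boomerang" generation and later) are wired to boomerang_interrupt, while older PIO "Vortex" boards get vortex_interrupt. A sketch of the open-time registration; the shared-IRQ flag is an assumption, since the matching lines elide the tail of the call:

    /* One driver, two ISRs: the handler is chosen once, at open time. */
    retval = request_irq(dev->irq,
                         vp->full_bus_master_rx ? boomerang_interrupt
                                                : vortex_interrupt,
                         IRQF_SHARED, dev->name, dev);
    if (retval)
            return retval;
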
879 struct vortex_private *vp;
890 vp = netdev_priv(dev);
891 ioaddr = vp->ioaddr;
993 struct vortex_private *vp;
1020 dev = alloc_etherdev(sizeof(*vp));
1028 vp = netdev_priv(dev);
1051 vp->enable_wol = 1;
1067 vp->ioaddr = ioaddr;
1068 vp->large_frames = mtu > 1500;
1069 vp->drv_flags = vci->drv_flags;
1070 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1071 vp->io_size = vci->io_size;
1072 vp->card_idx = card_idx;
1084 vp->must_free_region = 1;
1108 spin_lock_init(&vp->lock);
1109 vp->gendev = gendev;
1110 vp->mii.dev = dev;
1111 vp->mii.mdio_read = mdio_read;
1112 vp->mii.mdio_write = mdio_write;
1113 vp->mii.phy_id_mask = 0x1f;
1114 vp->mii.reg_num_mask = 0x1f;
1117 vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1119 &vp->rx_ring_dma);
1121 if (vp->rx_ring == 0)
1124 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1125 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
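
Lines 1117-1125 make a single coherent-DMA allocation and carve both descriptor rings out of it, so there is one allocation to free and one base bus address to track. The search elides the non-matching continuation line, but the offset arithmetic on lines 1124-1125 implies the full size; in the sketch below the Tx-ring size term is reconstructed, not verbatim:

    vp->rx_ring = pci_alloc_consistent(pdev,
                    sizeof(struct boom_rx_desc) * RX_RING_SIZE
                    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                    &vp->rx_ring_dma);
    if (!vp->rx_ring)
            goto free_region;            /* hypothetical error label */

    /* The Tx ring sits immediately after the Rx ring, in both the CPU
     * and the bus address space. */
    vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
    vp->tx_ring_dma = vp->rx_ring_dma
                    + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
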
1134 vp->media_override = 7;
1136 vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
1137 if (vp->media_override != 7)
1138 vp->medialock = 1;
1139 vp->full_duplex = (option & 0x200) ? 1 : 0;
1140 vp->bus_master = (option & 16) ? 1 : 0;
1144 vp->full_duplex = 1;
1146 vp->enable_wol = 1;
1150 vp->full_duplex = 1;
1152 vp->flow_ctrl = 1;
1154 vp->enable_wol = 1;
1157 vp->mii.force_media = vp->full_duplex;
1158 vp->options = option;
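
From the masks on lines 1134-1158, the per-card option value packs several settings into one integer. The layout below is inferred from those masks alone (hedged; the driver's own documentation may name the bits differently):

    /* bits 3:0  forced media index into media_tbl[] (7 = no override)
     * bit  4    enable the old single-packet bus-master DMA (vortex only)
     * bit  9    force full duplex
     * The (option & 7) == 2 case remaps a legacy media value to 0. */
    vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
    vp->bus_master     = (option & 16)    ? 1 : 0;
    vp->full_duplex    = (option & 0x200) ? 1 : 0;
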
1230 vp->cb_fn_base = pci_iomap(pdev, 2, 0);
1231 if (!vp->cb_fn_base) {
1241 vp->cb_fn_base);
1246 if (vp->drv_flags & INVERT_LED_PWR)
1248 if (vp->drv_flags & INVERT_MII_PWR)
1251 if (vp->drv_flags & WNO_XCVR_PWR) {
1258 vp->info1 = eeprom[13];
1259 vp->info2 = eeprom[15];
1260 vp->capabilities = eeprom[16];
1262 if (vp->info1 & 0x8000) {
1263 vp->full_duplex = 1;
1272 vp->available_media = ioread16(ioaddr + Wn3_Options);
1273 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1274 vp->available_media = 0x40;
1287 vp->default_media = XCVR(config);
1288 if (vp->default_media == XCVR_NWAY)
1289 vp->has_nway = 1;
1290 vp->autoselect = AUTOSELECT(config);
1293 if (vp->media_override != 7) {
1295 print_name, vp->media_override,
1296 media_tbl[vp->media_override].name);
1297 dev->if_port = vp->media_override;
1299 dev->if_port = vp->default_media;
1301 if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
1306 if (vp->drv_flags & EXTRA_PREAMBLE)
1325 vp->phys[phy_idx++] = phyx;
1337 vp->phys[0] = 24;
1339 vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
1340 if (vp->full_duplex) {
1342 vp->advertising &= ~0x02A0;
1343 mdio_write(dev, vp->phys[0], 4, vp->advertising);
1346 vp->mii.phy_id = vp->phys[0];
1349 if (vp->capabilities & CapBusMaster) {
1350 vp->full_bus_master_tx = 1;
1353 (vp->info2 & 1) ? "early" : "whole-frame" );
1355 vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1356 vp->bus_master = 0; /* AKPM: vortex only */
1361 if (vp->full_bus_master_tx) {
1365 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1393 vp->pm_state_valid = 1;
1394 pci_save_state(VORTEX_PCI(vp));
1405 vp->rx_ring,
1406 vp->rx_ring_dma);
1408 if (vp->must_free_region)
1419 struct vortex_private *vp = netdev_priv(dev);
1420 void __iomem *ioaddr = vp->ioaddr;
1446 struct vortex_private *vp = netdev_priv(dev);
1447 void __iomem *ioaddr = vp->ioaddr;
1450 dev->name, (vp->full_duplex) ? "full" : "half");
1454 iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1455 (vp->large_frames ? 0x40 : 0) |
1456 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1463 struct vortex_private *vp = netdev_priv(dev);
1469 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1470 vp->full_duplex = vp->mii.full_duplex;
1480 struct vortex_private *vp = netdev_priv(dev);
1481 void __iomem *ioaddr = vp->ioaddr;
1485 if (VORTEX_PCI(vp)) {
1486 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
1487 if (vp->pm_state_valid)
1488 pci_restore_state(VORTEX_PCI(vp));
1489 pci_enable_device(VORTEX_PCI(vp));
1496 if (vp->media_override != 7) {
1498 dev->name, vp->media_override,
1499 media_tbl[vp->media_override].name);
1500 dev->if_port = vp->media_override;
1501 } else if (vp->autoselect) {
1502 if (vp->has_nway) {
1510 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1517 dev->if_port = vp->default_media;
1523 init_timer(&vp->timer);
1524 vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
1525 vp->timer.data = (unsigned long)dev;
1526 vp->timer.function = vortex_timer; /* timer handler */
1527 add_timer(&vp->timer);
1529 init_timer(&vp->rx_oom_timer);
1530 vp->rx_oom_timer.data = (unsigned long)dev;
1531 vp->rx_oom_timer.function = rx_oom_timer;
1537 vp->full_duplex = vp->mii.force_media;
1545 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1546 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1547 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1576 if (vp->cb_fn_base) {
1578 if (vp->drv_flags & INVERT_LED_PWR)
1580 if (vp->drv_flags & INVERT_MII_PWR)
1610 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1611 vp->cur_rx = vp->dirty_rx = 0;
1615 iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
1617 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1618 vp->cur_tx = vp->dirty_tx = 0;
1619 if (vp->drv_flags & IS_BOOMERANG)
1623 vp->rx_ring[i].status = 0;
1625 vp->tx_skbuff[i] = NULL;
1637 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1638 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1639 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1640 (vp->bus_master ? DMADone : 0);
1641 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1642 (vp->full_bus_master_rx ? 0 : RxComplete) |
1644 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1645 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1649 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1650 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
1651 iowrite32(0x8000, vp->cb_fn_base + 4);
1658 struct vortex_private *vp = netdev_priv(dev);
1663 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1669 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1674 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1675 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1676 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1678 vp->rx_skbuff[i] = skb;
1683 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1689 if (vp->rx_skbuff[j]) {
1690 dev_kfree_skb(vp->rx_skbuff[j]);
1691 vp->rx_skbuff[j] = NULL;
1698 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
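
Lines 1674-1698 chain the Rx descriptors through their bus addresses and close the loop on the last entry, handing the NIC an endless list to walk. A condensed sketch of the same construction, folding the final wrap (line 1698) into the loop:

    for (i = 0; i < RX_RING_SIZE; i++) {
            /* Bus address of the successor; the modulo wraps the last
             * descriptor back to the base of the ring. */
            vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma
                    + sizeof(struct boom_rx_desc)
                      * ((i + 1) % RX_RING_SIZE));
            vp->rx_ring[i].status = 0;      /* owned by the NIC, empty */
            vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
    }
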
1716 struct vortex_private *vp = netdev_priv(dev);
1717 void __iomem *ioaddr = vp->ioaddr;
1751 spin_lock_bh(&vp->lock);
1753 spin_unlock_bh(&vp->lock);
1766 if (vp->medialock)
1774 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1776 dev->if_port = vp->default_media;
1809 mod_timer(&vp->timer, RUN_AT(next_tick));
1810 if (vp->deferred)
1817 struct vortex_private *vp = netdev_priv(dev);
1818 void __iomem *ioaddr = vp->ioaddr;
1843 if (vp->full_bus_master_tx)
1856 vp->stats.tx_errors++;
1857 if (vp->full_bus_master_tx) {
1859 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
1860 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1862 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
1864 if (vp->drv_flags & IS_BOOMERANG)
1868 vp->stats.tx_dropped++;
1887 struct vortex_private *vp = netdev_priv(dev);
1888 void __iomem *ioaddr = vp->ioaddr;
1909 if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
1910 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1911 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
1915 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1940 vp->intr_enable &= ~StatsFull;
1946 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1947 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1956 if (vp->full_bus_master_tx) {
1985 if (!vp->full_bus_master_tx)
1993 struct vortex_private *vp = netdev_priv(dev);
1994 void __iomem *ioaddr = vp->ioaddr;
1998 if (vp->bus_master) {
2001 iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
2004 vp->tx_skb = skb;
2032 if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
2033 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
2048 struct vortex_private *vp = netdev_priv(dev);
2049 void __iomem *ioaddr = vp->ioaddr;
2051 int entry = vp->cur_tx % TX_RING_SIZE;
2052 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2058 dev->name, vp->cur_tx);
2061 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2069 vp->tx_skbuff[entry] = skb;
2071 vp->tx_ring[entry].next = 0;
2074 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2076 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2079 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2081 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2085 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2087 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
2092 vp->tx_ring[entry].frag[i+1].addr =
2093 cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
2098 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
2100 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
2104 vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
2105 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2106 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2109 spin_lock_irqsave(&vp->lock, flags);
2112 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2114 iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2115 vp->queued_packet++;
2118 vp->cur_tx++;
2119 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2130 spin_unlock_irqrestore(&vp->lock, flags);
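
The transmit path above keeps two free-running counters: cur_tx is incremented only when a frame is queued (line 2118) and dirty_tx only when its descriptor is reaped, so their unsigned difference is the ring occupancy and the counter modulo TX_RING_SIZE is the slot index. A minimal sketch of the invariants behind the checks on lines 2061 and 2119 (helper names are illustrative):

    static inline unsigned int tx_slots_used(const struct vortex_private *vp)
    {
            /* Unsigned subtraction stays correct across wrap-around. */
            return vp->cur_tx - vp->dirty_tx;
    }

    static inline int tx_ring_full(const struct vortex_private *vp)
    {
            return tx_slots_used(vp) >= TX_RING_SIZE;
    }
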
2147 struct vortex_private *vp = netdev_priv(dev);
2153 ioaddr = vp->ioaddr;
2154 spin_lock(&vp->lock);
2166 status |= vp->deferred;
2167 vp->deferred = 0;
2195 pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2196 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2217 vp->deferred |= status;
2218 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2220 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2223 mod_timer(&vp->timer, jiffies + 1*HZ);
2234 spin_unlock(&vp->lock);
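
Lines 2217-2223 (repeated at 2341-2347 in the bus-master handler) are interrupt-storm protection: when one interrupt delivers too much work, the offending status bits are parked in vp->deferred, masked out of the status-enable register, acknowledged, and re-enabled roughly a second later from the media timer. Condensed from the matches above:

    vp->deferred |= status;                       /* remember the sources */
    iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
              ioaddr + EL3_CMD);                  /* mask the noisy ones  */
    iowrite16(AckIntr | (vp->deferred & 0x7ff),
              ioaddr + EL3_CMD);                  /* ack what we deferred */
    mod_timer(&vp->timer, jiffies + 1*HZ);        /* timer re-enables them */
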
2247 struct vortex_private *vp = netdev_priv(dev);
2252 ioaddr = vp->ioaddr;
2258 spin_lock(&vp->lock);
2275 status |= vp->deferred;
2276 vp->deferred = 0;
2294 unsigned int dirty_tx = vp->dirty_tx;
2297 while (vp->cur_tx - dirty_tx > 0) {
2300 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2303 if (vp->tx_skbuff[entry]) {
2304 struct sk_buff *skb = vp->tx_skbuff[entry];
2308 pci_unmap_single(VORTEX_PCI(vp),
2309 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2310 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2313 pci_unmap_single(VORTEX_PCI(vp),
2314 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
2317 vp->tx_skbuff[entry] = NULL;
2321 /* vp->stats.tx_packets++; Counted below. */
2324 vp->dirty_tx = dirty_tx;
2325 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2341 vp->deferred |= status;
2342 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2344 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2347 mod_timer(&vp->timer, jiffies + 1*HZ);
2352 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
2353 iowrite32(0x8000, vp->cb_fn_base + 4);
2361 spin_unlock(&vp->lock);
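
Inside boomerang_interrupt, lines 2294-2325 reap completed transmits: dirty_tx walks forward until it catches cur_tx or reaches the descriptor the NIC is still fetching (detected by comparing its bus address against DownListPtr), unmapping buffers and freeing skbs as it goes. A condensed sketch, with the per-fragment unmapping of lines 2308-2314 abbreviated to a comment:

    unsigned int dirty_tx = vp->dirty_tx;

    while (vp->cur_tx - dirty_tx > 0) {
            int entry = dirty_tx % TX_RING_SIZE;

            /* Stop at the descriptor the NIC is currently processing. */
            if (ioread32(ioaddr + DownListPtr) ==
                vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
                    break;

            if (vp->tx_skbuff[entry]) {
                    struct sk_buff *skb = vp->tx_skbuff[entry];
                    /* ... pci_unmap_single() each fragment here ... */
                    dev_kfree_skb_irq(skb);
                    vp->tx_skbuff[entry] = NULL;
            }
            dirty_tx++;
    }
    vp->dirty_tx = dirty_tx;
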
2367 struct vortex_private *vp = netdev_priv(dev);
2368 void __iomem *ioaddr = vp->ioaddr;
2380 vp->stats.rx_errors++;
2381 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2382 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2383 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2384 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2385 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2398 if (vp->bus_master &&
2400 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
2407 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
2417 vp->stats.rx_packets++;
2426 vp->stats.rx_dropped++;
2437 struct vortex_private *vp = netdev_priv(dev);
2438 int entry = vp->cur_rx % RX_RING_SIZE;
2439 void __iomem *ioaddr = vp->ioaddr;
2441 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2446 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2453 vp->stats.rx_errors++;
2454 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2455 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2456 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2457 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2458 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2463 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2473 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2476 vp->rx_skbuff[entry]->data,
2478 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2479 vp->rx_copy++;
2482 skb = vp->rx_skbuff[entry];
2483 vp->rx_skbuff[entry] = NULL;
2485 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2486 vp->rx_nocopy++;
2495 vp->rx_csumhits++;
2500 vp->stats.rx_packets++;
2502 entry = (++vp->cur_rx) % RX_RING_SIZE;
2505 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2507 entry = vp->dirty_rx % RX_RING_SIZE;
2508 if (vp->rx_skbuff[entry] == NULL) {
2516 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2517 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2522 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2523 vp->rx_skbuff[entry] = skb;
2525 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
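
Lines 2463-2486 are the classic copybreak trade-off: a short frame is copied into a freshly allocated skb so the already-mapped DMA buffer can stay in the ring, while a long frame surrenders its skb to the stack and the emptied slot is refilled by the loop at lines 2505-2523. A sketch of the decision, with rx_copybreak standing in for the elided threshold test:

    if (pkt_len < rx_copybreak &&
        (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
            skb_reserve(skb, 2);        /* 16-byte align the IP header */
            pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma,
                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
            memcpy(skb_put(skb, pkt_len),
                   vp->rx_skbuff[entry]->data, pkt_len);
            pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma,
                                           PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
            vp->rx_copy++;              /* ring keeps its buffer */
    } else {
            skb = vp->rx_skbuff[entry]; /* hand the big buffer upstream */
            vp->rx_skbuff[entry] = NULL;
            pci_unmap_single(VORTEX_PCI(vp), dma,
                             PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
            vp->rx_nocopy++;            /* slot refilled later */
    }
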
2539 struct vortex_private *vp = netdev_priv(dev);
2541 spin_lock_irq(&vp->lock);
2542 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2546 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2548 spin_unlock_irq(&vp->lock);
2554 struct vortex_private *vp = netdev_priv(dev);
2555 void __iomem *ioaddr = vp->ioaddr;
2559 del_timer_sync(&vp->rx_oom_timer);
2560 del_timer_sync(&vp->timer);
2562 /* Turn off statistics ASAP. We update vp->stats below. */
2579 if (vp->full_bus_master_rx)
2581 if (vp->full_bus_master_tx)
2584 if (final_down && VORTEX_PCI(vp)) {
2585 vp->pm_state_valid = 1;
2586 pci_save_state(VORTEX_PCI(vp));
2594 struct vortex_private *vp = netdev_priv(dev);
2595 void __iomem *ioaddr = vp->ioaddr;
2606 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2610 if (vp->rx_csumhits &&
2611 (vp->drv_flags & HAS_HWCKSM) == 0 &&
2612 (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
2620 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2622 if (vp->rx_skbuff[i]) {
2623 pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
2625 dev_kfree_skb(vp->rx_skbuff[i]);
2626 vp->rx_skbuff[i] = NULL;
2629 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2631 if (vp->tx_skbuff[i]) {
2632 struct sk_buff *skb = vp->tx_skbuff[i];
2637 pci_unmap_single(VORTEX_PCI(vp),
2638 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2639 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2642 pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
2645 vp->tx_skbuff[i] = NULL;
2657 struct vortex_private *vp = netdev_priv(dev);
2658 void __iomem *ioaddr = vp->ioaddr;
2660 if (vp->full_bus_master_tx) {
2665 vp->full_bus_master_tx,
2666 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2667 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2670 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2674 &vp->tx_ring[i],
2676 le32_to_cpu(vp->tx_ring[i].frag[0].length),
2678 le32_to_cpu(vp->tx_ring[i].length),
2680 le32_to_cpu(vp->tx_ring[i].status));
2690 struct vortex_private *vp = netdev_priv(dev);
2691 void __iomem *ioaddr = vp->ioaddr;
2695 spin_lock_irqsave (&vp->lock, flags);
2697 spin_unlock_irqrestore (&vp->lock, flags);
2699 return &vp->stats;
2711 struct vortex_private *vp = netdev_priv(dev);
2719 vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
2720 vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
2721 vp->stats.tx_window_errors += ioread8(ioaddr + 4);
2722 vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
2723 vp->stats.tx_packets += ioread8(ioaddr + 6);
2724 vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
2729 vp->stats.rx_bytes += ioread16(ioaddr + 10);
2730 vp->stats.tx_bytes += ioread16(ioaddr + 12);
2732 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
2733 vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
2734 vp->xstats.tx_deferred += ioread8(ioaddr + 8);
2736 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
2738 vp->stats.collisions = vp->xstats.tx_multiple_collisions
2739 + vp->xstats.tx_single_collisions
2740 + vp->xstats.tx_max_collisions;
2744 vp->stats.rx_bytes += (up & 0x0f) << 16;
2745 vp->stats.tx_bytes += (up & 0xf0) << 12;
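
Lines 2729-2745 widen the chip's 16-bit byte counters with a shared upper-bits register: its low nibble carries bits 19:16 of the Rx count and its high nibble bits 19:16 of the Tx count, which is why the two shifts differ:

    /* (up & 0xf0) << 12 is the same as ((up >> 4) & 0x0f) << 16:
     * both place the high nibble at bit 16 of the Tx byte count. */
    vp->stats.rx_bytes += (up & 0x0f) << 16;
    vp->stats.tx_bytes += (up & 0xf0) << 12;
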
2754 struct vortex_private *vp = netdev_priv(dev);
2755 void __iomem *ioaddr = vp->ioaddr;
2759 spin_lock_irqsave(&vp->lock, flags);
2761 rc = mii_nway_restart(&vp->mii);
2762 spin_unlock_irqrestore(&vp->lock, flags);
2768 struct vortex_private *vp = netdev_priv(dev);
2769 void __iomem *ioaddr = vp->ioaddr;
2773 spin_lock_irqsave(&vp->lock, flags);
2775 rc = mii_ethtool_gset(&vp->mii, cmd);
2776 spin_unlock_irqrestore(&vp->lock, flags);
2782 struct vortex_private *vp = netdev_priv(dev);
2783 void __iomem *ioaddr = vp->ioaddr;
2787 spin_lock_irqsave(&vp->lock, flags);
2789 rc = mii_ethtool_sset(&vp->mii, cmd);
2790 spin_unlock_irqrestore(&vp->lock, flags);
2812 struct vortex_private *vp = netdev_priv(dev);
2813 void __iomem *ioaddr = vp->ioaddr;
2816 spin_lock_irqsave(&vp->lock, flags);
2818 spin_unlock_irqrestore(&vp->lock, flags);
2820 data[0] = vp->xstats.tx_deferred;
2821 data[1] = vp->xstats.tx_max_collisions;
2822 data[2] = vp->xstats.tx_multiple_collisions;
2823 data[3] = vp->xstats.tx_single_collisions;
2824 data[4] = vp->xstats.rx_bad_ssd;
2843 struct vortex_private *vp = netdev_priv(dev);
2846 if (VORTEX_PCI(vp)) {
2847 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
2849 if (VORTEX_EISA(vp))
2850 sprintf(info->bus_info, vp->gendev->bus_id);
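
One review note on line 2850: bus_id is passed to sprintf as the format string, so a '%' in the id would be interpreted rather than copied. A bounded copy would be the safer 2.6-era equivalent (hedged suggestion, not from the source):

    strlcpy(info->bus_info, vp->gendev->bus_id, sizeof(info->bus_info));
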
2878 struct vortex_private *vp = netdev_priv(dev);
2879 void __iomem *ioaddr = vp->ioaddr;
2883 if(VORTEX_PCI(vp))
2884 state = VORTEX_PCI(vp)->current_state;
2889 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2890 spin_lock_irqsave(&vp->lock, flags);
2892 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2893 spin_unlock_irqrestore(&vp->lock, flags);
2895 pci_set_power_state(VORTEX_PCI(vp), state);
2907 struct vortex_private *vp = netdev_priv(dev);
2908 void __iomem *ioaddr = vp->ioaddr;
2933 struct vortex_private *vp = netdev_priv(dev);
2934 void __iomem *ioaddr = vp->ioaddr;
2938 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
2956 vp->large_frames = dev->mtu > 1500 || enable;
2960 if (vp->large_frames)
3013 struct vortex_private *vp = netdev_priv(dev);
3014 void __iomem *ioaddr = vp->ioaddr;
3043 struct vortex_private *vp = netdev_priv(dev);
3044 void __iomem *ioaddr = vp->ioaddr;
3074 struct vortex_private *vp = netdev_priv(dev);
3075 void __iomem *ioaddr = vp->ioaddr;
3077 if (vp->enable_wol) {
3085 pci_enable_wake(VORTEX_PCI(vp), 0, 1);
3088 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3096 struct vortex_private *vp;
3103 vp = netdev_priv(dev);
3105 if (vp->cb_fn_base)
3106 pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
3110 if (VORTEX_PCI(vp)) {
3111 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
3112 if (vp->pm_state_valid)
3113 pci_restore_state(VORTEX_PCI(vp));
3114 pci_disable_device(VORTEX_PCI(vp));
3117 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3118 vp->ioaddr + EL3_CMD);
3120 pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
3125 vp->rx_ring,
3126 vp->rx_ring_dma);
3127 if (vp->must_free_region)
3128 release_region(dev->base_addr, vp->io_size);
3167 struct vortex_private *vp;
3176 vp = compaq_net_device->priv;