Search scope: only in /asus-wl-520gu-7.0.1.45/src/linux/linux/drivers/net/

Lines Matching refs:vp

Each match below is prefixed with its line number in the source file. From the identifiers (struct vortex_private, struct boom_rx_desc, EL3_CMD) the file is evidently the 3Com 3c59x "Vortex/Boomerang" EtherLink driver.

37       vp->cur_tx and vp->tx_full.  This defeats the race between
55 - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
113 - Fixed a bug where, if vp->tx_full is set when the interface
138 - Fixed leakage of vp->rx_ring.
140 - Kill vp->tx_full (ANK)
160 - Use netif_running() instead of vp->open in suspend/resume.
728 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
822 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
965 struct vortex_private *vp;
982 dev = alloc_etherdev(sizeof(*vp));
989 vp = dev->priv;
1010 vp->enable_wol = 1;
1026 vp->drv_flags = vci->drv_flags;
1027 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1028 vp->io_size = vci->io_size;
1029 vp->card_idx = card_idx;
1033 vp->next_module = root_vortex_eisa_dev;
1042 vp->must_free_region = 1;
1066 spin_lock_init(&vp->lock);
1067 spin_lock_init(&vp->mdio_lock);
1068 vp->pdev = pdev;
1071 vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1073 &vp->rx_ring_dma);
1075 if (vp->rx_ring == 0)
1078 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1079 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
1086 vp->media_override = 7;
1088 vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
1089 if (vp->media_override != 7)
1090 vp->medialock = 1;
1091 vp->full_duplex = (option & 0x200) ? 1 : 0;
1092 vp->bus_master = (option & 16) ? 1 : 0;
1097 vp->full_duplex = 1;
1099 vp->flow_ctrl = 1;
1101 vp->enable_wol = 1;
1104 vp->force_fd = vp->full_duplex;
1105 vp->options = option;
1177 vp->cb_fn_base = ioremap(fn_st_addr, 128);
1179 if (!vp->cb_fn_base)
1184 print_name, fn_st_addr, vp->cb_fn_base);
1189 if (vp->drv_flags & INVERT_LED_PWR)
1191 if (vp->drv_flags & INVERT_MII_PWR)
1197 vp->info1 = eeprom[13];
1198 vp->info2 = eeprom[15];
1199 vp->capabilities = eeprom[16];
1201 if (vp->info1 & 0x8000) {
1202 vp->full_duplex = 1;
1211 vp->available_media = inw(ioaddr + Wn3_Options);
1212 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1213 vp->available_media = 0x40;
1226 vp->default_media = XCVR(config);
1227 if (vp->default_media == XCVR_NWAY)
1228 vp->has_nway = 1;
1229 vp->autoselect = AUTOSELECT(config);
1232 if (vp->media_override != 7) {
1234 print_name, vp->media_override,
1235 media_tbl[vp->media_override].name);
1236 dev->if_port = vp->media_override;
1238 dev->if_port = vp->default_media;
1261 vp->phys[phy_idx++] = phyx;
1273 vp->phys[0] = 24;
1275 vp->advertising = mdio_read(dev, vp->phys[0], 4);
1276 if (vp->full_duplex) {
1278 vp->advertising &= ~0x02A0;
1279 mdio_write(dev, vp->phys[0], 4, vp->advertising);
1284 if (vp->capabilities & CapBusMaster) {
1285 vp->full_bus_master_tx = 1;
1288 (vp->info2 & 1) ? "early" : "whole-frame" );
1290 vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1291 vp->bus_master = 0; /* AKPM: vortex only */
1296 if (vp->full_bus_master_tx) {
1300 if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
1321 if (pdev && vp->enable_wol) {
1322 vp->pm_state_valid = 1;
1323 pci_save_state(vp->pdev, vp->power_state);
1334 vp->rx_ring,
1335 vp->rx_ring_dma);
1337 if (vp->must_free_region)
1374 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1378 if (vp->pdev && vp->enable_wol) {
1379 pci_set_power_state(vp->pdev, 0); /* Go active */
1380 pci_restore_state(vp->pdev, vp->power_state);
1387 if (vp->media_override != 7) {
1389 dev->name, vp->media_override,
1390 media_tbl[vp->media_override].name);
1391 dev->if_port = vp->media_override;
1392 } else if (vp->autoselect) {
1393 if (vp->has_nway) {
1401 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1408 dev->if_port = vp->default_media;
1414 init_timer(&vp->timer);
1415 vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
1416 vp->timer.data = (unsigned long)dev;
1417 vp->timer.function = vortex_timer; /* timer handler */
1418 add_timer(&vp->timer);
1420 init_timer(&vp->rx_oom_timer);
1421 vp->rx_oom_timer.data = (unsigned long)dev;
1422 vp->rx_oom_timer.function = rx_oom_timer;
1428 vp->full_duplex = vp->force_fd;
1438 mii_reg1 = mdio_read(dev, vp->phys[0], 1);
1439 mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1445 vp->full_duplex = 1;
1448 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1452 dev->name, vp->phys[0],
1454 vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
1459 outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1461 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1490 if (vp->cb_fn_base) {
1492 if (vp->drv_flags & INVERT_LED_PWR)
1494 if (vp->drv_flags & INVERT_MII_PWR)
1524 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1525 vp->cur_rx = vp->dirty_rx = 0;
1529 outl(vp->rx_ring_dma, ioaddr + UpListPtr);
1531 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1532 vp->cur_tx = vp->dirty_tx = 0;
1533 if (vp->drv_flags & IS_BOOMERANG)
1537 vp->rx_ring[i].status = 0;
1539 vp->tx_skbuff[i] = 0;
1550 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1551 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1552 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1553 (vp->bus_master ? DMADone : 0);
1554 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1555 (vp->full_bus_master_rx ? 0 : RxComplete) |
1557 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1558 outw(vp->status_enable, ioaddr + EL3_CMD);
1562 outw(vp->intr_enable, ioaddr + EL3_CMD);
1563 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
1564 writel(0x8000, vp->cb_fn_base + 4);
1571 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1576 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1582 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1587 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1588 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1589 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1591 vp->rx_skbuff[i] = skb;
1596 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1602 if (vp->rx_skbuff[j]) {
1603 dev_kfree_skb(vp->rx_skbuff[j]);
1604 vp->rx_skbuff[j] = 0;
1611 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1629 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1641 if (vp->medialock)
1663 mii_status = mdio_read(dev, vp->phys[0], 1);
1669 int mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1670 if (! vp->force_fd && mii_reg5 != 0xffff) {
1673 if (vp->full_duplex != duplex) {
1674 vp->full_duplex = duplex;
1677 dev->name, vp->full_duplex ? "full" : "half",
1678 vp->phys[0], mii_reg5);
1681 outw( (vp->full_duplex ? 0x20 : 0) |
1683 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1707 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1709 dev->if_port = vp->default_media;
1742 mod_timer(&vp->timer, RUN_AT(next_tick));
1743 if (vp->deferred)
1750 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1774 if (vp->full_bus_master_tx)
1787 vp->stats.tx_errors++;
1788 if (vp->full_bus_master_tx) {
1790 if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
1791 outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1793 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
1795 if (vp->drv_flags & IS_BOOMERANG)
1799 vp->stats.tx_dropped++;
1818 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1840 if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
1841 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1845 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1870 vp->intr_enable &= ~StatsFull;
1876 outw(vp->status_enable, ioaddr + EL3_CMD);
1877 outw(vp->intr_enable, ioaddr + EL3_CMD);
1886 if (vp->full_bus_master_tx) {
1912 if (!vp->full_bus_master_tx)
1920 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1925 if (vp->bus_master) {
1928 outl( vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data, len, PCI_DMA_TODEVICE),
1931 vp->tx_skb = skb;
1959 if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
1960 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1975 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1978 int entry = vp->cur_tx % TX_RING_SIZE;
1979 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
1986 dev->name, vp->cur_tx);
1989 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
1997 vp->tx_skbuff[entry] = skb;
1999 vp->tx_ring[entry].next = 0;
2002 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2004 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
2007 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
2009 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2013 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
2015 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
2020 vp->tx_ring[entry].frag[i+1].addr =
2021 cpu_to_le32(pci_map_single(vp->pdev,
2026 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
2028 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
2032 vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
2033 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2034 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2037 spin_lock_irqsave(&vp->lock, flags);
2040 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2042 outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2043 vp->queued_packet++;
2046 vp->cur_tx++;
2047 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2058 spin_unlock_irqrestore(&vp->lock, flags);
2074 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2080 spin_lock(&vp->lock);
2091 status |= vp->deferred;
2092 vp->deferred = 0;
2120 pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2121 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2142 vp->deferred |= status;
2143 outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2145 outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2148 mod_timer(&vp->timer, jiffies + 1*HZ);
2159 spin_unlock(&vp->lock);
2170 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2181 spin_lock(&vp->lock);
2198 status |= vp->deferred;
2199 vp->deferred = 0;
2217 unsigned int dirty_tx = vp->dirty_tx;
2220 while (vp->cur_tx - dirty_tx > 0) {
2223 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2226 if (vp->tx_skbuff[entry]) {
2227 struct sk_buff *skb = vp->tx_skbuff[entry];
2231 pci_unmap_single(vp->pdev,
2232 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2233 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2236 pci_unmap_single(vp->pdev,
2237 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
2240 vp->tx_skbuff[entry] = 0;
2244 /* vp->stats.tx_packets++; Counted below. */
2247 vp->dirty_tx = dirty_tx;
2248 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2264 vp->deferred |= status;
2265 outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2267 outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2270 mod_timer(&vp->timer, jiffies + 1*HZ);
2275 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
2276 writel(0x8000, vp->cb_fn_base + 4);
2284 spin_unlock(&vp->lock);
2289 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2302 vp->stats.rx_errors++;
2303 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2304 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2305 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2306 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2307 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2321 if (vp->bus_master &&
2323 dma_addr_t dma = pci_map_single(vp->pdev, skb_put(skb, pkt_len),
2330 pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
2339 vp->stats.rx_packets++;
2349 vp->stats.rx_dropped++;
2359 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2360 int entry = vp->cur_rx % RX_RING_SIZE;
2363 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2368 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2375 vp->stats.rx_errors++;
2376 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2377 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2378 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2379 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2380 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2385 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2396 pci_dma_sync_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2399 vp->rx_skbuff[entry]->tail,
2401 vp->rx_copy++;
2404 skb = vp->rx_skbuff[entry];
2405 vp->rx_skbuff[entry] = NULL;
2407 pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2408 vp->rx_nocopy++;
2417 vp->rx_csumhits++;
2422 vp->stats.rx_packets++;
2424 entry = (++vp->cur_rx) % RX_RING_SIZE;
2427 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2429 entry = vp->dirty_rx % RX_RING_SIZE;
2430 if (vp->rx_skbuff[entry] == NULL) {
2438 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2439 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2444 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2445 vp->rx_skbuff[entry] = skb;
2447 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2461 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2463 spin_lock_irq(&vp->lock);
2464 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2468 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2470 spin_unlock_irq(&vp->lock);
2476 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2481 del_timer_sync(&vp->rx_oom_timer);
2482 del_timer_sync(&vp->timer);
2484 /* Turn off statistics ASAP. We update vp->stats below. */
2498 if (vp->full_bus_master_rx)
2500 if (vp->full_bus_master_tx)
2503 if (vp->pdev && vp->enable_wol) {
2504 pci_save_state(vp->pdev, vp->power_state);
2512 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2524 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2528 if ( vp->rx_csumhits &&
2529 ((vp->drv_flags & HAS_HWCKSM) == 0) &&
2530 (hw_checksums[vp->card_idx] == -1)) {
2538 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2540 if (vp->rx_skbuff[i]) {
2541 pci_unmap_single( vp->pdev, le32_to_cpu(vp->rx_ring[i].addr),
2543 dev_kfree_skb(vp->rx_skbuff[i]);
2544 vp->rx_skbuff[i] = 0;
2547 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2549 if (vp->tx_skbuff[i]) {
2550 struct sk_buff *skb = vp->tx_skbuff[i];
2555 pci_unmap_single(vp->pdev,
2556 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2557 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2560 pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
2563 vp->tx_skbuff[i] = 0;
2575 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2578 if (vp->full_bus_master_tx) {
2583 vp->full_bus_master_tx,
2584 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2585 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2588 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2592 &vp->tx_ring[i],
2594 le32_to_cpu(vp->tx_ring[i].frag[0].length),
2596 le32_to_cpu(vp->tx_ring[i].length),
2598 le32_to_cpu(vp->tx_ring[i].status));
2608 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2612 spin_lock_irqsave (&vp->lock, flags);
2614 spin_unlock_irqrestore (&vp->lock, flags);
2616 return &vp->stats;
2628 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2636 vp->stats.tx_carrier_errors += inb(ioaddr + 0);
2637 vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
2639 vp->stats.collisions += inb(ioaddr + 3);
2640 vp->stats.tx_window_errors += inb(ioaddr + 4);
2641 vp->stats.rx_fifo_errors += inb(ioaddr + 5);
2642 vp->stats.tx_packets += inb(ioaddr + 6);
2643 vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
2649 vp->stats.rx_bytes += inw(ioaddr + 10);
2650 vp->stats.tx_bytes += inw(ioaddr + 12);
2657 vp->stats.rx_bytes += (up & 0x0f) << 16;
2658 vp->stats.tx_bytes += (up & 0xf0) << 12;
2668 struct vortex_private *vp = dev->priv;
2679 if (vp->pdev)
2680 strcpy(info.bus_info, vp->pdev->slot_name);
2696 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2699 int phy = vp->phys[0] & 0x1f;
2789 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2796 spin_lock_bh(&vp->mdio_lock);
2817 spin_unlock_bh(&vp->mdio_lock);
2823 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2829 spin_lock_bh(&vp->mdio_lock);
2849 spin_unlock_bh(&vp->mdio_lock);
2858 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2869 pci_enable_wake(vp->pdev, 0, 1);
2870 pci_set_power_state(vp->pdev, 3);
2877 struct vortex_private *vp;
2884 vp = dev->priv;
2890 if (vp->pdev && vp->enable_wol) {
2891 pci_set_power_state(vp->pdev, 0); /* Go active */
2892 if (vp->pm_state_valid)
2893 pci_restore_state(vp->pdev, vp->power_state);
2899 vp->rx_ring,
2900 vp->rx_ring_dma);
2901 if (vp->must_free_region)
2902 release_region(dev->base_addr, vp->io_size);
2942 struct vortex_private *vp;
2948 vp = dev->priv;
2956 dev = vp->next_module;
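
A few recurring patterns in the matches above are worth calling out. The sketches below are illustrative reconstructions against the 2.4-era kernel APIs, not the driver's verbatim code; all type and function names other than the documented kernel calls (pci_alloc_consistent, pci_map_single, and friends) are stand-ins.

The matches at lines 1071-1079 show both descriptor rings carved out of a single coherent DMA allocation: the block is sized for the Rx ring plus the Tx ring, and vp->tx_ring / vp->tx_ring_dma are derived by offset. A minimal sketch of that layout:

    #include <linux/pci.h>

    /* Hypothetical descriptor layouts and sizes; the real driver uses
     * struct boom_rx_desc and struct boom_tx_desc. */
    struct rx_desc { u32 next, status, addr, length; };
    struct tx_desc { u32 next, status, addr, length; };

    #define RX_RING_SIZE 32
    #define TX_RING_SIZE 16

    struct rings {                     /* stand-in for vortex_private */
            struct rx_desc *rx_ring;   /* CPU view of the Rx ring */
            struct tx_desc *tx_ring;   /* CPU view of the Tx ring */
            dma_addr_t rx_ring_dma;    /* bus address of the whole block */
            dma_addr_t tx_ring_dma;    /* bus address of the Tx portion */
    };

    static int alloc_rings(struct pci_dev *pdev, struct rings *r)
    {
            /* One coherent allocation holds the Rx descriptors followed
             * immediately by the Tx descriptors. */
            r->rx_ring = pci_alloc_consistent(pdev,
                            sizeof(struct rx_desc) * RX_RING_SIZE +
                            sizeof(struct tx_desc) * TX_RING_SIZE,
                            &r->rx_ring_dma);
            if (r->rx_ring == NULL)
                    return -ENOMEM;

            /* The Tx ring is an offset into the same block, for both the
             * kernel virtual pointer and the bus address. */
            r->tx_ring = (struct tx_desc *)(r->rx_ring + RX_RING_SIZE);
            r->tx_ring_dma = r->rx_ring_dma +
                            sizeof(struct rx_desc) * RX_RING_SIZE;
            return 0;
    }

One allocation means a single pci_free_consistent() on teardown (visible at lines 2899-2900), and it guarantees both rings live in the same DMA-coherent region.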
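
Lines 1582-1611 initialize that Rx ring as a circle of bus addresses: each descriptor's next field holds the DMA address of the following descriptor, each slot gets a freshly mapped sk_buff, and after the loop the last descriptor is pointed back at the first. A sketch under the same stand-in types (PKT_BUF_SZ and LAST_FRAG are the driver's own constants; error unwinding is simplified relative to the original):

    static int init_rx_ring(struct pci_dev *pdev, struct rings *r,
                            struct sk_buff **rx_skbuff)
    {
            int i;

            for (i = 0; i < RX_RING_SIZE; i++) {
                    struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

                    /* Chain on the *bus* address of the next descriptor. */
                    r->rx_ring[i].next = cpu_to_le32(r->rx_ring_dma +
                                    sizeof(struct rx_desc) * (i + 1));
                    r->rx_ring[i].status = 0;     /* clear complete bit */
                    r->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

                    if (skb == NULL)
                            return -ENOMEM;       /* caller frees earlier skbs */
                    rx_skbuff[i] = skb;
                    r->rx_ring[i].addr = cpu_to_le32(pci_map_single(pdev,
                                    skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
            }
            /* Close the ring: the last descriptor wraps to the first. */
            r->rx_ring[RX_RING_SIZE - 1].next = cpu_to_le32(r->rx_ring_dma);
            return 0;
    }

The wrap at the end corresponds to line 1611 (vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma)); the NIC then follows the chain indefinitely while the host refills completed slots.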
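
The Tx path (lines 1975-2058 and 2217-2248) uses free-running producer/consumer counters rather than wrapping indices: cur_tx only ever increments on queue, dirty_tx only ever increments on reclaim, the slot is counter % TX_RING_SIZE, and the ring is full when cur_tx - dirty_tx reaches TX_RING_SIZE. The difference stays correct across unsigned overflow, which is the point of the scheme. A compressed sketch (tx_queue_one is a stand-in name):

    static int tx_queue_one(struct rings *r, unsigned int *cur_tx,
                            unsigned int dirty_tx, u32 status)
    {
            unsigned int entry = *cur_tx % TX_RING_SIZE;

            /* Full when the producer is a whole ring ahead of the
             * consumer; cf. the identical test at line 1989. */
            if (*cur_tx - dirty_tx >= TX_RING_SIZE)
                    return -EBUSY;                /* caller stops the queue */

            r->tx_ring[entry].status = cpu_to_le32(status);
            (*cur_tx)++;                          /* producer advances */
            return 0;
    }

On the completion side, the loop at line 2220 (while (vp->cur_tx - dirty_tx > 0)) walks the same counters forward, unmapping and freeing each tx_skbuff[entry] before publishing vp->dirty_tx at line 2247.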
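
Both interrupt handlers bound their work per invocation, and lines 2142-2148 / 2264-2270 show what happens when the budget runs out: the still-pending sources are parked in vp->deferred, masked at the NIC, acknowledged, and re-armed later; on the next entry the handler ORs vp->deferred back into status (lines 2091-2092 and 2198-2199) so nothing is lost. Reassembled from those matches, with work_done standing in for the driver's per-IRQ countdown:

    if (--work_done < 0) {
            /* Remember which sources are still asserted... */
            vp->deferred |= status;
            /* ...stop the NIC from raising them for now... */
            outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
                 ioaddr + EL3_CMD);
            /* ...ack what has been seen... */
            outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
            /* ...and let the media timer restore the mask shortly. */
            mod_timer(&vp->timer, jiffies + 1 * HZ);
    }

This is classic interrupt mitigation: under load the device is throttled to timer cadence instead of livelocking the CPU in its handler (the timer's re-enable path is the vp->deferred check at line 1743).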
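
Finally, the Rx completion path at lines 2385-2408 is the standard copybreak split: frames shorter than a threshold are copied into a small fresh skb so the large mapped buffer can stay in the ring (counted in vp->rx_copy), while larger frames hand the mapped skb up the stack and leave a hole for the refill loop (vp->rx_nocopy). Roughly, with rx_copybreak as the copy-size threshold and memcpy standing in for the driver's copy routine:

    if (pkt_len < rx_copybreak &&
        (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
            skb_reserve(skb, 2);          /* 16-byte align the IP header */
            /* Make the DMA'd data visible to the CPU, then copy it out;
             * the original buffer stays mapped in the ring. */
            pci_dma_sync_single(vp->pdev, dma, PKT_BUF_SZ,
                                PCI_DMA_FROMDEVICE);
            memcpy(skb_put(skb, pkt_len), vp->rx_skbuff[entry]->tail,
                   pkt_len);
            vp->rx_copy++;
    } else {
            /* Pass the big buffer up; NULL marks the slot for refill. */
            skb = vp->rx_skbuff[entry];
            vp->rx_skbuff[entry] = NULL;
            skb_put(skb, pkt_len);
            pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
            vp->rx_nocopy++;
    }

The refill loop at lines 2427-2447 then walks dirty_rx forward, allocating and mapping a replacement skb for every NULL slot, and falls back to the rx_oom_timer (lines 2438-2439) when allocation fails so reception can resume a second later.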