Lines Matching defs:vptr (search restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/)

398 	struct velocity_info *vptr = netdev_priv(dev);
401 iounmap(vptr->mac_regs);
495 * @vptr: velocity to program
500 static void velocity_init_cam_filter(struct velocity_info *vptr)
502 struct mac_regs __iomem *regs = vptr->mac_regs;
509 memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
510 memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
511 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
512 mac_set_cam_mask(regs, vptr->mCAMmask);
515 if (vptr->vlgrp) {
518 if (!vlan_group_get_device(vptr->vlgrp, 0))
522 if (vlan_group_get_device(vptr->vlgrp, vid)) {
524 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
529 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
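The CAM filter code above (lines 509-529) tracks which of the chip's CAM slots are active with an 8-byte bitmap: slot i lives at bit (i % 8) of byte (i / 8). A minimal stand-alone model of that arithmetic, with the slot numbers chosen purely for illustration (the multicast path at line 1146 later uses the equivalent form (offset + i) & 7):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char cam_mask[8];              /* 64 CAM slots, one bit each */
        int active[] = { 0, 5, 17, 63 };        /* example slots in use */

        memset(cam_mask, 0, sizeof(cam_mask));  /* as at lines 509-510 */
        for (unsigned int i = 0; i < sizeof(active) / sizeof(active[0]); i++)
            cam_mask[active[i] / 8] |= 0x1 << (active[i] % 8);

        for (int b = 0; b < 8; b++)
            printf("mask[%d] = 0x%02x\n", b, cam_mask[b]);
        return 0;
    }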
536 struct velocity_info *vptr = netdev_priv(dev);
538 vptr->vlgrp = grp;
543 struct velocity_info *vptr = netdev_priv(dev);
545 spin_lock_irq(&vptr->lock);
546 velocity_init_cam_filter(vptr);
547 spin_unlock_irq(&vptr->lock);
552 struct velocity_info *vptr = netdev_priv(dev);
554 spin_lock_irq(&vptr->lock);
555 vlan_group_set_device(vptr->vlgrp, vid, NULL);
556 velocity_init_cam_filter(vptr);
557 spin_unlock_irq(&vptr->lock);
560 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
562 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
567 * @vptr: velocity we are resetting
572 static void velocity_rx_reset(struct velocity_info *vptr)
575 struct mac_regs __iomem *regs = vptr->mac_regs;
578 velocity_init_rx_ring_indexes(vptr);
583 for (i = 0; i < vptr->options.numrx; ++i)
584 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
586 writew(vptr->options.numrx, &regs->RBRDU);
587 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
589 writew(vptr->options.numrx - 1, &regs->RDCSize);
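velocity_rx_reset (lines 572-589) re-arms the whole RX ring: every descriptor is handed back to the NIC via its ownership bit, the chip is told how many descriptors are available (RBRDU) and where the ring lives, and the ring size is programmed minus one, which appears to be how the hardware expects it. A stand-alone sketch of that hand-off, with the OWNED_BY_NIC value assumed for illustration:

    #include <stdio.h>

    #define OWNED_BY_NIC 0x80000000u            /* assumed ownership flag */
    #define NUMRX 64                            /* example options.numrx */

    int main(void)
    {
        unsigned int ring_len[NUMRX] = { 0 };

        /* Give every descriptor to the NIC, as the loop at line 583 does. */
        for (int i = 0; i < NUMRX; i++)
            ring_len[i] |= OWNED_BY_NIC;

        printf("RBRDU   <- %d\n", NUMRX);       /* descriptors available */
        printf("RDCSize <- %d\n", NUMRX - 1);   /* ring size, minus one */
        return 0;
    }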
594 * @vptr: velocity adapter
600 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
604 switch (vptr->options.spd_dpx) {
621 vptr->mii_status = status;
794 * @vptr: velocity interface
799 static void set_mii_flow_control(struct velocity_info *vptr)
802 switch (vptr->options.flow_cntl) {
804 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
814 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
819 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
820 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
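The MII_REG_BITS_ON/OFF helpers used throughout (e.g. lines 804-820) look like read-modify-write accessors over PHY registers. A user-space model of that idiom, using the standard MII register and pause-advertisement bit values from linux/mii.h and a fake register file standing in for vptr->mac_regs:

    #include <stdio.h>

    #define MII_ADVERTISE        0x04
    #define ADVERTISE_PAUSE_CAP  0x0400
    #define ADVERTISE_PAUSE_ASYM 0x0800

    static unsigned short phy_regs[32];         /* stands in for the PHY */

    static void mii_bits_on(unsigned short bits, int reg)
    {
        phy_regs[reg] |= bits;                  /* read, set, write back */
    }

    static void mii_bits_off(unsigned short bits, int reg)
    {
        phy_regs[reg] &= ~bits;                 /* read, clear, write back */
    }

    int main(void)
    {
        /* One of the four combinations above: asymmetric pause only. */
        mii_bits_off(ADVERTISE_PAUSE_CAP, MII_ADVERTISE);
        mii_bits_on(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE);
        printf("ADVERTISE = 0x%04x\n", phy_regs[MII_ADVERTISE]);
        return 0;
    }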
829 * @vptr: velocity
833 static void mii_set_auto_on(struct velocity_info *vptr)
835 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
836 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
838 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
885 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
888 struct mac_regs __iomem *regs = vptr->mac_regs;
890 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
891 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
894 set_mii_flow_control(vptr);
900 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
901 vptr->mii_status = check_connection_type(vptr->mac_regs);
907 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
908 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
918 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
919 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
920 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
923 mii_set_auto_on(vptr);
945 if (vptr->rev_id < REV_ID_VT3216_A0)
951 if (vptr->rev_id < REV_ID_VT3216_A0)
955 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
962 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
963 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
976 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
978 mii_set_auto_on(vptr);
979 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
981 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
982 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
988 * @vptr: velocity to report on
994 static void velocity_print_link_status(struct velocity_info *vptr)
997 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
998 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
999 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1000 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1002 if (vptr->mii_status & VELOCITY_SPEED_1000)
1004 else if (vptr->mii_status & VELOCITY_SPEED_100)
1009 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1014 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1015 switch (vptr->options.spd_dpx) {
1036 * @vptr: velocity to configure
1041 static void enable_flow_control_ability(struct velocity_info *vptr)
1044 struct mac_regs __iomem *regs = vptr->mac_regs;
1046 switch (vptr->options.flow_cntl) {
1088 * @vptr: velocity to reset
1093 static int velocity_soft_reset(struct velocity_info *vptr)
1095 struct mac_regs __iomem *regs = vptr->mac_regs;
1124 struct velocity_info *vptr = netdev_priv(dev);
1125 struct mac_regs __iomem *regs = vptr->mac_regs;
1134 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1140 int offset = MCAM_SIZE - vptr->multicast_limit;
1141 mac_get_cam_mask(regs, vptr->mCAMmask);
1146 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1150 mac_set_cam_mask(regs, vptr->mCAMmask);
1166 * @vptr: velocity adapter
1171 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1175 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1180 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1186 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1187 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1189 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1193 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1206 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1207 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1209 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1217 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1221 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1226 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1229 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1239 static void setup_queue_timers(struct velocity_info *vptr)
1242 if (vptr->rev_id >= REV_ID_VT3216_A0) {
1246 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1248 txqueue_timer = vptr->options.txqueue_timer;
1249 rxqueue_timer = vptr->options.rxqueue_timer;
1252 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1253 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1259 * @vptr: velocity adapter
1264 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1266 struct mac_regs __iomem *regs = vptr->mac_regs;
1267 u16 tx_intsup = vptr->options.tx_intsup;
1268 u16 rx_intsup = vptr->options.rx_intsup;
1271 vptr->int_mask = INT_MASK_DEF;
1276 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1285 vptr->int_mask &= ~ISR_PRXI;
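setup_adaptive_interrupts (lines 1264-1285) starts from the default mask and removes the per-packet TX/RX interrupt bits whenever a suppression threshold is configured, so completion interrupts are coalesced by the hardware instead. A sketch of that masking, with bit values invented purely for illustration:

    #include <stdio.h>

    #define ISR_PRXI     0x0001                 /* placeholder bit values */
    #define ISR_PTXI     0x0002
    #define INT_MASK_DEF (ISR_PRXI | ISR_PTXI | 0x00f0)

    int main(void)
    {
        unsigned int int_mask = INT_MASK_DEF;   /* as at line 1271 */
        int tx_intsup = 7, rx_intsup = 31;      /* example thresholds */

        if (tx_intsup)
            int_mask &= ~ISR_PTXI;              /* coalesce TX completions */
        if (rx_intsup)
            int_mask &= ~ISR_PRXI;              /* coalesce RX completions */

        printf("int_mask = 0x%04x\n", int_mask);
        return 0;
    }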
1296 * @vptr: velocity to init
1302 static void velocity_init_registers(struct velocity_info *vptr,
1305 struct mac_regs __iomem *regs = vptr->mac_regs;
1314 netif_stop_queue(vptr->dev);
1319 velocity_rx_reset(vptr);
1323 mii_status = velocity_get_opt_media_mode(vptr);
1324 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1325 velocity_print_link_status(vptr);
1326 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1327 netif_wake_queue(vptr->dev);
1330 enable_flow_control_ability(vptr);
1344 velocity_soft_reset(vptr);
1349 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
1355 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1356 mac_set_dma_length(regs, vptr->options.DMA_length);
1367 velocity_init_cam_filter(vptr);
1372 velocity_set_multi(vptr->dev);
1379 setup_adaptive_interrupts(vptr);
1381 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1382 writew(vptr->options.numrx - 1, &regs->RDCSize);
1386 writew(vptr->options.numtx - 1, &regs->TDCSize);
1388 for (i = 0; i < vptr->tx.numq; i++) {
1389 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1393 init_flow_control_register(vptr);
1398 mii_status = velocity_get_opt_media_mode(vptr);
1399 netif_stop_queue(vptr->dev);
1401 mii_init(vptr, mii_status);
1403 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1404 velocity_print_link_status(vptr);
1405 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1406 netif_wake_queue(vptr->dev);
1409 enable_flow_control_ability(vptr);
1411 mac_write_int_mask(vptr->int_mask, regs);
1417 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1419 struct mac_regs __iomem *regs = vptr->mac_regs;
1426 if (vptr->rx.filled < 4)
1431 unusable = vptr->rx.filled & 0x0003;
1432 dirty = vptr->rx.dirty - unusable;
1433 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1434 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1435 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1438 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1439 vptr->rx.filled = unusable;
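velocity_give_many_rx_descs (lines 1417-1439) returns refilled RX descriptors to the NIC only in blocks of four: the low two bits of rx.filled are held back as "unusable", and dirty is walked backwards while each descriptor's ownership bit is set. A user-space model of that index arithmetic, with the ring size and counters chosen as examples:

    #include <stdio.h>

    #define NUMRX 8                             /* example options.numrx */

    int main(void)
    {
        int filled = 7, start_dirty = 5;        /* example refill state */
        int unusable = filled & 0x0003;         /* remainder below a block of 4 */
        int dirty = start_dirty - unusable;

        for (int avail = filled & 0xfffc; avail; avail--) {
            dirty = (dirty > 0) ? dirty - 1 : NUMRX - 1;
            printf("descriptor %d -> OWNED_BY_NIC\n", dirty);
        }
        printf("RBRDU <- %d, filled kept at %d\n", filled & 0xfffc, unusable);
        return 0;
    }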
1444 * @vptr: Velocity to set up
1449 static int velocity_init_dma_rings(struct velocity_info *vptr)
1451 struct velocity_opt *opt = &vptr->options;
1454 struct pci_dev *pdev = vptr->pdev;
1465 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1469 vptr->dev->name);
1473 vptr->rx.ring = pool;
1474 vptr->rx.pool_dma = pool_dma;
1479 for (i = 0; i < vptr->tx.numq; i++) {
1480 vptr->tx.rings[i] = pool;
1481 vptr->tx.pool_dma[i] = pool_dma;
1489 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1491 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
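velocity_set_rxbufsize (line 1491) keeps the stock buffer size for standard frames and switches to mtu + 32 for jumbo MTUs. A worked example, assuming the PKT_BUF_SZ value from via-velocity.h:

    #include <stdio.h>

    #define ETH_DATA_LEN 1500
    #define PKT_BUF_SZ   1564                   /* assumed from via-velocity.h */

    int main(void)
    {
        int mtus[] = { 1500, 4000 };

        for (int i = 0; i < 2; i++) {
            int buf_sz = (mtus[i] <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtus[i] + 32;
            printf("mtu %4d -> rx.buf_sz %d\n", mtus[i], buf_sz);
        }
        return 0;
    }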
1496 * @vptr: velocity
1504 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1506 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1507 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1509 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1519 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1520 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1527 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1534 static int velocity_rx_refill(struct velocity_info *vptr)
1536 int dirty = vptr->rx.dirty, done = 0;
1539 struct rx_desc *rd = vptr->rx.ring + dirty;
1545 if (!vptr->rx.info[dirty].skb) {
1546 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1550 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1551 } while (dirty != vptr->rx.curr);
1554 vptr->rx.dirty = dirty;
1555 vptr->rx.filled += done;
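velocity_rx_refill (lines 1534-1555) walks the ring forward from rx.dirty until it meets rx.curr, allocating an skb for every empty slot and counting how many descriptors became usable. A stand-alone model of that walk (the real loop also stops early on any descriptor still owned by the NIC):

    #include <stdio.h>

    #define NUMRX 8                             /* example options.numrx */

    int main(void)
    {
        int dirty = 6, curr = 2, done = 0;      /* example ring state */
        int have_skb[NUMRX] = { 0 };

        do {
            if (!have_skb[dirty])
                have_skb[dirty] = 1;            /* velocity_alloc_rx_buf() */
            done++;
            dirty = (dirty < NUMRX - 1) ? dirty + 1 : 0;
        } while (dirty != curr);

        printf("refilled %d slots, dirty now %d\n", done, dirty);
        return 0;
    }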
1563 * @vptr: velocity to clean up
1568 static void velocity_free_rd_ring(struct velocity_info *vptr)
1572 if (vptr->rx.info == NULL)
1575 for (i = 0; i < vptr->options.numrx; i++) {
1576 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1577 struct rx_desc *rd = vptr->rx.ring + i;
1583 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1591 kfree(vptr->rx.info);
1592 vptr->rx.info = NULL;
1599 * @vptr: velocity to configure
1604 static int velocity_init_rd_ring(struct velocity_info *vptr)
1608 vptr->rx.info = kcalloc(vptr->options.numrx,
1610 if (!vptr->rx.info)
1613 velocity_init_rx_ring_indexes(vptr);
1615 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1617 "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1618 velocity_free_rd_ring(vptr);
1629 * @vptr: velocity
1635 static int velocity_init_td_ring(struct velocity_info *vptr)
1640 for (j = 0; j < vptr->tx.numq; j++) {
1642 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1645 if (!vptr->tx.infos[j]) {
1647 kfree(vptr->tx.infos[j]);
1651 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1658 * @vptr: Velocity to free from
1662 static void velocity_free_dma_rings(struct velocity_info *vptr)
1664 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1665 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1667 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1671 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1675 velocity_set_rxbufsize(vptr, mtu);
1677 ret = velocity_init_dma_rings(vptr);
1681 ret = velocity_init_rd_ring(vptr);
1685 ret = velocity_init_td_ring(vptr);
1692 velocity_free_rd_ring(vptr);
1694 velocity_free_dma_rings(vptr);
1700 * @vptr: velocity
1706 static void velocity_free_tx_buf(struct velocity_info *vptr,
1725 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1734 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1737 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1746 pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1758 * @vptr: velocity
1763 static void velocity_free_td_ring(struct velocity_info *vptr)
1767 for (j = 0; j < vptr->tx.numq; j++) {
1768 if (vptr->tx.infos[j] == NULL)
1770 for (i = 0; i < vptr->options.numtx; i++)
1771 velocity_free_td_ring_entry(vptr, j, i);
1773 kfree(vptr->tx.infos[j]);
1774 vptr->tx.infos[j] = NULL;
1779 static void velocity_free_rings(struct velocity_info *vptr)
1781 velocity_free_td_ring(vptr);
1782 velocity_free_rd_ring(vptr);
1783 velocity_free_dma_rings(vptr);
1788 * @vptr: velocity
1797 static void velocity_error(struct velocity_info *vptr, int status)
1801 struct mac_regs __iomem *regs = vptr->mac_regs;
1806 netif_stop_queue(vptr->dev);
1811 struct mac_regs __iomem *regs = vptr->mac_regs;
1814 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1815 vptr->mii_status = check_connection_type(regs);
1822 if (vptr->rev_id < REV_ID_VT3216_A0) {
1823 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1831 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1836 setup_queue_timers(vptr);
1844 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1845 netif_carrier_on(vptr->dev);
1847 vptr->mii_status |= VELOCITY_LINK_FAIL;
1848 netif_carrier_off(vptr->dev);
1851 velocity_print_link_status(vptr);
1852 enable_flow_control_ability(vptr);
1861 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1862 netif_stop_queue(vptr->dev);
1864 netif_wake_queue(vptr->dev);
1868 velocity_update_hw_mibs(vptr);
1870 mac_rx_queue_wake(vptr->mac_regs);
1875 * @vptr: Velocity
1881 static int velocity_tx_srv(struct velocity_info *vptr)
1889 struct net_device_stats *stats = &vptr->dev->stats;
1891 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1892 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1893 idx = (idx + 1) % vptr->options.numtx) {
1898 td = &(vptr->tx.rings[qnum][idx]);
1899 tdinfo = &(vptr->tx.infos[qnum][idx]);
1922 velocity_free_tx_buf(vptr, tdinfo, td);
1923 vptr->tx.used[qnum]--;
1925 vptr->tx.tail[qnum] = idx;
1927 if (AVAIL_TD(vptr, qnum) < 1)
1934 if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1935 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1936 netif_wake_queue(vptr->dev);
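velocity_tx_srv (lines 1881-1936) reclaims completed TX slots per queue: it walks from tx.tail while tx.used is positive, frees each transmitted buffer, and finally wakes the queue if it had been stopped, room exists and the link is up. A model of the ring walk, ignoring the ownership check that makes the real loop stop early on descriptors the NIC still holds:

    #include <stdio.h>

    #define NUMTX 4                             /* example options.numtx */

    int main(void)
    {
        int tail = 1, used = 3;                 /* example queue state */
        int idx = tail;

        while (used > 0) {
            printf("reclaim TX slot %d\n", idx);
            used--;
            idx = (idx + 1) % NUMTX;
        }
        tail = idx;                             /* as at line 1925 */
        printf("tail now %d, used %d\n", tail, used);
        return 0;
    }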
1978 struct velocity_info *vptr)
1984 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1998 * @vptr: velocity we are handling
2005 static inline void velocity_iph_realign(struct velocity_info *vptr,
2008 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2017 * @vptr: velocity we are handling
2023 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2026 struct net_device_stats *stats = &vptr->dev->stats;
2027 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2028 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2033 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2043 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2044 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2050 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2061 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2062 velocity_iph_realign(vptr, skb, pkt_len);
2067 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2071 skb->protocol = eth_type_trans(skb, vptr->dev);
2073 if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
2074 vlan_hwaccel_rx(skb, vptr->vlgrp,
2087 * @vptr: velocity
2093 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2095 struct net_device_stats *stats = &vptr->dev->stats;
2096 int rd_curr = vptr->rx.curr;
2100 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2102 if (!vptr->rx.info[rd_curr].skb)
2114 if (velocity_receive_frame(vptr, rd_curr) < 0)
2128 if (rd_curr >= vptr->options.numrx)
2133 vptr->rx.curr = rd_curr;
2135 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2136 velocity_give_many_rx_descs(vptr);
2144 struct velocity_info *vptr = container_of(napi,
2149 spin_lock_irqsave(&vptr->lock, flags);
2154 rx_done = velocity_rx_srv(vptr, budget / 2);
2155 velocity_tx_srv(vptr);
2156 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2157 velocity_tx_srv(vptr);
2162 mac_enable_int(vptr->mac_regs);
2164 spin_unlock_irqrestore(&vptr->lock, flags);
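The poll routine (lines 2144-2164) splits its NAPI budget: an RX pass with half the budget, a TX completion pass, a second RX pass with whatever budget remains, and interrupts are re-enabled once the budget was not exhausted. A toy model of the budget split:

    #include <stdio.h>

    /* Consume up to budget_left packets from a pending backlog. */
    static int rx_srv(int *pending, int budget_left)
    {
        int done = (*pending < budget_left) ? *pending : budget_left;
        *pending -= done;
        return done;
    }

    int main(void)
    {
        int budget = 64, pending = 40;

        int rx_done = rx_srv(&pending, budget / 2);     /* first RX pass */
        /* velocity_tx_srv() would run here */
        rx_done += rx_srv(&pending, budget - rx_done);  /* second RX pass */

        printf("rx_done %d of budget %d -> %s\n", rx_done, budget,
               rx_done < budget ? "napi_complete" : "stay scheduled");
        return 0;
    }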
2182 struct velocity_info *vptr = netdev_priv(dev);
2185 spin_lock(&vptr->lock);
2186 isr_status = mac_read_isr(vptr->mac_regs);
2190 spin_unlock(&vptr->lock);
2195 mac_write_isr(vptr->mac_regs, isr_status);
2197 if (likely(napi_schedule_prep(&vptr->napi))) {
2198 mac_disable_int(vptr->mac_regs);
2199 __napi_schedule(&vptr->napi);
2203 velocity_error(vptr, isr_status);
2205 spin_unlock(&vptr->lock);
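The interrupt handler (lines 2182-2205) reads the status register, writes the same value back (which looks like a write-one-to-clear acknowledge), then disables chip interrupts and hands the rest of the work to NAPI. A model of the write-one-to-clear step, with the register behaviour assumed:

    #include <stdio.h>

    int main(void)
    {
        unsigned int hw_isr = 0x0007;           /* bits latched by the device */
        unsigned int isr_status = hw_isr;       /* mac_read_isr() */

        hw_isr &= ~isr_status;                  /* mac_write_isr(isr_status) */

        printf("handled 0x%04x, ISR now 0x%04x\n", isr_status, hw_isr);
        return 0;
    }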
2222 struct velocity_info *vptr = netdev_priv(dev);
2225 ret = velocity_init_rings(vptr, dev->mtu);
2230 pci_set_power_state(vptr->pdev, PCI_D0);
2232 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2234 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2238 pci_set_power_state(vptr->pdev, PCI_D3hot);
2239 velocity_free_rings(vptr);
2243 velocity_give_many_rx_descs(vptr);
2245 mac_enable_int(vptr->mac_regs);
2247 napi_enable(&vptr->napi);
2248 vptr->flags |= VELOCITY_FLAGS_OPENED;
2255 * @vptr: velocity to deactivate
2260 static void velocity_shutdown(struct velocity_info *vptr)
2262 struct mac_regs __iomem *regs = vptr->mac_regs;
2282 struct velocity_info *vptr = netdev_priv(dev);
2287 vptr->dev->name);
2310 tmp_vptr->pdev = vptr->pdev;
2311 tmp_vptr->options = vptr->options;
2312 tmp_vptr->tx.numq = vptr->tx.numq;
2318 spin_lock_irqsave(&vptr->lock, flags);
2321 velocity_shutdown(vptr);
2323 rx = vptr->rx;
2324 tx = vptr->tx;
2326 vptr->rx = tmp_vptr->rx;
2327 vptr->tx = tmp_vptr->tx;
2334 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2336 velocity_give_many_rx_descs(vptr);
2338 mac_enable_int(vptr->mac_regs);
2341 spin_unlock_irqrestore(&vptr->lock, flags);
2364 struct velocity_info *vptr = netdev_priv(dev);
2365 struct mac_regs __iomem *regs = vptr->mac_regs;
2375 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2379 spin_lock_irqsave(&vptr->lock, flags);
2380 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2381 spin_unlock_irqrestore(&vptr->lock, flags);
2382 check_connection_type(vptr->mac_regs);
2404 struct velocity_info *vptr = netdev_priv(dev);
2411 pci_set_power_state(vptr->pdev, PCI_D0);
2424 pci_set_power_state(vptr->pdev, PCI_D3hot);
2442 struct velocity_info *vptr = netdev_priv(dev);
2448 spin_lock_irq(&vptr->lock);
2449 velocity_update_hw_mibs(vptr);
2450 spin_unlock_irq(&vptr->lock);
2452 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2453 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2454 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2457 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2461 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2481 struct velocity_info *vptr = netdev_priv(dev);
2483 napi_disable(&vptr->napi);
2485 velocity_shutdown(vptr);
2487 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2488 velocity_get_ip(vptr);
2493 pci_set_power_state(vptr->pdev, PCI_D3hot);
2495 velocity_free_rings(vptr);
2497 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2512 struct velocity_info *vptr = netdev_priv(dev);
2535 spin_lock_irqsave(&vptr->lock, flags);
2537 index = vptr->tx.curr[qnum];
2538 td_ptr = &(vptr->tx.rings[qnum][index]);
2539 tdinfo = &(vptr->tx.infos[qnum][index]);
2549 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2559 tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2571 if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
2591 prev = vptr->options.numtx - 1;
2593 vptr->tx.used[qnum]++;
2594 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2596 if (AVAIL_TD(vptr, qnum) < 1)
2599 td_ptr = &(vptr->tx.rings[qnum][prev]);
2601 mac_tx_queue_wake(vptr->mac_regs, qnum);
2603 spin_unlock_irqrestore(&vptr->lock, flags);
2627 * @vptr: Velocity info
2634 struct velocity_info *vptr,
2637 memset(vptr, 0, sizeof(struct velocity_info));
2639 vptr->pdev = pdev;
2640 vptr->chip_id = info->chip_id;
2641 vptr->tx.numq = info->txqueue;
2642 vptr->multicast_limit = MCAM_SIZE;
2643 spin_lock_init(&vptr->lock);
2648 * @vptr: velocity device
2654 static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2656 vptr->rev_id = pdev->revision;
2660 vptr->ioaddr = pci_resource_start(pdev, 0);
2661 vptr->memaddr = pci_resource_start(pdev, 1);
2679 vptr->pdev = pdev;
2686 * @vptr: velocity
2691 static void __devinit velocity_print_info(struct velocity_info *vptr)
2693 struct net_device *dev = vptr->dev;
2695 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2702 struct velocity_info *vptr = netdev_priv(dev);
2703 struct mac_regs __iomem *regs = vptr->mac_regs;
2723 struct velocity_info *vptr;
2742 vptr = netdev_priv(dev);
2753 velocity_init_info(pdev, vptr, info);
2755 vptr->dev = dev;
2763 ret = velocity_get_pci_info(vptr, pdev);
2775 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2781 vptr->mac_regs = regs;
2785 dev->base_addr = vptr->ioaddr;
2793 velocity_get_options(&vptr->options, velocity_nics, drv_string);
2799 vptr->options.flags &= info->flags;
2805 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2807 vptr->wol_opts = vptr->options.wol_opts;
2808 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2810 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2815 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2826 vptr->mii_status |= VELOCITY_LINK_FAIL;
2829 velocity_print_info(vptr);
2887 static int velocity_set_wol(struct velocity_info *vptr)
2889 struct mac_regs __iomem *regs = vptr->mac_regs;
2903 if (vptr->wol_opts & VELOCITY_WOL_PHY)
2907 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2910 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2921 memcpy(arp->ar_tip, vptr->ip_addr, 4);
2935 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2936 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2937 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2939 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2942 if (vptr->mii_status & VELOCITY_SPEED_1000)
2943 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2965 * @vptr: velocity
2973 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2975 struct mac_regs __iomem *regs = vptr->mac_regs;
2993 struct velocity_info *vptr = netdev_priv(dev);
2996 if (!netif_running(vptr->dev))
2999 netif_device_detach(vptr->dev);
3001 spin_lock_irqsave(&vptr->lock, flags);
3004 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3005 velocity_get_ip(vptr);
3006 velocity_save_context(vptr, &vptr->context);
3007 velocity_shutdown(vptr);
3008 velocity_set_wol(vptr);
3012 velocity_save_context(vptr, &vptr->context);
3013 velocity_shutdown(vptr);
3020 spin_unlock_irqrestore(&vptr->lock, flags);
3026 * @vptr: velocity
3032 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3034 struct mac_regs __iomem *regs = vptr->mac_regs;
3062 struct velocity_info *vptr = netdev_priv(dev);
3066 if (!netif_running(vptr->dev))
3073 mac_wol_reset(vptr->mac_regs);
3075 spin_lock_irqsave(&vptr->lock, flags);
3076 velocity_restore_context(vptr, &vptr->context);
3077 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3078 mac_disable_int(vptr->mac_regs);
3080 velocity_tx_srv(vptr);
3082 for (i = 0; i < vptr->tx.numq; i++) {
3083 if (vptr->tx.used[i])
3084 mac_tx_queue_wake(vptr->mac_regs, i);
3087 mac_enable_int(vptr->mac_regs);
3088 spin_unlock_irqrestore(&vptr->lock, flags);
3089 netif_device_attach(vptr->dev);
3120 struct velocity_info *vptr = netdev_priv(dev);
3122 pci_set_power_state(vptr->pdev, PCI_D0);
3135 struct velocity_info *vptr = netdev_priv(dev);
3137 pci_set_power_state(vptr->pdev, PCI_D3hot);
3142 struct velocity_info *vptr = netdev_priv(dev);
3143 struct mac_regs __iomem *regs = vptr->mac_regs;
3145 status = check_connection_type(vptr->mac_regs);
3176 struct velocity_info *vptr = netdev_priv(dev);
3181 curr_status = check_connection_type(vptr->mac_regs);
3192 velocity_set_media_mode(vptr, new_status);
3199 struct velocity_info *vptr = netdev_priv(dev);
3202 strcpy(info->bus_info, pci_name(vptr->pdev));
3207 struct velocity_info *vptr = netdev_priv(dev);
3211 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3214 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3216 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3218 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3223 struct velocity_info *vptr = netdev_priv(dev);
3227 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3231 vptr->wol_opts |= VELOCITY_WOL_PHY;
3232 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3237 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3238 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3241 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3242 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3245 vptr->wol_opts |= VELOCITY_WOL_ARP;
3246 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3248 memcpy(vptr->wol_passwd, wol->sopass, 6);
3308 struct velocity_info *vptr = netdev_priv(dev);
3310 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3311 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3313 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3314 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3322 struct velocity_info *vptr = netdev_priv(dev);
3337 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3338 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3340 set_pending_timer_val(&vptr->options.rxqueue_timer,
3342 set_pending_timer_val(&vptr->options.txqueue_timer,
3346 spin_lock_irqsave(&vptr->lock, flags);
3347 mac_disable_int(vptr->mac_regs);
3348 setup_adaptive_interrupts(vptr);
3349 setup_queue_timers(vptr);
3351 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3352 mac_clear_isr(vptr->mac_regs);
3353 mac_enable_int(vptr->mac_regs);
3354 spin_unlock_irqrestore(&vptr->lock, flags);