Lines Matching refs:vptr (only in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/net/)

232 static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
235 static void velocity_print_info(struct velocity_info *vptr);
246 static void velocity_free_rd_ring(struct velocity_info *vptr);
247 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
248 static int velocity_soft_reset(struct velocity_info *vptr);
249 static void mii_init(struct velocity_info *vptr, u32 mii_status);
251 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
252 static void velocity_print_link_status(struct velocity_info *vptr);
254 static void velocity_shutdown(struct velocity_info *vptr);
255 static void enable_flow_control_ability(struct velocity_info *vptr);
261 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
348 struct velocity_info *vptr = netdev_priv(dev);
355 list_del(&vptr->list);
359 iounmap(vptr->mac_regs);
459 * @vptr: velocity to program
465 static void velocity_init_cam_filter(struct velocity_info *vptr)
467 struct mac_regs __iomem * regs = vptr->mac_regs;
474 memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
475 memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
476 mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
477 mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
480 if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
483 if (vptr->options.vid != 0)
486 mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM);
487 vptr->vCAMmask[0] |= 1;
488 mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
499 * @vptr: velocity we are resetting
505 static void velocity_rx_reset(struct velocity_info *vptr)
508 struct mac_regs __iomem * regs = vptr->mac_regs;
511 vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
516 for (i = 0; i < vptr->options.numrx; ++i)
517 vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
519 writew(vptr->options.numrx, &regs->RBRDU);
520 writel(vptr->rd_pool_dma, &regs->RDBaseLo);
522 writew(vptr->options.numrx - 1, &regs->RDCSize);
527 * @vptr: velocity to init
534 static void velocity_init_registers(struct velocity_info *vptr,
537 struct mac_regs __iomem * regs = vptr->mac_regs;
546 netif_stop_queue(vptr->dev);
551 velocity_rx_reset(vptr);
555 mii_status = velocity_get_opt_media_mode(vptr);
556 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
557 velocity_print_link_status(vptr);
558 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
559 netif_wake_queue(vptr->dev);
562 enable_flow_control_ability(vptr);
576 velocity_soft_reset(vptr);
581 writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
587 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
588 mac_set_dma_length(regs, vptr->options.DMA_length);
599 velocity_init_cam_filter(vptr);
604 velocity_set_multi(vptr->dev);
611 vptr->int_mask = INT_MASK_DEF;
613 writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
614 writew(vptr->options.numrx - 1, &regs->RDCSize);
618 writew(vptr->options.numtx - 1, &regs->TDCSize);
620 for (i = 0; i < vptr->num_txq; i++) {
621 writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
625 init_flow_control_register(vptr);
630 mii_status = velocity_get_opt_media_mode(vptr);
631 netif_stop_queue(vptr->dev);
633 mii_init(vptr, mii_status);
635 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
636 velocity_print_link_status(vptr);
637 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
638 netif_wake_queue(vptr->dev);
641 enable_flow_control_ability(vptr);
643 mac_write_int_mask(vptr->int_mask, regs);
651 * @vptr: velocity to reset
657 static int velocity_soft_reset(struct velocity_info *vptr)
659 struct mac_regs __iomem * regs = vptr->mac_regs;
693 struct velocity_info *vptr;
713 vptr = netdev_priv(dev);
724 velocity_init_info(pdev, vptr, info);
726 vptr->dev = dev;
734 ret = velocity_get_pci_info(vptr, pdev);
746 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
752 vptr->mac_regs = regs;
756 dev->base_addr = vptr->ioaddr;
762 velocity_get_options(&vptr->options, velocity_nics, dev->name);
768 vptr->options.flags &= info->flags;
774 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
776 vptr->wol_opts = vptr->options.wol_opts;
777 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
779 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
794 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
805 velocity_print_info(vptr);
816 list_add(&vptr->list, &velocity_dev_list);
837 * @vptr: velocity
843 static void __devinit velocity_print_info(struct velocity_info *vptr)
845 struct net_device *dev = vptr->dev;
847 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
857 * @vptr: Velocity info
865 struct velocity_info *vptr,
868 memset(vptr, 0, sizeof(struct velocity_info));
870 vptr->pdev = pdev;
871 vptr->chip_id = info->chip_id;
872 vptr->num_txq = info->txqueue;
873 vptr->multicast_limit = MCAM_SIZE;
874 spin_lock_init(&vptr->lock);
875 INIT_LIST_HEAD(&vptr->list);
880 * @vptr: velocity device
887 static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
889 if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
894 vptr->ioaddr = pci_resource_start(pdev, 0);
895 vptr->memaddr = pci_resource_start(pdev, 1);
913 vptr->pdev = pdev;
920 * @vptr: Velocity to set up
926 static int velocity_init_rings(struct velocity_info *vptr)
938 psize = vptr->options.numrx * sizeof(struct rx_desc) +
939 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
945 pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
949 vptr->dev->name);
955 vptr->rd_ring = (struct rx_desc *) pool;
957 vptr->rd_pool_dma = pool_dma;
959 tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
960 vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
961 &vptr->tx_bufs_dma);
963 if (vptr->tx_bufs == NULL) {
965 vptr->dev->name);
966 pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
970 memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
972 i = vptr->options.numrx * sizeof(struct rx_desc);
975 for (i = 0; i < vptr->num_txq; i++) {
976 int offset = vptr->options.numtx * sizeof(struct tx_desc);
978 vptr->td_pool_dma[i] = pool_dma;
979 vptr->td_rings[i] = (struct tx_desc *) pool;
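
The fragments from velocity_init_rings() (source lines 926-979) show one coherent-DMA pool being carved up: all of the RX descriptors first, then numtx TX descriptors for each of num_txq queues, plus a separate tx_bufs allocation for bounce buffers. A rough userspace sketch of the same offset arithmetic, with malloc() standing in for pci_alloc_consistent() and stub descriptor types (not the driver's actual declarations):

/* Hypothetical illustration of the single-pool layout suggested by
 * velocity_init_rings(): [numrx RX descriptors][numtx TX descriptors] x num_txq.
 * malloc() stands in for pci_alloc_consistent(); the descriptor types are
 * placeholders and the sketch assumes num_txq <= 4. */
#include <stdlib.h>
#include <string.h>

struct rx_desc { char pad[16]; };
struct tx_desc { char pad[64]; };

struct rings {
	struct rx_desc *rd_ring;
	struct tx_desc *td_rings[4];
};

static int init_rings(struct rings *r, int numrx, int numtx, int num_txq)
{
	size_t psize = numrx * sizeof(struct rx_desc) +
		       (size_t)numtx * sizeof(struct tx_desc) * num_txq;
	char *pool = malloc(psize);          /* driver: pci_alloc_consistent() */
	if (!pool)
		return -1;
	memset(pool, 0, psize);

	r->rd_ring = (struct rx_desc *)pool; /* RX descriptors come first */
	pool += numrx * sizeof(struct rx_desc);

	for (int i = 0; i < num_txq; i++) {  /* then one TX ring per queue */
		r->td_rings[i] = (struct tx_desc *)pool;
		pool += numtx * sizeof(struct tx_desc);
	}
	return 0;
}

int main(void)
{
	struct rings r;
	return init_rings(&r, 64, 64, 1);    /* example sizes, not the driver defaults */
}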
988 * @vptr: Velocity to free from
993 static void velocity_free_rings(struct velocity_info *vptr)
997 size = vptr->options.numrx * sizeof(struct rx_desc) +
998 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
1000 pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
1002 size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
1004 pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
1007 static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
1009 struct mac_regs __iomem *regs = vptr->mac_regs;
1016 if (vptr->rd_filled < 4)
1021 unusable = vptr->rd_filled & 0x0003;
1022 dirty = vptr->rd_dirty - unusable;
1023 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
1024 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1025 vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
1028 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
1029 vptr->rd_filled = unusable;
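
velocity_give_many_rx_descs() (source lines 1007-1029) returns refilled RX descriptors to the NIC only in batches of four, carrying the remaining 0-3 over in rd_filled. A minimal standalone model of that arithmetic, with give_to_nic() standing in for the ownership-bit flip and the RBRDU register write:

/* Simplified, self-contained model of the "give back in multiples of 4"
 * policy seen in velocity_give_many_rx_descs().  The ring_model structure
 * and give_to_nic() are hypothetical; the real driver flips the descriptor
 * owner bit and writes the count to &regs->RBRDU. */
#include <stdio.h>

#define NUM_RX 64

struct ring_model {
	int rd_dirty;   /* first descriptor not yet handed back */
	int rd_filled;  /* descriptors refilled since the last hand-back */
};

static void give_to_nic(int idx)
{
	printf("descriptor %d now owned by NIC\n", idx);
}

static void give_many_rx_descs(struct ring_model *r)
{
	int avail, dirty, unusable;

	if (r->rd_filled < 4)              /* hardware wants batches of 4 */
		return;

	unusable = r->rd_filled & 0x0003;  /* keep the odd 0..3 for later */
	dirty = r->rd_dirty - unusable;
	for (avail = r->rd_filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : NUM_RX - 1;
		give_to_nic(dirty);
	}
	/* the real code then writes (rd_filled & 0xfffc) to regs->RBRDU */
	r->rd_filled = unusable;
}

int main(void)
{
	struct ring_model r = { .rd_dirty = 10, .rd_filled = 7 };
	give_many_rx_descs(&r);            /* hands back 4, keeps 3 pending */
	printf("left over: %d\n", r.rd_filled);
	return 0;
}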
1032 static int velocity_rx_refill(struct velocity_info *vptr)
1034 int dirty = vptr->rd_dirty, done = 0, ret = 0;
1037 struct rx_desc *rd = vptr->rd_ring + dirty;
1043 if (!vptr->rd_info[dirty].skb) {
1044 ret = velocity_alloc_rx_buf(vptr, dirty);
1049 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1050 } while (dirty != vptr->rd_curr);
1053 vptr->rd_dirty = dirty;
1054 vptr->rd_filled += done;
1055 velocity_give_many_rx_descs(vptr);
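
velocity_rx_refill() (source lines 1032-1055) walks the ring from rd_dirty toward rd_curr, allocating a buffer for any slot that lost its skb, then updates rd_dirty/rd_filled and hands the batch back. A simplified model of that walk; the OWNED_BY_NIC early-exit check is omitted and alloc_buf() stands in for velocity_alloc_rx_buf():

/* Userspace model of the velocity_rx_refill() walk.  Names are hypothetical;
 * the real code also bails out early when the descriptor is still owned by
 * the NIC. */
#include <stdbool.h>

#define NUM_RX 64

struct rx_model {
	int rd_dirty, rd_curr, rd_filled;
	bool has_skb[NUM_RX];
};

static int alloc_buf(struct rx_model *r, int idx)
{
	r->has_skb[idx] = true;   /* driver: dev_alloc_skb() + pci_map_single() */
	return 0;
}

static int rx_refill(struct rx_model *r)
{
	int dirty = r->rd_dirty, done = 0, ret = 0;

	do {
		if (!r->has_skb[dirty]) {
			ret = alloc_buf(r, dirty);
			if (ret < 0)
				break;
		}
		done++;
		dirty = (dirty < NUM_RX - 1) ? dirty + 1 : 0;
	} while (dirty != r->rd_curr);

	if (done) {
		r->rd_dirty = dirty;
		r->rd_filled += done;
		/* driver then calls velocity_give_many_rx_descs() */
	}
	return ret;
}

int main(void)
{
	struct rx_model r = { .rd_dirty = 0, .rd_curr = 0 };
	return rx_refill(&r);     /* first pass fills the whole empty ring */
}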
1063 * @vptr: velocity to configure
1069 static int velocity_init_rd_ring(struct velocity_info *vptr)
1073 vptr->options.numrx;
1075 vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
1076 if(vptr->rd_info == NULL)
1078 memset(vptr->rd_info, 0, rsize);
1080 vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
1082 ret = velocity_rx_refill(vptr);
1085 "%s: failed to allocate RX buffer.\n", vptr->dev->name);
1086 velocity_free_rd_ring(vptr);
1094 * @vptr: velocity to clean up
1100 static void velocity_free_rd_ring(struct velocity_info *vptr)
1104 if (vptr->rd_info == NULL)
1107 for (i = 0; i < vptr->options.numrx; i++) {
1108 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
1109 struct rx_desc *rd = vptr->rd_ring + i;
1115 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
1123 kfree(vptr->rd_info);
1124 vptr->rd_info = NULL;
1129 * @vptr: velocity
1136 static int velocity_init_td_ring(struct velocity_info *vptr)
1143 vptr->options.numtx;
1146 for (j = 0; j < vptr->num_txq; j++) {
1147 curr = vptr->td_pool_dma[j];
1149 vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL);
1150 if(vptr->td_infos[j] == NULL)
1153 kfree(vptr->td_infos[j]);
1156 memset(vptr->td_infos[j], 0, tsize);
1158 for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
1159 td = &(vptr->td_rings[j][i]);
1160 td_info = &(vptr->td_infos[j][i]);
1161 td_info->buf = vptr->tx_bufs +
1162 (j * vptr->options.numtx + i) * PKT_BUF_SZ;
1163 td_info->buf_dma = vptr->tx_bufs_dma +
1164 (j * vptr->options.numtx + i) * PKT_BUF_SZ;
1166 vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
1172 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1175 struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
1185 pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1197 * @vptr: velocity
1203 static void velocity_free_td_ring(struct velocity_info *vptr)
1207 for (j = 0; j < vptr->num_txq; j++) {
1208 if (vptr->td_infos[j] == NULL)
1210 for (i = 0; i < vptr->options.numtx; i++) {
1211 velocity_free_td_ring_entry(vptr, j, i);
1214 kfree(vptr->td_infos[j]);
1215 vptr->td_infos[j] = NULL;
1221 * @vptr: velocity
1229 static int velocity_rx_srv(struct velocity_info *vptr, int status)
1231 struct net_device_stats *stats = &vptr->stats;
1232 int rd_curr = vptr->rd_curr;
1236 struct rx_desc *rd = vptr->rd_ring + rd_curr;
1238 if (!vptr->rd_info[rd_curr].skb)
1250 if (velocity_receive_frame(vptr, rd_curr) < 0)
1263 vptr->dev->last_rx = jiffies;
1266 if (rd_curr >= vptr->options.numrx)
1270 vptr->rd_curr = rd_curr;
1272 if (works > 0 && velocity_rx_refill(vptr) < 0) {
1274 "%s: rx buf allocation failure\n", vptr->dev->name);
1320 struct velocity_info *vptr)
1329 new_skb->dev = vptr->dev;
1332 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
1347 * @vptr: velocity we are handling
1354 static inline void velocity_iph_realign(struct velocity_info *vptr,
1357 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
1368 * @vptr: velocity we are handling
1375 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1378 struct net_device_stats *stats = &vptr->stats;
1379 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
1380 struct rx_desc *rd = &(vptr->rd_ring[idx]);
1385 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
1391 vptr->stats.multicast++;
1395 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1396 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1402 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
1413 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
1414 velocity_iph_realign(vptr, skb, pkt_len);
1419 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
1423 skb->protocol = eth_type_trans(skb, vptr->dev);
1433 * @vptr: velocity
1442 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1444 struct rx_desc *rd = &(vptr->rd_ring[idx]);
1445 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
1447 rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
1456 rd_info->skb->dev = vptr->dev;
1457 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1464 rd->len = cpu_to_le32(vptr->rx_buf_sz);
1473 * @vptr: Velocity
1481 static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1489 struct net_device_stats *stats = &vptr->stats;
1491 for (qnum = 0; qnum < vptr->num_txq; qnum++) {
1492 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
1493 idx = (idx + 1) % vptr->options.numtx) {
1498 td = &(vptr->td_rings[qnum][idx]);
1499 tdinfo = &(vptr->td_infos[qnum][idx]);
1522 velocity_free_tx_buf(vptr, tdinfo);
1523 vptr->td_used[qnum]--;
1525 vptr->td_tail[qnum] = idx;
1527 if (AVAIL_TD(vptr, qnum) < 1) {
1535 if (netif_queue_stopped(vptr->dev) && (full == 0)
1536 && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1537 netif_wake_queue(vptr->dev);
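
velocity_tx_srv() (source lines 1481-1537) reclaims completed transmit descriptors per queue: it scans from td_tail while td_used is non-zero, frees each completed buffer, and finally wakes the queue when there is room again and the link is up. A stripped-down single-queue model of that bookkeeping, with hypothetical names and a boolean in place of the descriptor owner bit:

/* Simplified single-queue model of the td_tail / td_used reclaim loop in
 * velocity_tx_srv().  owned_by_nic[] stands in for the hardware owner bit. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX 8

struct txq_model {
	int td_tail;          /* oldest in-flight descriptor */
	int td_used;          /* descriptors currently in flight */
	bool owned_by_nic[NUM_TX];
};

static int tx_reclaim(struct txq_model *q)
{
	int idx, works = 0;

	for (idx = q->td_tail; q->td_used > 0; idx = (idx + 1) % NUM_TX) {
		if (q->owned_by_nic[idx])     /* NIC still transmitting this one */
			break;
		/* driver: velocity_free_tx_buf(vptr, tdinfo) and stats updates */
		q->td_used--;
		works++;
	}
	q->td_tail = idx;
	/* driver then calls netif_wake_queue() if the ring has room again */
	return works;
}

int main(void)
{
	struct txq_model q = { .td_tail = 0, .td_used = 3 };
	printf("reclaimed %d descriptors\n", tx_reclaim(&q));
	return 0;
}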
1544 * @vptr: velocity to report on
1551 static void velocity_print_link_status(struct velocity_info *vptr)
1554 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1555 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
1556 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1557 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1559 if (vptr->mii_status & VELOCITY_SPEED_1000)
1561 else if (vptr->mii_status & VELOCITY_SPEED_100)
1566 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1571 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1572 switch (vptr->options.spd_dpx) {
1593 * @vptr: velocity
1603 static void velocity_error(struct velocity_info *vptr, int status)
1607 struct mac_regs __iomem * regs = vptr->mac_regs;
1612 netif_stop_queue(vptr->dev);
1617 struct mac_regs __iomem * regs = vptr->mac_regs;
1620 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1621 vptr->mii_status = check_connection_type(regs);
1628 if (vptr->rev_id < REV_ID_VT3216_A0) {
1629 if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
1637 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
1649 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1650 netif_carrier_on(vptr->dev);
1652 vptr->mii_status |= VELOCITY_LINK_FAIL;
1653 netif_carrier_off(vptr->dev);
1656 velocity_print_link_status(vptr);
1657 enable_flow_control_ability(vptr);
1666 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1667 netif_stop_queue(vptr->dev);
1669 netif_wake_queue(vptr->dev);
1673 velocity_update_hw_mibs(vptr);
1675 mac_rx_queue_wake(vptr->mac_regs);
1680 * @vptr: velocity
1687 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
1699 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
1701 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
1723 struct velocity_info *vptr = netdev_priv(dev);
1726 vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
1728 ret = velocity_init_rings(vptr);
1732 ret = velocity_init_rd_ring(vptr);
1736 ret = velocity_init_td_ring(vptr);
1741 pci_set_power_state(vptr->pdev, PCI_D0);
1743 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1745 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
1749 pci_set_power_state(vptr->pdev, PCI_D3hot);
1753 mac_enable_int(vptr->mac_regs);
1755 vptr->flags |= VELOCITY_FLAGS_OPENED;
1760 velocity_free_td_ring(vptr);
1762 velocity_free_rd_ring(vptr);
1764 velocity_free_rings(vptr);
1780 struct velocity_info *vptr = netdev_priv(dev);
1787 vptr->dev->name);
1792 spin_lock_irqsave(&vptr->lock, flags);
1795 velocity_shutdown(vptr);
1797 velocity_free_td_ring(vptr);
1798 velocity_free_rd_ring(vptr);
1802 vptr->rx_buf_sz = 9 * 1024;
1804 vptr->rx_buf_sz = 8192;
1806 vptr->rx_buf_sz = 4 * 1024;
1808 ret = velocity_init_rd_ring(vptr);
1812 ret = velocity_init_td_ring(vptr);
1816 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1818 mac_enable_int(vptr->mac_regs);
1821 spin_unlock_irqrestore(&vptr->lock, flags);
1829 * @vptr: velocity to deactivate
1835 static void velocity_shutdown(struct velocity_info *vptr)
1837 struct mac_regs __iomem * regs = vptr->mac_regs;
1856 struct velocity_info *vptr = netdev_priv(dev);
1859 velocity_shutdown(vptr);
1861 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
1862 velocity_get_ip(vptr);
1867 pci_set_power_state(vptr->pdev, PCI_D3hot);
1870 velocity_free_td_ring(vptr);
1871 velocity_free_rd_ring(vptr);
1872 velocity_free_rings(vptr);
1874 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
1889 struct velocity_info *vptr = netdev_priv(dev);
1905 spin_lock_irqsave(&vptr->lock, flags);
1907 index = vptr->td_curr[qnum];
1908 td_ptr = &(vptr->td_rings[qnum][index]);
1909 tdinfo = &(vptr->td_infos[qnum][index]);
1948 tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
1960 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
1978 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
1987 if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
1988 td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
1997 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2011 prev = vptr->options.numtx - 1;
2013 vptr->td_used[qnum]++;
2014 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
2016 if (AVAIL_TD(vptr, qnum) < 1)
2019 td_ptr = &(vptr->td_rings[qnum][prev]);
2021 mac_tx_queue_wake(vptr->mac_regs, qnum);
2024 spin_unlock_irqrestore(&vptr->lock, flags);
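
The transmit path (source lines 1889-2024) is the producer side of the same accounting: the packet is mapped into the descriptor at td_curr, td_used is incremented, td_curr advances modulo numtx, and the queue is stopped once AVAIL_TD() drops below one. A producer counterpart to the reclaim sketch above, again with hypothetical helpers and no DMA mapping; AVAIL_TD() here mirrors the driver's macro in spirit only:

/* Producer-side counterpart to the reclaim model: enqueue one packet,
 * advance td_curr, and report whether the ring just became full. */
#include <stdbool.h>

#define NUM_TX 8

struct txq_prod {
	int td_curr;   /* next free descriptor */
	int td_used;   /* descriptors in flight */
};

#define AVAIL_TD(q) (NUM_TX - (q)->td_used)

static bool tx_enqueue(struct txq_prod *q)
{
	int index = q->td_curr;

	/* driver: fill vptr->td_rings[qnum][index], pci_map_single() the skb,
	 * set the VLAN tag and checksum bits, hand ownership to the NIC */
	(void)index;

	q->td_used++;
	q->td_curr = (index + 1) % NUM_TX;

	return AVAIL_TD(q) < 1;           /* true -> caller should stop the queue */
}

int main(void)
{
	struct txq_prod q = { 0 };
	while (!tx_enqueue(&q))
		;                         /* fill the ring until it reports full */
	return 0;
}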
2042 struct velocity_info *vptr = netdev_priv(dev);
2047 spin_lock(&vptr->lock);
2048 isr_status = mac_read_isr(vptr->mac_regs);
2052 spin_unlock(&vptr->lock);
2056 mac_disable_int(vptr->mac_regs);
2064 mac_write_isr(vptr->mac_regs, isr_status);
2066 velocity_error(vptr, isr_status);
2068 max_count += velocity_rx_srv(vptr, isr_status);
2070 max_count += velocity_tx_srv(vptr, isr_status);
2071 isr_status = mac_read_isr(vptr->mac_regs);
2072 if (max_count > vptr->options.int_works)
2079 spin_unlock(&vptr->lock);
2080 mac_enable_int(vptr->mac_regs);
2097 struct velocity_info *vptr = netdev_priv(dev);
2098 struct mac_regs __iomem * regs = vptr->mac_regs;
2107 } else if ((dev->mc_count > vptr->multicast_limit)
2113 int offset = MCAM_SIZE - vptr->multicast_limit;
2114 mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
2118 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
2121 mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
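
In velocity_set_multi() (source lines 2097-2121) multicast CAM entries are programmed starting at offset = MCAM_SIZE - multicast_limit, and the matching bit is set in the 8-byte mCAMmask before the mask is written back. The bit arithmetic in isolation, with example values and a hypothetical helper name:

/* Standalone model of the mCAMmask bit bookkeeping used when programming
 * multicast CAM entries.  MCAM_SIZE and the limit are example values. */
#include <stdint.h>
#include <stdio.h>

#define MCAM_SIZE 64

static void mark_cam_entries(uint8_t mask[8], int multicast_limit, int count)
{
	int offset = MCAM_SIZE - multicast_limit; /* entries sit at the CAM's tail */

	for (int i = 0; i < count; i++)
		mask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
}

int main(void)
{
	uint8_t mask[8] = { 0 };

	mark_cam_entries(mask, 32, 3);   /* e.g. three multicast addresses */
	for (int i = 0; i < 8; i++)
		printf("%02x ", mask[i]);
	printf("\n");
	return 0;
}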
2144 struct velocity_info *vptr = netdev_priv(dev);
2148 return &vptr->stats;
2150 spin_lock_irq(&vptr->lock);
2151 velocity_update_hw_mibs(vptr);
2152 spin_unlock_irq(&vptr->lock);
2154 vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2155 vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2156 vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2159 vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2163 vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2171 return &vptr->stats;
2187 struct velocity_info *vptr = netdev_priv(dev);
2194 pci_set_power_state(vptr->pdev, PCI_D0);
2207 pci_set_power_state(vptr->pdev, PCI_D3hot);
2275 * @vptr: velocity adapter
2281 static void mii_init(struct velocity_info *vptr, u32 mii_status)
2285 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
2290 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2296 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2297 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2299 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2303 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
2310 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2316 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2317 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2319 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2327 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
2331 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2336 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
2339 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
2472 * @vptr: velocity adapter
2479 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
2483 switch (vptr->options.spd_dpx) {
2500 vptr->mii_status = status;
2506 * @vptr: velocity
2511 static void mii_set_auto_on(struct velocity_info *vptr)
2513 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
2514 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
2516 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2521 static void mii_set_auto_off(struct velocity_info * vptr)
2523 MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2529 * @vptr: velocity interface
2535 static void set_mii_flow_control(struct velocity_info *vptr)
2538 switch (vptr->options.flow_cntl) {
2540 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2541 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2545 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2546 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2550 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2551 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2555 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2556 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2572 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
2575 struct mac_regs __iomem * regs = vptr->mac_regs;
2577 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
2578 curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
2581 set_mii_flow_control(vptr);
2587 vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
2588 vptr->mii_status=check_connection_type(vptr->mac_regs);
2594 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
2595 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
2606 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
2607 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2608 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
2611 mii_set_auto_on(vptr);
2633 if (vptr->rev_id < REV_ID_VT3216_A0)
2639 if (vptr->rev_id < REV_ID_VT3216_A0)
2643 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2650 /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
2651 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
2664 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
2666 mii_set_auto_on(vptr);
2667 /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
2669 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
2670 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
2754 * @vptr: velocity to configure
2760 static void enable_flow_control_ability(struct velocity_info *vptr)
2763 struct mac_regs __iomem * regs = vptr->mac_regs;
2765 switch (vptr->options.flow_cntl) {
2816 struct velocity_info *vptr = netdev_priv(dev);
2818 pci_set_power_state(vptr->pdev, PCI_D0);
2832 struct velocity_info *vptr = netdev_priv(dev);
2834 pci_set_power_state(vptr->pdev, PCI_D3hot);
2839 struct velocity_info *vptr = netdev_priv(dev);
2840 struct mac_regs __iomem * regs = vptr->mac_regs;
2842 status = check_connection_type(vptr->mac_regs);
2873 struct velocity_info *vptr = netdev_priv(dev);
2878 curr_status = check_connection_type(vptr->mac_regs);
2889 velocity_set_media_mode(vptr, new_status);
2896 struct velocity_info *vptr = netdev_priv(dev);
2897 struct mac_regs __iomem * regs = vptr->mac_regs;
2903 struct velocity_info *vptr = netdev_priv(dev);
2906 strcpy(info->bus_info, pci_name(vptr->pdev));
2911 struct velocity_info *vptr = netdev_priv(dev);
2915 if (vptr->wol_opts & VELOCITY_WOL_PHY)
2918 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2920 if (vptr->wol_opts & VELOCITY_WOL_ARP)
2922 memcpy(&wol->sopass, vptr->wol_passwd, 6);
2927 struct velocity_info *vptr = netdev_priv(dev);
2931 vptr->wol_opts = VELOCITY_WOL_MAGIC;
2935 vptr->wol_opts|=VELOCITY_WOL_PHY;
2936 vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
2941 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
2942 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2945 vptr->wol_opts |= VELOCITY_WOL_UCAST;
2946 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2949 vptr->wol_opts |= VELOCITY_WOL_ARP;
2950 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2952 memcpy(vptr->wol_passwd, wol->sopass, 6);
2992 struct velocity_info *vptr = netdev_priv(dev);
2993 struct mac_regs __iomem * regs = vptr->mac_regs;
3005 if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
3011 spin_lock_irqsave(&vptr->lock, flags);
3012 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
3013 spin_unlock_irqrestore(&vptr->lock, flags);
3014 check_connection_type(vptr->mac_regs);
3028 * @vptr: velocity
3037 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context)
3039 struct mac_regs __iomem * regs = vptr->mac_regs;
3056 * @vptr: velocity
3063 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3065 struct mac_regs __iomem * regs = vptr->mac_regs;
3132 static int velocity_set_wol(struct velocity_info *vptr)
3134 struct mac_regs __iomem * regs = vptr->mac_regs;
3148 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3152 if (vptr->wol_opts & VELOCITY_WOL_UCAST) {
3156 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3167 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3181 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3182 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3183 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
3185 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
3188 if (vptr->mii_status & VELOCITY_SPEED_1000)
3189 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
3212 struct velocity_info *vptr = netdev_priv(dev);
3215 if(!netif_running(vptr->dev))
3218 netif_device_detach(vptr->dev);
3220 spin_lock_irqsave(&vptr->lock, flags);
3223 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3224 velocity_get_ip(vptr);
3225 velocity_save_context(vptr, &vptr->context);
3226 velocity_shutdown(vptr);
3227 velocity_set_wol(vptr);
3231 velocity_save_context(vptr, &vptr->context);
3232 velocity_shutdown(vptr);
3239 spin_unlock_irqrestore(&vptr->lock, flags);
3246 struct velocity_info *vptr = netdev_priv(dev);
3250 if(!netif_running(vptr->dev))
3257 mac_wol_reset(vptr->mac_regs);
3259 spin_lock_irqsave(&vptr->lock, flags);
3260 velocity_restore_context(vptr, &vptr->context);
3261 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3262 mac_disable_int(vptr->mac_regs);
3264 velocity_tx_srv(vptr, 0);
3266 for (i = 0; i < vptr->num_txq; i++) {
3267 if (vptr->td_used[i]) {
3268 mac_tx_queue_wake(vptr->mac_regs, i);
3272 mac_enable_int(vptr->mac_regs);
3273 spin_unlock_irqrestore(&vptr->lock, flags);
3274 netif_device_attach(vptr->dev);
3287 struct velocity_info *vptr;
3291 list_for_each_entry(vptr, &velocity_dev_list, list) {
3292 if (vptr->dev == dev) {
3293 velocity_get_ip(vptr);