Lines Matching refs:ap
(in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/)

Each line below is a source line that references "ap"; the leading number is that
line's number in the source file. The fragments are from the Alteon AceNIC / Tigon
Gigabit Ethernet driver (acenic.c).

94 #define ACE_IS_TIGON_I(ap)	0
95 #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
97 #define ACE_IS_TIGON_I(ap) (ap->version == 1)
98 #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
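
The two pairs of definitions above (source lines 94-95 and 97-98) are evidently the two
arms of a build-time conditional: with Tigon I support compiled out the helpers collapse
to constants, otherwise they read the per-adapter fields. A minimal sketch of the
presumed surrounding conditional; the CONFIG_ACENIC_OMIT_TIGON_I symbol is assumed here,
it does not appear in the matches:

    #ifdef CONFIG_ACENIC_OMIT_TIGON_I
    /* Tigon I support compiled out: the checks become compile-time constants. */
    #define ACE_IS_TIGON_I(ap)       0
    #define ACE_TX_RING_ENTRIES(ap)  MAX_TX_RING_ENTRIES
    #else
    /* Both chip revisions supported: decide per adapter at run time. */
    #define ACE_IS_TIGON_I(ap)       (ap->version == 1)
    #define ACE_TX_RING_ENTRIES(ap)  ap->tx_ring_entries
    #endif
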
462 struct ace_private *ap;
475 ap = dev->priv;
476 ap->pdev = pdev;
477 ap->name = pci_name(pdev);
511 pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
514 if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
517 ap->name);
518 ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
519 pci_write_config_word(ap->pdev, PCI_COMMAND,
520 ap->pci_command);
524 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
525 if (ap->pci_latency <= 0x40) {
526 ap->pci_latency = 0x40;
527 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
536 ap->regs = ioremap(dev->base_addr, 0x4000);
537 if (!ap->regs) {
540 ap->name, boards_found);
548 ap->name);
551 ap->name);
555 printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
558 printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
563 ap->name);
567 printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
570 printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
578 if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
590 ap->board_idx = BOARD_IDX_OVERFLOW;
592 ap->board_idx = boards_found;
594 ap->board_idx = BOARD_IDX_STATIC;
604 ap->name = dev->name;
606 if (ap->pci_using_dac)
624 struct ace_private *ap = netdev_priv(dev);
625 struct ace_regs __iomem *regs = ap->regs;
631 if (ap->version >= 2)
652 struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
658 ringp = &ap->skb->rx_std_skbuff[i];
660 pci_unmap_page(ap->pdev, mapping,
664 ap->rx_std_ring[i].size = 0;
665 ap->skb->rx_std_skbuff[i].skb = NULL;
670 if (ap->version >= 2) {
672 struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
678 ringp = &ap->skb->rx_mini_skbuff[i];
680 pci_unmap_page(ap->pdev, mapping,
684 ap->rx_mini_ring[i].size = 0;
685 ap->skb->rx_mini_skbuff[i].skb = NULL;
692 struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
697 ringp = &ap->skb->rx_jumbo_skbuff[i];
699 pci_unmap_page(ap->pdev, mapping,
703 ap->rx_jumbo_ring[i].size = 0;
704 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
735 struct ace_private *ap = netdev_priv(dev);
738 if (ap->rx_std_ring != NULL) {
744 pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
745 ap->rx_ring_base_dma);
746 ap->rx_std_ring = NULL;
747 ap->rx_jumbo_ring = NULL;
748 ap->rx_mini_ring = NULL;
749 ap->rx_return_ring = NULL;
751 if (ap->evt_ring != NULL) {
753 pci_free_consistent(ap->pdev, size, ap->evt_ring,
754 ap->evt_ring_dma);
755 ap->evt_ring = NULL;
757 if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
759 pci_free_consistent(ap->pdev, size, ap->tx_ring,
760 ap->tx_ring_dma);
762 ap->tx_ring = NULL;
764 if (ap->evt_prd != NULL) {
765 pci_free_consistent(ap->pdev, sizeof(u32),
766 (void *)ap->evt_prd, ap->evt_prd_dma);
767 ap->evt_prd = NULL;
769 if (ap->rx_ret_prd != NULL) {
770 pci_free_consistent(ap->pdev, sizeof(u32),
771 (void *)ap->rx_ret_prd,
772 ap->rx_ret_prd_dma);
773 ap->rx_ret_prd = NULL;
775 if (ap->tx_csm != NULL) {
776 pci_free_consistent(ap->pdev, sizeof(u32),
777 (void *)ap->tx_csm, ap->tx_csm_dma);
778 ap->tx_csm = NULL;
785 struct ace_private *ap = netdev_priv(dev);
794 ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
795 &ap->rx_ring_base_dma);
796 if (ap->rx_std_ring == NULL)
799 ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
800 ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
801 ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
805 ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
807 if (ap->evt_ring == NULL)
814 if (!ACE_IS_TIGON_I(ap)) {
817 ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
818 &ap->tx_ring_dma);
820 if (ap->tx_ring == NULL)
824 ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
825 &ap->evt_prd_dma);
826 if (ap->evt_prd == NULL)
829 ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
830 &ap->rx_ret_prd_dma);
831 if (ap->rx_ret_prd == NULL)
834 ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
835 &ap->tx_csm_dma);
836 if (ap->tx_csm == NULL)
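
Source lines 785-836 above are the descriptor-allocation path (ace_allocate_descriptors()
in the upstream driver; the function name itself is not among the matches). A condensed
sketch of the pattern the fragments trace, assuming the linux-2.6 consistent-DMA API; the
fail label and its comment are assumptions about the elided error path:

    static int ace_allocate_descriptors(struct net_device *dev)
    {
            struct ace_private *ap = netdev_priv(dev);
            int size;

            /* The four RX rings share one coherent DMA block; the jumbo,
             * mini and return ring pointers are offsets into it. */
            size = (sizeof(struct rx_desc) *
                    (RX_STD_RING_ENTRIES + RX_JUMBO_RING_ENTRIES +
                     RX_MINI_RING_ENTRIES + RX_RETURN_RING_ENTRIES));
            ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
                                                   &ap->rx_ring_base_dma);
            if (ap->rx_std_ring == NULL)
                    goto fail;
            ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
            ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
            ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

            /* The event ring, the TX ring (kept in host memory only when the
             * board is not a Tigon I), and the evt_prd / rx_ret_prd / tx_csm
             * mailbox words are allocated the same way, each with its own
             * goto fail (source lines 805-836). */

            return 0;

    fail:
            /* The caller unwinds whatever was allocated before the failure. */
            return 1;
    }

The matching free path at source lines 735-778 releases the same objects with
pci_free_consistent(), NULL-checking each pointer first.
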
854 struct ace_private *ap;
856 ap = netdev_priv(dev);
860 if (ap->info)
861 pci_free_consistent(ap->pdev, sizeof(struct ace_info),
862 ap->info, ap->info_dma);
863 kfree(ap->skb);
864 kfree(ap->trace_buf);
869 iounmap(ap->regs);
891 struct ace_private *ap;
902 ap = netdev_priv(dev);
903 regs = ap->regs;
905 board_idx = ap->board_idx;
949 ap->version = 1;
950 ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
966 ap->version = 2;
967 ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
1037 pdev = ap->pdev;
1057 ap->pci_latency);
1070 if (ap->version >= 2) {
1077 if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1078 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1080 ap->pci_command);
1084 } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1105 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1107 ap->pci_command);
1145 ap->pci_using_dac = 1;
1147 ap->pci_using_dac = 0;
1158 if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
1159 &ap->info_dma))) {
1163 ap->info = info;
1168 if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
1183 spin_lock_init(&ap->debug_lock);
1184 ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
1185 ap->last_std_rx = 0;
1186 ap->last_mini_rx = 0;
1189 memset(ap->info, 0, sizeof(struct ace_info));
1190 memset(ap->skb, 0, sizeof(struct ace_skb));
1193 ap->fw_running = 0;
1195 tmp_ptr = ap->info_dma;
1199 memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
1201 set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
1204 *(ap->evt_prd) = 0;
1206 set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
1219 tmp_ptr = ap->info_dma;
1223 set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
1228 memset(ap->rx_std_ring, 0,
1232 ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
1234 ap->rx_std_skbprd = 0;
1235 atomic_set(&ap->cur_rx_bufs, 0);
1238 (ap->rx_ring_base_dma +
1244 memset(ap->rx_jumbo_ring, 0,
1248 ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
1250 ap->rx_jumbo_skbprd = 0;
1251 atomic_set(&ap->cur_jumbo_bufs, 0);
1253 memset(ap->rx_mini_ring, 0,
1256 if (ap->version >= 2) {
1258 (ap->rx_ring_base_dma +
1267 ap->rx_mini_ring[i].flags =
1275 ap->rx_mini_skbprd = 0;
1276 atomic_set(&ap->cur_mini_bufs, 0);
1279 (ap->rx_ring_base_dma +
1287 memset(ap->rx_return_ring, 0,
1290 set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
1291 *(ap->rx_ret_prd) = 0;
1295 if (ACE_IS_TIGON_I(ap)) {
1296 ap->tx_ring = (struct tx_desc *) regs->Window;
1299 writel(0, (void __iomem *)ap->tx_ring + i * 4);
1303 memset(ap->tx_ring, 0,
1306 set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
1309 info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
1315 if (!ACE_IS_TIGON_I(ap))
1322 set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
1341 ap->name, ACE_MAX_MOD_PARMS);
1367 if(ap->version >= 2)
1380 ap->name);
1393 "forcing auto negotiation\n", ap->name);
1401 "negotiation\n", ap->name);
1404 if ((option & 0x400) && (ap->version >= 2)) {
1406 ap->name);
1411 ap->link = tmp;
1413 if (ap->version >= 2)
1416 if (ACE_IS_TIGON_I(ap))
1418 if (ap->version == 2)
1429 ap->cur_rx = 0;
1430 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
1433 ace_set_txprd(regs, ap, 0);
1439 memset(&ap->stats, 0, sizeof(ap->stats));
1459 while (time_before(jiffies, myjif) && !ap->fw_running)
1462 if (!ap->fw_running) {
1463 printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
1465 ace_dump_trace(ap);
1478 if (ap->version >= 2)
1492 if (!test_and_set_bit(0, &ap->std_refill_busy))
1493 ace_load_std_rx_ring(ap, RX_RING_SIZE);
1496 ap->name);
1497 if (ap->version >= 2) {
1498 if (!test_and_set_bit(0, &ap->mini_refill_busy))
1499 ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
1502 "the RX mini ring\n", ap->name);
1514 struct ace_private *ap = netdev_priv(dev);
1515 struct ace_regs __iomem *regs = ap->regs;
1516 int board_idx = ap->board_idx;
1553 struct ace_private *ap = netdev_priv(dev);
1554 struct ace_regs __iomem *regs = ap->regs;
1561 if (*ap->tx_csm != ap->tx_ret_csm) {
1574 struct ace_private *ap = netdev_priv((struct net_device *)dev);
1577 cur_size = atomic_read(&ap->cur_rx_bufs);
1579 !test_and_set_bit(0, &ap->std_refill_busy)) {
1583 ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
1586 if (ap->version >= 2) {
1587 cur_size = atomic_read(&ap->cur_mini_bufs);
1589 !test_and_set_bit(0, &ap->mini_refill_busy)) {
1594 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
1598 cur_size = atomic_read(&ap->cur_jumbo_bufs);
1599 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
1600 !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
1604 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
1606 ap->tasklet_pending = 0;
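
Source lines 1574-1606 above are the body of ace_tasklet(): each RX ring is topped up
only when its buffer count has dropped below a low-water mark and no other context is
already refilling it. A minimal sketch of that guard for the standard ring; the
RX_LOW_STD_THRES name is assumed by analogy with the RX_LOW_JUMBO_THRES constant visible
at source line 1599:

    cur_size = atomic_read(&ap->cur_rx_bufs);
    if ((cur_size < RX_LOW_STD_THRES) &&
        !test_and_set_bit(0, &ap->std_refill_busy)) {
            /* The busy bit doubles as a lock and a "refill in flight"
             * marker; ace_load_std_rx_ring() clears it when it is done. */
            ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
    }

The mini ring (Tigon II only, lines 1586-1594) and the jumbo ring (lines 1598-1604) are
handled the same way, and the handler finally clears ap->tasklet_pending so the
interrupt path can schedule it again.
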
1613 static void ace_dump_trace(struct ace_private *ap)
1625 static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1627 struct ace_regs __iomem *regs = ap->regs;
1631 prefetchw(&ap->cur_rx_bufs);
1633 idx = ap->rx_std_skbprd;
1645 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1649 ap->skb->rx_std_skbuff[idx].skb = skb;
1650 pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1653 rd = &ap->rx_std_ring[idx];
1663 atomic_add(i, &ap->cur_rx_bufs);
1664 ap->rx_std_skbprd = idx;
1666 if (ACE_IS_TIGON_I(ap)) {
1670 cmd.idx = ap->rx_std_skbprd;
1678 clear_bit(0, &ap->std_refill_busy);
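
Source lines 1625-1678 are ace_load_std_rx_ring(), the refill routine itself. A condensed
sketch of its per-buffer loop and producer update, assuming the 2.6-era DMA mapping API;
ACE_STD_BUFSIZE, the C_SET_RX_PRD_IDX command, the cmd.evt/cmd.code fields and the
RxStdPrd mailbox (assumed by analogy with the RxJumboPrd write at source line 1893) do
not appear in the matches:

    idx = ap->rx_std_skbprd;

    for (i = 0; i < nr_bufs; i++) {
            struct sk_buff *skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
            struct rx_desc *rd;
            dma_addr_t mapping;

            if (!skb)
                    break;

            /* Map the buffer for device writes; remember both the skb and
             * the DMA handle so the RX completion path can unmap it. */
            mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
                                   offset_in_page(skb->data),
                                   ACE_STD_BUFSIZE, PCI_DMA_FROMDEVICE);
            ap->skb->rx_std_skbuff[idx].skb = skb;
            pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
                               mapping, mapping);

            rd = &ap->rx_std_ring[idx];
            set_aceaddr(&rd->addr, mapping);
            rd->size = ACE_STD_BUFSIZE;
            rd->idx = idx;
            idx = (idx + 1) % RX_STD_RING_ENTRIES;
    }

    atomic_add(i, &ap->cur_rx_bufs);
    ap->rx_std_skbprd = idx;

    /* Tigon I learns the new producer index via a firmware command;
     * Tigon II takes it directly in a mailbox register. */
    if (ACE_IS_TIGON_I(ap)) {
            struct cmd cmd;

            cmd.evt = C_SET_RX_PRD_IDX;
            cmd.code = 0;
            cmd.idx = ap->rx_std_skbprd;
            ace_issue_cmd(regs, &cmd);
    } else
            writel(idx, &regs->RxStdPrd);

    clear_bit(0, &ap->std_refill_busy);

The mini and jumbo variants at source lines 1688-1732 and 1745-1795 follow the same
structure with their own ring, skbuff array and producer index.
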
1688 static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1690 struct ace_regs __iomem *regs = ap->regs;
1693 prefetchw(&ap->cur_mini_bufs);
1695 idx = ap->rx_mini_skbprd;
1706 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1710 ap->skb->rx_mini_skbuff[idx].skb = skb;
1711 pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1714 rd = &ap->rx_mini_ring[idx];
1724 atomic_add(i, &ap->cur_mini_bufs);
1726 ap->rx_mini_skbprd = idx;
1732 clear_bit(0, &ap->mini_refill_busy);
1745 static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
1747 struct ace_regs __iomem *regs = ap->regs;
1750 idx = ap->rx_jumbo_skbprd;
1762 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1766 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1767 pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1770 rd = &ap->rx_jumbo_ring[idx];
1780 atomic_add(i, &ap->cur_jumbo_bufs);
1781 ap->rx_jumbo_skbprd = idx;
1783 if (ACE_IS_TIGON_I(ap)) {
1787 cmd.idx = ap->rx_jumbo_skbprd;
1795 clear_bit(0, &ap->jumbo_refill_busy);
1812 struct ace_private *ap;
1814 ap = netdev_priv(dev);
1817 switch (ap->evt_ring[evtcsm].evt) {
1820 ap->name);
1821 ap->fw_running = 1;
1828 u16 code = ap->evt_ring[evtcsm].code;
1832 u32 state = readl(&ap->regs->GigLnkState);
1835 ap->name,
1843 ap->name);
1847 "UP\n", ap->name);
1851 "state %02x\n", ap->name, code);
1856 switch(ap->evt_ring[evtcsm].code) {
1859 ap->name);
1863 "error\n", ap->name);
1867 ap->name);
1871 ap->name, ap->evt_ring[evtcsm].code);
1878 if (ap->skb->rx_jumbo_skbuff[i].skb) {
1879 ap->rx_jumbo_ring[i].size = 0;
1880 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
1881 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
1882 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
1886 if (ACE_IS_TIGON_I(ap)) {
1891 ace_issue_cmd(ap->regs, &cmd);
1893 writel(0, &((ap->regs)->RxJumboPrd));
1897 ap->jumbo = 0;
1898 ap->rx_jumbo_skbprd = 0;
1900 ap->name);
1901 clear_bit(0, &ap->jumbo_refill_busy);
1906 ap->name, ap->evt_ring[evtcsm].evt);
1917 struct ace_private *ap = netdev_priv(dev);
1923 prefetchw(&ap->cur_rx_bufs);
1924 prefetchw(&ap->cur_mini_bufs);
1939 retdesc = &ap->rx_return_ring[idx];
1953 rip = &ap->skb->rx_std_skbuff[skbidx];
1955 rxdesc = &ap->rx_std_ring[skbidx];
1959 rip = &ap->skb->rx_jumbo_skbuff[skbidx];
1961 rxdesc = &ap->rx_jumbo_ring[skbidx];
1962 atomic_dec(&ap->cur_jumbo_bufs);
1965 rip = &ap->skb->rx_mini_skbuff[skbidx];
1967 rxdesc = &ap->rx_mini_ring[skbidx];
1979 pci_unmap_page(ap->pdev,
2005 if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
2006 vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
2012 ap->stats.rx_packets++;
2013 ap->stats.rx_bytes += retdesc->size;
2018 atomic_sub(std_count, &ap->cur_rx_bufs);
2019 if (!ACE_IS_TIGON_I(ap))
2020 atomic_sub(mini_count, &ap->cur_mini_bufs);
2027 if (ACE_IS_TIGON_I(ap)) {
2028 writel(idx, &ap->regs->RxRetCsm);
2030 ap->cur_rx = idx;
2042 struct ace_private *ap = netdev_priv(dev);
2049 info = ap->skb->tx_skbuff + idx;
2054 pci_unmap_page(ap->pdev, mapping,
2061 ap->stats.tx_packets++;
2062 ap->stats.tx_bytes += skb->len;
2067 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2074 ap->tx_ret_csm = txcsm;
2082 struct ace_private *ap = netdev_priv(dev);
2083 struct ace_regs __iomem *regs = ap->regs;
2098 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
2114 rxretprd = *ap->rx_ret_prd;
2115 rxretcsm = ap->cur_rx;
2120 txcsm = *ap->tx_csm;
2121 idx = ap->tx_ret_csm;
2131 if (!tx_ring_full(ap, txcsm, ap->tx_prd))
2136 evtprd = *ap->evt_prd;
2151 cur_size = atomic_read(&ap->cur_rx_bufs);
2154 !test_and_set_bit(0, &ap->std_refill_busy)) {
2158 ace_load_std_rx_ring(ap,
2164 if (!ACE_IS_TIGON_I(ap)) {
2165 cur_size = atomic_read(&ap->cur_mini_bufs);
2169 &ap->mini_refill_busy)) {
2174 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
2180 if (ap->jumbo) {
2181 cur_size = atomic_read(&ap->cur_jumbo_bufs);
2185 &ap->jumbo_refill_busy)){
2190 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
2195 if (run_tasklet && !ap->tasklet_pending) {
2196 ap->tasklet_pending = 1;
2197 tasklet_schedule(&ap->ace_tasklet);
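
Source lines 2082-2197 are the interrupt handler. It compares each producer index the NIC
writes back to host memory (rx_ret_prd, tx_csm, evt_prd) against the driver's consumer
index and services only the rings that actually moved, then defers low-water refills to
the tasklet. A sketch of that compare-and-dispatch shape; the ace_rx_int, ace_tx_int and
ace_handle_event helper names are how the upstream driver labels these stages and are
assumptions here, since the matches only show lines containing "ap":

    rxretprd = *ap->rx_ret_prd;            /* NIC's RX-return producer   */
    rxretcsm = ap->cur_rx;                 /* driver's consumer index    */
    if (rxretprd != rxretcsm)
            ace_rx_int(dev, rxretprd, rxretcsm);

    txcsm = *ap->tx_csm;                   /* NIC's TX consumer          */
    idx = ap->tx_ret_csm;                  /* last index reclaimed       */
    if (txcsm != idx)
            ace_tx_int(dev, txcsm, idx);   /* free completed TX skbs     */

    evtprd = *ap->evt_prd;
    if (evtcsm != evtprd)
            evtcsm = ace_handle_event(dev, evtcsm, evtprd);

    /* Refilling RX rings is slow-path work; push it to the tasklet so the
     * hard IRQ stays short. tasklet_pending prevents double scheduling. */
    if (run_tasklet && !ap->tasklet_pending) {
            ap->tasklet_pending = 1;
            tasklet_schedule(&ap->ace_tasklet);
    }
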
2208 struct ace_private *ap = netdev_priv(dev);
2214 ap->vlgrp = grp;
2224 struct ace_private *ap = netdev_priv(dev);
2225 struct ace_regs __iomem *regs = ap->regs;
2228 if (!(ap->fw_running)) {
2245 if (ap->jumbo &&
2246 !test_and_set_bit(0, &ap->jumbo_refill_busy))
2247 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2255 ap->promisc = 1;
2257 ap->promisc = 0;
2258 ap->mcast_all = 0;
2266 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
2273 struct ace_private *ap = netdev_priv(dev);
2274 struct ace_regs __iomem *regs = ap->regs;
2287 if (ap->promisc) {
2292 ap->promisc = 0;
2300 tasklet_kill(&ap->ace_tasklet);
2310 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2315 info = ap->skb->tx_skbuff + i;
2320 if (ACE_IS_TIGON_I(ap)) {
2322 = (struct tx_desc __iomem *) &ap->tx_ring[i];
2327 memset(ap->tx_ring + i, 0,
2329 pci_unmap_page(ap->pdev, mapping,
2340 if (ap->jumbo) {
2355 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2361 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2365 info = ap->skb->tx_skbuff + idx;
2374 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2381 if (ACE_IS_TIGON_I(ap)) {
2402 struct ace_private *ap = netdev_priv(dev);
2403 struct ace_regs __iomem *regs = ap->regs;
2409 idx = ap->tx_prd;
2411 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2418 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2428 desc = ap->tx_ring + idx;
2429 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2432 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2435 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2441 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2452 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
2454 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2461 info = ap->skb->tx_skbuff + idx;
2462 desc = ap->tx_ring + idx;
2464 mapping = pci_map_page(ap->pdev, frag->page,
2471 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2475 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2488 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2493 ap->tx_prd = idx;
2494 ace_set_txprd(regs, ap, idx);
2505 if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
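
Source lines 2402-2505 are the transmit path: map the skb (and each page fragment) with
pci_map_page(), fill TX descriptors, advance the local producer index modulo
ACE_TX_RING_ENTRIES(ap), and only then publish the index to the NIC. A sketch of the
final publish-and-flow-control step; the wmb() barrier, the BD_FLG_COAL_NOW flag and the
netif_stop_queue()/netif_wake_queue() calls are assumptions about the surrounding code,
while ace_set_txprd() and tx_ring_full() are taken from the fragments:

    wmb();                   /* descriptors must be visible before the index */
    ap->tx_prd = idx;
    ace_set_txprd(regs, ap, idx);

    if (flagsize & BD_FLG_COAL_NOW) {
            netif_stop_queue(dev);

            /* A completion interrupt may already have freed ring space
             * between the full check and the stop; re-test so the queue
             * is not left stopped with room available. */
            if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
                    netif_wake_queue(dev);
    }
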
2543 struct ace_private *ap = netdev_priv(dev);
2544 struct ace_regs __iomem *regs = ap->regs;
2553 if (!(ap->jumbo)) {
2556 ap->jumbo = 1;
2557 if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2558 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2562 while (test_and_set_bit(0, &ap->jumbo_refill_busy));
2565 if (ap->jumbo) {
2580 struct ace_private *ap = netdev_priv(dev);
2581 struct ace_regs __iomem *regs = ap->regs;
2624 struct ace_private *ap = netdev_priv(dev);
2625 struct ace_regs __iomem *regs = ap->regs;
2643 if (!ACE_IS_TIGON_I(ap))
2665 if (link != ap->link) {
2670 ap->link = link;
2672 if (!ACE_IS_TIGON_I(ap))
2687 struct ace_private *ap = netdev_priv(dev);
2694 if (ap->pdev)
2695 strlcpy(info->bus_info, pci_name(ap->pdev),
2705 struct ace_private *ap = netdev_priv(dev);
2706 struct ace_regs __iomem *regs = ap->regs;
2733 struct ace_private *ap = netdev_priv(dev);
2734 struct ace_regs __iomem *regs = ap->regs;
2737 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
2742 ap->mcast_all = 1;
2743 } else if (ap->mcast_all) {
2748 ap->mcast_all = 0;
2751 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
2756 ap->promisc = 1;
2757 }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
2762 ap->promisc = 0;
2771 if ((dev->mc_count) && !(ap->mcast_all)) {
2776 }else if (!ap->mcast_all) {
2787 struct ace_private *ap = netdev_priv(dev);
2789 (struct ace_mac_stats __iomem *)ap->regs->Stats;
2791 ap->stats.rx_missed_errors = readl(&mac_stats->drop_space);
2792 ap->stats.multicast = readl(&mac_stats->kept_mc);
2793 ap->stats.collisions = readl(&mac_stats->coll);
2795 return &ap->stats;
2867 struct ace_private *ap = netdev_priv(dev);
2868 struct ace_regs __iomem *regs = ap->regs;
2872 "CPU is running!\n", ap->name);
2881 if (ACE_IS_TIGON_I(ap)) {
2888 }else if (ap->version == 2) {
3048 struct ace_private *ap = netdev_priv(dev);
3049 struct ace_regs __iomem *regs = ap->regs;
3072 printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
3081 ap->name);
3090 ap->name);
3100 ap->name);
3157 ap->name, offset);