Results limited to /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/ (the matches below are evidently from the SMC83c170 EPIC/100 Ethernet driver, epic100.c).

Lines Matching refs:ep

315 	struct epic_private *ep;
349 dev = alloc_etherdev(sizeof (*ep));
369 ep = dev->priv;
370 ep->mii.dev = dev;
371 ep->mii.mdio_read = mdio_read;
372 ep->mii.mdio_write = mdio_write;
373 ep->mii.phy_id_mask = 0x1f;
374 ep->mii.reg_num_mask = 0x1f;
379 ep->tx_ring = (struct epic_tx_desc *)ring_space;
380 ep->tx_ring_dma = ring_dma;
385 ep->rx_ring = (struct epic_rx_desc *)ring_space;
386 ep->rx_ring_dma = ring_dma;
401 spin_lock_init(&ep->lock);
402 spin_lock_init(&ep->napi_lock);
403 ep->reschedule_in_poll = 0;
429 ep->pci_dev = pdev;
430 ep->chip_id = chip_idx;
431 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
432 ep->irq_mask =
433 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
441 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
444 ep->phys[phy_idx++] = phy;
451 ep->mii_phy_cnt = phy_idx;
453 phy = ep->phys[0];
454 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
458 ep->mii.advertising, mdio_read(dev, phy, 5));
459 } else if ( ! (ep->chip_flags & NO_MII)) {
463 ep->phys[0] = 3;
465 ep->mii.phy_id = ep->phys[0];
469 if (ep->chip_flags & MII_PWRDWN)
475 ep->mii.force_media = ep->mii.full_duplex = 1;
478 dev->if_port = ep->default_port = option;
507 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
509 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
546 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
561 struct epic_private *ep)
565 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
570 struct epic_private *ep)
575 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
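
The two helpers above (lines 561-575) toggle only the EpicNapiEvent bits in the interrupt mask, so the PCI bus-error interrupts stay armed while NAPI polling runs. A minimal userspace sketch of the mask arithmetic, with illustrative bit values (the real encodings live in the driver's register definitions):

    #include <stdio.h>

    /* Illustrative bit assignments, not the chip's actual encoding. */
    #define EPIC_NAPI_EVENT  0x0000000fu   /* rx/tx completion sources */
    #define PCI_BUS_ERR_170  0x00004000u   /* stays enabled throughout */

    int main(void)
    {
        unsigned int irq_mask = PCI_BUS_ERR_170 | EPIC_NAPI_EVENT;

        unsigned int off = irq_mask & ~EPIC_NAPI_EVENT; /* epic_napi_irq_off() */
        unsigned int on  = irq_mask |  EPIC_NAPI_EVENT; /* epic_napi_irq_on()  */

        printf("masked: %08x  unmasked: %08x\n", off, on);
        return 0;
    }
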
654 struct epic_private *ep = dev->priv;
677 if (ep->chip_flags & MII_PWRDWN)
695 ep->tx_threshold = TX_FIFO_THRESH;
696 outl(ep->tx_threshold, ioaddr + TxThresh);
699 if (ep->mii_phy_cnt)
700 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
705 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
708 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
711 ep->mii.full_duplex = 1;
713 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
717 ep->mii.full_duplex ? "full" : "half",
718 ep->phys[0], mii_lpa);
722 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
723 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
724 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
733 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
741 ep->mii.full_duplex ? "full" : "half");
745 init_timer(&ep->timer);
746 ep->timer.expires = jiffies + 3*HZ;
747 ep->timer.data = (unsigned long)dev;
748 ep->timer.function = &epic_timer; /* timer handler */
749 add_timer(&ep->timer);
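
Lines 745-749 arm the media-check timer with the linux-2.6 timer API, and lines 871-872 rearm it from the handler. Assembled into one sketch (kernel-2.6 style fragments, not a drop-in; the rearm interval is the driver's next_tick):

    static void epic_timer(unsigned long data);

    /* Arming, as in epic_open(): */
    init_timer(&ep->timer);
    ep->timer.expires  = jiffies + 3*HZ;       /* first check ~3 s out */
    ep->timer.data     = (unsigned long)dev;   /* handler recovers dev */
    ep->timer.function = &epic_timer;
    add_timer(&ep->timer);

    /* Handler rearms itself for periodic link polling: */
    static void epic_timer(unsigned long data)
    {
        struct net_device *dev = (struct net_device *)data;
        struct epic_private *ep = dev->priv;

        /* ... check media/duplex, read counters ... */
        ep->timer.expires = jiffies + next_tick;
        add_timer(&ep->timer);
    }
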
759 struct epic_private *ep = dev->priv;
770 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
771 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
772 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
782 struct epic_private *ep = dev->priv;
789 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
802 if (ep->chip_flags & MII_PWRDWN)
808 ep->tx_threshold = TX_FIFO_THRESH;
809 outl(ep->tx_threshold, ioaddr + TxThresh);
810 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
811 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
813 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
821 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
834 struct epic_private *ep = dev->priv;
836 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
837 int negotiated = mii_lpa & ep->mii.advertising;
840 if (ep->mii.force_media)
844 if (ep->mii.full_duplex != duplex) {
845 ep->mii.full_duplex = duplex;
848 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
849 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
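
check_media (lines 836-849) derives duplex by ANDing the link partner's abilities (MII_LPA) with what the PHY advertised. The exact duplex test is not among the matched lines, so the expression below is the common idiom, using the standard linux/mii.h bit values. Runnable sketch:

    #include <stdio.h>

    /* Standard MII link-partner ability bits (values from linux/mii.h). */
    #define LPA_10FULL   0x0040
    #define LPA_100FULL  0x0100

    int main(void)
    {
        int advertising = LPA_10FULL | LPA_100FULL; /* what we offered      */
        int mii_lpa     = LPA_100FULL;              /* what the peer offers */

        int negotiated = mii_lpa & advertising;     /* common subset */
        /* Assumed idiom: full duplex if a full-duplex mode survived. */
        int duplex = (negotiated & LPA_100FULL) || (negotiated & LPA_10FULL);

        printf("negotiated %04x -> %s-duplex\n",
               negotiated, duplex ? "full" : "half");
        return 0;
    }
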
856 struct epic_private *ep = dev->priv;
871 ep->timer.expires = jiffies + next_tick;
872 add_timer(&ep->timer);
877 struct epic_private *ep = dev->priv;
886 dev->name, ep->dirty_tx, ep->cur_tx);
890 ep->stats.tx_fifo_errors++;
898 ep->stats.tx_errors++;
899 if (!ep->tx_full)
906 struct epic_private *ep = dev->priv;
909 ep->tx_full = 0;
910 ep->dirty_tx = ep->cur_tx = 0;
911 ep->cur_rx = ep->dirty_rx = 0;
912 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
916 ep->rx_ring[i].rxstatus = 0;
917 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
918 ep->rx_ring[i].next = ep->rx_ring_dma +
920 ep->rx_skbuff[i] = NULL;
923 ep->rx_ring[i-1].next = ep->rx_ring_dma;
927 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
928 ep->rx_skbuff[i] = skb;
932 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
933 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
934 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
936 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
941 ep->tx_skbuff[i] = NULL;
942 ep->tx_ring[i].txstatus = 0x0000;
943 ep->tx_ring[i].next = ep->tx_ring_dma +
946 ep->tx_ring[i-1].next = ep->tx_ring_dma;
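
The ring setup above (lines 916-946) chains each descriptor's next field to the bus address of its successor and points the last entry back at the ring head, so the chip walks a circular list. A runnable sketch of the wrap logic, using array indices in place of DMA addresses:

    #include <stdio.h>

    #define RX_RING_SIZE 4

    struct rx_desc {
        int next;   /* stands in for ep->rx_ring_dma + (i+1)*sizeof(desc) */
    };

    int main(void)
    {
        struct rx_desc ring[RX_RING_SIZE];
        int i;

        for (i = 0; i < RX_RING_SIZE; i++)
            ring[i].next = i + 1;           /* forward link */
        ring[RX_RING_SIZE - 1].next = 0;    /* wrap to the head, as at lines 923/946 */

        for (i = 0; i < RX_RING_SIZE; i++)
            printf("desc %d -> desc %d\n", i, ring[i].next);
        return 0;
    }
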
952 struct epic_private *ep = dev->priv;
964 spin_lock_irqsave(&ep->lock, flags);
965 free_count = ep->cur_tx - ep->dirty_tx;
966 entry = ep->cur_tx % TX_RING_SIZE;
968 ep->tx_skbuff[entry] = skb;
969 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
980 ep->tx_full = 1;
982 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
983 ep->tx_ring[entry].txstatus =
987 ep->cur_tx++;
988 if (ep->tx_full)
991 spin_unlock_irqrestore(&ep->lock, flags);
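
The transmit path (lines 964-991) accounts for in-flight packets with two free-running unsigned counters; cur_tx - dirty_tx stays correct even after both wrap past 2^32. A runnable demonstration (the ring size and stop threshold here are illustrative):

    #include <stdio.h>

    #define TX_RING_SIZE 8
    #define TX_QUEUE_LEN 6   /* illustrative stop threshold */

    int main(void)
    {
        /* Free-running counters straddling the 32-bit wrap point. */
        unsigned int dirty_tx = 0xfffffffeu;   /* consumer side */
        unsigned int cur_tx   = 0x00000003u;   /* producer, already wrapped */

        unsigned int in_flight = cur_tx - dirty_tx;   /* == 5, wrap-safe */

        printf("in flight: %u  slot: %u  stop queue: %s\n",
               in_flight, cur_tx % TX_RING_SIZE,
               in_flight >= TX_QUEUE_LEN ? "yes" : "no");
        return 0;
    }
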
1005 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1008 struct net_device_stats *stats = &ep->stats;
1027 static void epic_tx(struct net_device *dev, struct epic_private *ep)
1035 cur_tx = ep->cur_tx;
1036 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1039 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1045 ep->stats.collisions += (txstatus >> 8) & 15;
1046 ep->stats.tx_packets++;
1047 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1049 epic_tx_error(dev, ep, txstatus);
1052 skb = ep->tx_skbuff[entry];
1053 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1056 ep->tx_skbuff[entry] = NULL;
1063 dev->name, dirty_tx, cur_tx, ep->tx_full);
1067 ep->dirty_tx = dirty_tx;
1068 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1070 ep->tx_full = 0;
1080 struct epic_private *ep = dev->priv;
1100 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1101 spin_lock(&ep->napi_lock);
1103 epic_napi_irq_off(dev, ep);
1106 ep->reschedule_in_poll++;
1107 spin_unlock(&ep->napi_lock);
1117 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1118 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1119 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1122 ep->stats.tx_fifo_errors++;
1123 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1148 struct epic_private *ep = dev->priv;
1149 int entry = ep->cur_rx % RX_RING_SIZE;
1150 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1155 ep->rx_ring[entry].rxstatus);
1161 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1162 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1175 ep->stats.rx_length_errors++;
1178 ep->stats.rx_errors++;
1196 pci_dma_sync_single_for_cpu(ep->pci_dev,
1197 ep->rx_ring[entry].bufaddr,
1198 ep->rx_buf_sz,
1200 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
1202 pci_dma_sync_single_for_device(ep->pci_dev,
1203 ep->rx_ring[entry].bufaddr,
1204 ep->rx_buf_sz,
1207 pci_unmap_single(ep->pci_dev,
1208 ep->rx_ring[entry].bufaddr,
1209 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1210 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1211 ep->rx_skbuff[entry] = NULL;
1216 ep->stats.rx_packets++;
1217 ep->stats.rx_bytes += pkt_len;
1220 entry = (++ep->cur_rx) % RX_RING_SIZE;
1224 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1225 entry = ep->dirty_rx % RX_RING_SIZE;
1226 if (ep->rx_skbuff[entry] == NULL) {
1228 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1232 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1233 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1236 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
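
The refill loop (lines 1224-1236) advances dirty_rx until it catches cur_rx, allocating a buffer for each consumed slot and handing the descriptor back to the chip only afterwards, by setting DescOwn last. A runnable sketch of that ownership handoff (the DescOwn value is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define RX_RING_SIZE 4
    #define DESC_OWN 0x80000000u   /* illustrative ownership bit */

    struct rx_desc { unsigned int rxstatus; };

    int main(void)
    {
        struct rx_desc ring[RX_RING_SIZE];
        static char bufs[RX_RING_SIZE][2048];  /* dev_alloc_skb() stand-ins */
        char *skb[RX_RING_SIZE] = { 0 };
        unsigned int cur_rx = 6, dirty_rx = 3; /* three slots to refill */

        memset(ring, 0, sizeof(ring));
        for (; cur_rx - dirty_rx > 0; dirty_rx++) {
            unsigned int entry = dirty_rx % RX_RING_SIZE;
            if (skb[entry] == NULL) {
                skb[entry] = bufs[entry];
                /* Give the slot back to the chip only after the buffer
                 * is in place: the DescOwn store must come last. */
                ring[entry].rxstatus = DESC_OWN;
                printf("refilled slot %u\n", entry);
            }
        }
        return 0;
    }
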
1241 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1251 ep->stats.rx_errors++;
1258 struct epic_private *ep = dev->priv;
1266 epic_tx(dev, ep);
1270 epic_rx_err(dev, ep);
1281 spin_lock_irqsave(&ep->napi_lock, flags);
1283 more = ep->reschedule_in_poll;
1287 epic_napi_irq_on(dev, ep);
1289 ep->reschedule_in_poll--;
1291 spin_unlock_irqrestore(&ep->napi_lock, flags);
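
The interrupt and poll paths coordinate through reschedule_in_poll under napi_lock: the IRQ side (lines 1100-1107) masks the NAPI interrupt sources and bumps the counter, and the poll side (lines 1281-1291) re-enables them only when no reschedule is pending, so an event arriving mid-poll is never dropped. Condensed from the matches above (kernel-2.6 style fragments, not a drop-in):

    /* IRQ side: defer rx/tx work to the poll routine. */
    spin_lock(&ep->napi_lock);
    epic_napi_irq_off(dev, ep);     /* mask the NAPI sources */
    ep->reschedule_in_poll++;       /* a poll pass is now owed */
    spin_unlock(&ep->napi_lock);

    /* Poll side: unmask only when nothing raced past us. */
    spin_lock_irqsave(&ep->napi_lock, flags);
    more = ep->reschedule_in_poll;
    if (!more)
        epic_napi_irq_on(dev, ep);  /* safe to take interrupts again */
    else
        ep->reschedule_in_poll--;   /* poll once more instead */
    spin_unlock_irqrestore(&ep->napi_lock, flags);
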
1303 struct epic_private *ep = dev->priv;
1313 del_timer_sync(&ep->timer);
1315 epic_disable_int(dev, ep);
1323 skb = ep->rx_skbuff[i];
1324 ep->rx_skbuff[i] = NULL;
1325 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1326 ep->rx_ring[i].buflength = 0;
1328 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1329 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1332 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
1335 skb = ep->tx_skbuff[i];
1336 ep->tx_skbuff[i] = NULL;
1339 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1352 struct epic_private *ep = dev->priv;
1357 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1358 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1359 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1362 return &ep->stats;
1367 new frame, not around filling ep->setup_frame. This is non-deterministic
1373 struct epic_private *ep = dev->priv;
1402 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1405 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
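
The multicast path rebuilds the hash filter in a local buffer and, per lines 1402-1405, writes the hardware only when it differs from the cached ep->mc_filter copy (the comment fragment at line 1367 notes the hardware update itself is non-deterministic, so skipping redundant writes matters). A runnable sketch of the compare-then-copy idiom:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned short cached[4]    = { 0 };               /* ep->mc_filter */
        unsigned short mc_filter[4] = { 0, 0xbeef, 0, 0 }; /* freshly built */

        if (memcmp(mc_filter, cached, sizeof(mc_filter)) != 0) {
            /* Only on a real change would the filter registers be written. */
            memcpy(cached, mc_filter, sizeof(mc_filter));
            printf("filter changed: hardware updated\n");
        } else {
            printf("filter unchanged: hardware write skipped\n");
        }
        return 0;
    }
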
1530 struct epic_private *ep = dev->priv;
1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
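
Teardown mirrors setup: pci_free_consistent must receive the same size / CPU address / DMA address triple that pci_alloc_consistent returned, which is why the probe path stashes both ep->tx_ring and ep->tx_ring_dma (lines 379-386) and why both the probe error path (lines 507-509) and the remove path (lines 1532-1533) pass them back. Sketch of the pairing (kernel-2.6 API, error handling elided):

    /* In probe: */
    ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
    ep->tx_ring     = (struct epic_tx_desc *)ring_space;
    ep->tx_ring_dma = ring_dma;

    /* In remove (or the probe error path): the identical triple. */
    pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
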