Lines Matching refs:yp in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/net/

183 the 'yp->tx_full' flag.
187 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
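
The two comment fragments above (driver lines 183 and 187) describe the Tx-ring bookkeeping shared between the transmit path and the interrupt handler: the transmit side advances cur_tx and sets tx_full when the ring has no room, and the interrupt handler marks entries empty by advancing dirty_tx, clearing tx_full once space opens up. Below is a minimal sketch of that pattern, assuming hypothetical names (fake_ring, fake_xmit, fake_tx_irq); it is an illustration of the scheme, not the driver's actual code, with thresholds mirroring the TX_QUEUE_SIZE checks visible in the matches further down.

/* Sketch only: hypothetical types and functions, not from yellowfin.c. */
#include <stdbool.h>

#define TX_RING_SIZE  16
#define TX_QUEUE_SIZE 12

struct fake_ring {
	unsigned int cur_tx;    /* next entry the transmit path will fill */
	unsigned int dirty_tx;  /* oldest entry not yet reaped by the IRQ */
	bool tx_full;           /* set by xmit when the ring has no room  */
};

/* Transmit path: claim a slot; flag the ring full when no room remains. */
static bool fake_xmit(struct fake_ring *r)
{
	if (r->cur_tx - r->dirty_tx >= TX_QUEUE_SIZE)
		return false;                 /* caller keeps the queue stopped */

	/* entry = r->cur_tx % TX_RING_SIZE;  ...fill the descriptor here... */
	r->cur_tx++;

	if (r->cur_tx - r->dirty_tx >= TX_QUEUE_SIZE)
		r->tx_full = true;            /* next xmit must wait for the IRQ */
	return true;
}

/* Interrupt path: mark completed entries empty by advancing dirty_tx;
 * iff tx_full is set and room has opened up, clear it so the caller can
 * restart the transmit queue. */
static void fake_tx_irq(struct fake_ring *r, unsigned int completed)
{
	r->dirty_tx += completed;

	if (r->tx_full && r->cur_tx - r->dirty_tx < TX_QUEUE_SIZE - 4)
		r->tx_full = false;
}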
573 struct yellowfin_private *yp = netdev_priv(dev);
574 void __iomem *ioaddr = yp->base;
589 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
590 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
611 yp->tx_threshold = 32;
612 iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
615 dev->if_port = yp->default_port;
620 if (yp->drv_flags & IsGigabit) {
622 yp->full_duplex = 1;
627 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
643 init_timer(&yp->timer);
644 yp->timer.expires = jiffies + 3*HZ;
645 yp->timer.data = (unsigned long)dev;
646 yp->timer.function = &yellowfin_timer; /* timer handler */
647 add_timer(&yp->timer);
655 struct yellowfin_private *yp = netdev_priv(dev);
656 void __iomem *ioaddr = yp->base;
664 if (yp->mii_cnt) {
665 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
666 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
667 int negotiated = lpa & yp->advertising;
671 dev->name, yp->phys[0], bmsr, lpa);
673 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
675 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
683 yp->timer.expires = jiffies + next_tick;
684 add_timer(&yp->timer);
689 struct yellowfin_private *yp = netdev_priv(dev);
690 void __iomem *ioaddr = yp->base;
694 dev->name, yp->cur_tx, yp->dirty_tx,
700 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
702 printk(" %8.8x", yp->rx_ring[i].result_status);
703 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
705 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
706 yp->tx_ring[i].result_status);
715 iowrite32(0x10001000, yp->base + TxCtrl);
716 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
720 yp->stats.tx_errors++;
726 struct yellowfin_private *yp = netdev_priv(dev);
729 yp->tx_full = 0;
730 yp->cur_rx = yp->cur_tx = 0;
731 yp->dirty_tx = 0;
733 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
736 yp->rx_ring[i].dbdma_cmd =
737 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
738 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
743 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
744 yp->rx_skbuff[i] = skb;
749 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
750 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
753 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
759 yp->tx_skbuff[i] = NULL;
760 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
761 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
765 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
773 yp->tx_skbuff[i] = 0;
775 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
776 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
779 if (yp->flags & FullTxStatus) {
780 yp->tx_ring[j].dbdma_cmd =
781 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
782 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
783 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
787 yp->tx_ring[j].dbdma_cmd =
789 yp->tx_ring[j].request_cnt = 2;
791 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
793 &(yp->tx_status[0].tx_errs) -
794 &(yp->tx_status[0]));
796 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
800 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
803 yp->tx_tail_desc = &yp->tx_status[0];
809 struct yellowfin_private *yp = netdev_priv(dev);
819 entry = yp->cur_tx % TX_RING_SIZE;
827 yp->tx_skbuff[entry] = NULL;
833 yp->tx_skbuff[entry] = skb;
836 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
838 yp->tx_ring[entry].result_status = 0;
841 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
842 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
845 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
846 yp->tx_ring[entry].dbdma_cmd =
849 yp->cur_tx++;
851 yp->tx_ring[entry<<1].request_cnt = len;
852 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
857 yp->cur_tx++;
859 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
860 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
864 yp->tx_ring[entry<<1].dbdma_cmd =
872 iowrite32(0x10001000, yp->base + TxCtrl);
874 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
877 yp->tx_full = 1;
882 dev->name, yp->cur_tx, entry);
892 struct yellowfin_private *yp;
897 yp = netdev_priv(dev);
898 ioaddr = yp->base;
900 spin_lock (&yp->lock);
919 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
920 int entry = yp->dirty_tx % TX_RING_SIZE;
923 if (yp->tx_ring[entry].result_status == 0)
925 skb = yp->tx_skbuff[entry];
926 yp->stats.tx_packets++;
927 yp->stats.tx_bytes += skb->len;
929 pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
932 yp->tx_skbuff[entry] = NULL;
934 if (yp->tx_full
935 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
937 yp->tx_full = 0;
941 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
942 unsigned dirty_tx = yp->dirty_tx;
944 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
948 u16 tx_errs = yp->tx_status[entry].tx_errs;
956 yp->tx_status[entry].tx_cnt,
957 yp->tx_status[entry].tx_errs,
958 yp->tx_status[entry].total_tx_cnt,
959 yp->tx_status[entry].paused);
963 skb = yp->tx_skbuff[entry];
971 yp->stats.tx_errors++;
972 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
973 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
974 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
975 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
982 yp->stats.tx_bytes += skb->len;
983 yp->stats.collisions += tx_errs & 15;
984 yp->stats.tx_packets++;
987 pci_unmap_single(yp->pci_dev,
988 yp->tx_ring[entry<<1].addr, skb->len,
991 yp->tx_skbuff[entry] = 0;
993 yp->tx_status[entry].tx_errs = 0;
997 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
999 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1004 if (yp->tx_full
1005 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1007 yp->tx_full = 0;
1011 yp->dirty_tx = dirty_tx;
1012 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1032 spin_unlock (&yp->lock);
1040 struct yellowfin_private *yp = netdev_priv(dev);
1041 int entry = yp->cur_rx % RX_RING_SIZE;
1042 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1046 entry, yp->rx_ring[entry].result_status);
1048 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1049 yp->rx_ring[entry].result_status);
1054 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1055 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1063 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
1064 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1079 yp->stats.rx_length_errors++;
1080 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1085 yp->stats.rx_errors++;
1086 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1087 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1088 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1089 if (frame_status < 0) yp->stats.rx_dropped++;
1090 } else if ( !(yp->drv_flags & IsGigabit) &&
1094 yp->stats.rx_errors++;
1095 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1096 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1097 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1098 if (status2 & 0x80) yp->stats.rx_dropped++;
1100 } else if ((yp->flags & HasMACAddrBug) &&
1101 memcmp(le32_to_cpu(yp->rx_ring_dma +
1104 memcmp(le32_to_cpu(yp->rx_ring_dma +
1116 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1129 pci_unmap_single(yp->pci_dev,
1130 yp->rx_ring[entry].addr,
1131 yp->rx_buf_sz,
1133 yp->rx_skbuff[entry] = NULL;
1141 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1142 yp->rx_buf_sz,
1148 yp->stats.rx_packets++;
1149 yp->stats.rx_bytes += pkt_len;
1151 entry = (++yp->cur_rx) % RX_RING_SIZE;
1155 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1156 entry = yp->dirty_rx % RX_RING_SIZE;
1157 if (yp->rx_skbuff[entry] == NULL) {
1158 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1161 yp->rx_skbuff[entry] = skb;
1164 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1165 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1167 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1168 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1170 yp->rx_ring[entry - 1].dbdma_cmd =
1171 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1173 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1175 | yp->rx_buf_sz);
1183 struct yellowfin_private *yp = netdev_priv(dev);
1189 yp->stats.tx_errors++;
1191 yp->stats.rx_errors++;
1196 struct yellowfin_private *yp = netdev_priv(dev);
1197 void __iomem *ioaddr = yp->base;
1209 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1219 del_timer(&yp->timer);
1224 (unsigned long long)yp->tx_ring_dma);
1227 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1228 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1229 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1230 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1233 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1234 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1237 (unsigned long long)yp->rx_ring_dma);
1240 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1241 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1242 yp->rx_ring[i].result_status);
1244 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1248 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1260 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1261 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1262 if (yp->rx_skbuff[i]) {
1263 dev_kfree_skb(yp->rx_skbuff[i]);
1265 yp->rx_skbuff[i] = NULL;
1268 if (yp->tx_skbuff[i])
1269 dev_kfree_skb(yp->tx_skbuff[i]);
1270 yp->tx_skbuff[i] = NULL;
1285 struct yellowfin_private *yp = netdev_priv(dev);
1286 return &yp->stats;
1293 struct yellowfin_private *yp = netdev_priv(dev);
1294 void __iomem *ioaddr = yp->base;
1315 if (yp->drv_flags & HasMulticastBug) {