Lines Matching defs:yp
(scope: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/)

182 the 'yp->tx_full' flag.
186 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
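
The two comment fragments above (file lines 182 and 186) describe the driver's Tx ring accounting: cur_tx counts queued descriptors, dirty_tx counts reclaimed ones, and yp->tx_full gates the netdev queue. A minimal sketch of that convention, using hypothetical helper names (the real checks are open-coded in yellowfin_start_xmit and the interrupt handler):

    /* Sketch only: cur_tx/dirty_tx are free-running counters; their
     * difference is the number of in-flight descriptors.  tx_full is set
     * when the ring is (nearly) exhausted and cleared once the interrupt
     * handler has reclaimed enough entries. */
    static inline int yellowfin_tx_room(const struct yellowfin_private *yp)
    {
            return TX_QUEUE_SIZE - (yp->cur_tx - yp->dirty_tx);
    }

    /* Hypothetical helper; called with yp->lock held after reclaiming. */
    static void yellowfin_maybe_wake(struct net_device *dev,
                                     struct yellowfin_private *yp)
    {
            if (yp->tx_full && yellowfin_tx_room(yp) > 4) {
                    yp->tx_full = 0;
                    netif_wake_queue(dev);  /* mirrors file lines 940-943 */
            }
    }
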
571 struct yellowfin_private *yp = netdev_priv(dev);
572 void __iomem *ioaddr = yp->base;
592 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
593 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
614 yp->tx_threshold = 32;
615 iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
618 dev->if_port = yp->default_port;
623 if (yp->drv_flags & IsGigabit) {
625 yp->full_duplex = 1;
630 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
645 init_timer(&yp->timer);
646 yp->timer.expires = jiffies + 3*HZ;
647 yp->timer.data = (unsigned long)dev;
648 yp->timer.function = &yellowfin_timer; /* timer handler */
649 add_timer(&yp->timer);
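
File lines 645-649 show the classic pre-timer_setup() API used throughout 2.6.x (declared in <linux/timer.h>): the timer is initialised by hand and the device pointer is passed through the data field. The pattern, as it appears inside yellowfin_open():

    /* Periodic media/duplex check; yellowfin_timer() re-arms itself. */
    init_timer(&yp->timer);
    yp->timer.expires  = jiffies + 3*HZ;          /* first check in ~3 s      */
    yp->timer.data     = (unsigned long)dev;      /* argument for the handler */
    yp->timer.function = yellowfin_timer;         /* void (*)(unsigned long)  */
    add_timer(&yp->timer);
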
657 struct yellowfin_private *yp = netdev_priv(dev);
658 void __iomem *ioaddr = yp->base;
666 if (yp->mii_cnt) {
667 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
668 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
669 int negotiated = lpa & yp->advertising;
672 yp->phys[0], bmsr, lpa);
674 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
676 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
684 yp->timer.expires = jiffies + next_tick;
685 add_timer(&yp->timer);
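
Lines 666-685 are the body of yellowfin_timer(): it re-reads the PHY, resolves duplex from the negotiated abilities, rewrites the Cnfg register and re-arms itself. A sketch of that flow; mdio_read() is the driver's own accessor, next_tick is the local polling interval, and mii_duplex()/MII_BMSR/MII_LPA come from <linux/mii.h>:

    if (yp->mii_cnt) {
            int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
            int lpa  = mdio_read(ioaddr, yp->phys[0], MII_LPA);
            int negotiated = lpa & yp->advertising;

            /* mii_duplex() honours a forced-duplex lock, otherwise it
             * resolves the autonegotiation result. */
            yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
            iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
    }
    yp->timer.expires = jiffies + next_tick;
    add_timer(&yp->timer);
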
690 struct yellowfin_private *yp = netdev_priv(dev);
691 void __iomem *ioaddr = yp->base;
694 yp->cur_tx, yp->dirty_tx,
701 pr_warning(" Rx ring %p: ", yp->rx_ring);
703 pr_cont(" %08x", yp->rx_ring[i].result_status);
705 pr_warning(" Tx ring %p: ", yp->tx_ring);
708 yp->tx_status[i].tx_errs,
709 yp->tx_ring[i].result_status);
718 iowrite32(0x10001000, yp->base + TxCtrl);
719 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
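
Lines 690-719 come from yellowfin_tx_timeout(): the handler dumps both descriptor rings for debugging, then pokes TxCtrl to restart the DMA engine and reopens the queue if there is room. The recovery step, roughly (the wake call is the usual follow-up to the test on line 719):

    /* Kick the Tx DMA engine; 0x10001000 is the same wake-up value the
     * driver writes to TxCtrl on the transmit path (file line 879). */
    iowrite32(0x10001000, yp->base + TxCtrl);
    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
            netif_wake_queue(dev);
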
729 struct yellowfin_private *yp = netdev_priv(dev);
732 yp->tx_full = 0;
733 yp->cur_rx = yp->cur_tx = 0;
734 yp->dirty_tx = 0;
736 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
739 yp->rx_ring[i].dbdma_cmd =
740 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
741 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
746 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
747 yp->rx_skbuff[i] = skb;
752 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
753 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
757 dev_kfree_skb(yp->rx_skbuff[j]);
760 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
761 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
767 yp->tx_skbuff[i] = NULL;
768 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
769 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
773 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
779 yp->tx_skbuff[i] = 0;
781 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
782 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
785 if (yp->flags & FullTxStatus) {
786 yp->tx_ring[j].dbdma_cmd =
787 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
788 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
789 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
793 yp->tx_ring[j].dbdma_cmd =
795 yp->tx_ring[j].request_cnt = 2;
797 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
799 &(yp->tx_status[0].tx_errs) -
800 &(yp->tx_status[0]));
802 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
806 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
809 yp->tx_tail_desc = &yp->tx_status[0];
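
Lines 729-809 are yellowfin_init_ring(). The Rx side allocates one skb per descriptor and maps it for DMA; the Tx side chains descriptors and, depending on the FullTxStatus flag, points each status descriptor at either the whole tx_status record or just its tx_errs word. A condensed sketch of the Rx fill (the real code splits this into two loops and unwinds on allocation failure, lines 739-761):

    yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    for (i = 0; i < RX_RING_SIZE; i++) {
            struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);

            if (!skb)
                    break;                  /* caller unwinds on failure */
            yp->rx_skbuff[i] = skb;
            skb->dev = dev;
            skb_reserve(skb, 2);            /* 16-byte align the IP header */
            yp->rx_ring[i].dbdma_cmd =
                    cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
            yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
                    skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
    }
    /* Park the last descriptor until the refill code re-arms it. */
    yp->rx_ring[i - 1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
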
816 struct yellowfin_private *yp = netdev_priv(dev);
826 entry = yp->cur_tx % TX_RING_SIZE;
834 yp->tx_skbuff[entry] = NULL;
840 yp->tx_skbuff[entry] = skb;
843 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
845 yp->tx_ring[entry].result_status = 0;
848 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
849 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
852 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
853 yp->tx_ring[entry].dbdma_cmd =
856 yp->cur_tx++;
858 yp->tx_ring[entry<<1].request_cnt = len;
859 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
864 yp->cur_tx++;
866 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
867 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
871 yp->tx_ring[entry<<1].dbdma_cmd =
879 iowrite32(0x10001000, yp->base + TxCtrl);
881 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
884 yp->tx_full = 1;
888 yp->cur_tx, entry);
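
Lines 816-888 are yellowfin_start_xmit(). The fast path picks the next slot from cur_tx, records the skb, maps it, fills the descriptor, then taps TxCtrl and stops the queue when the ring is close to full. A skeleton of that path, simplified to a single-descriptor layout; the driver's default build interleaves a status descriptor per packet, which the entry<<1 indexing in lines 858-871 accounts for, and CMD_TX_PKT/BRANCH_IFTRUE are assumed from the driver's dbdma_cmd enum:

    entry = yp->cur_tx % TX_RING_SIZE;
    yp->tx_skbuff[entry] = skb;
    yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
            skb->data, skb->len, PCI_DMA_TODEVICE));
    yp->tx_ring[entry].result_status = 0;
    yp->tx_ring[entry].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
    yp->cur_tx++;

    iowrite32(0x10001000, yp->base + TxCtrl);  /* wake the Tx DMA engine */
    if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE) {
            yp->tx_full = 1;                   /* mirrors lines 881-884 */
            netif_stop_queue(dev);
    }
    return NETDEV_TX_OK;
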
898 struct yellowfin_private *yp;
903 yp = netdev_priv(dev);
904 ioaddr = yp->base;
906 spin_lock (&yp->lock);
925 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
926 int entry = yp->dirty_tx % TX_RING_SIZE;
929 if (yp->tx_ring[entry].result_status == 0)
931 skb = yp->tx_skbuff[entry];
935 pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
938 yp->tx_skbuff[entry] = NULL;
940 if (yp->tx_full &&
941 yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
943 yp->tx_full = 0;
947 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
948 unsigned dirty_tx = yp->dirty_tx;
950 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
954 u16 tx_errs = yp->tx_status[entry].tx_errs;
961 yp->tx_status[entry].tx_cnt,
962 yp->tx_status[entry].tx_errs,
963 yp->tx_status[entry].total_tx_cnt,
964 yp->tx_status[entry].paused);
968 skb = yp->tx_skbuff[entry];
992 pci_unmap_single(yp->pci_dev,
993 yp->tx_ring[entry<<1].addr, skb->len,
996 yp->tx_skbuff[entry] = 0;
998 yp->tx_status[entry].tx_errs = 0;
1002 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1004 dirty_tx, yp->cur_tx, yp->tx_full);
1009 if (yp->tx_full &&
1010 yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1012 yp->tx_full = 0;
1016 yp->dirty_tx = dirty_tx;
1017 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1036 spin_unlock (&yp->lock);
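
Lines 898-1036 are the interrupt handler. Under yp->lock it advances dirty_tx while descriptors show a non-zero result_status (or, on the full-status chips, consumes the separate tx_status records), unmaps and frees each skb, and reopens the queue once enough slots are free. The reclaim loop in skeleton form, following the non-status branch of lines 925-943:

    spin_lock(&yp->lock);
    for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
            int entry = yp->dirty_tx % TX_RING_SIZE;
            struct sk_buff *skb;

            if (yp->tx_ring[entry].result_status == 0)
                    break;                     /* still owned by the chip */
            skb = yp->tx_skbuff[entry];
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;
            pci_unmap_single(yp->pci_dev,
                             le32_to_cpu(yp->tx_ring[entry].addr),
                             skb->len, PCI_DMA_TODEVICE);
            dev_kfree_skb_irq(skb);            /* interrupt context */
            yp->tx_skbuff[entry] = NULL;
    }
    if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
            yp->tx_full = 0;
            netif_wake_queue(dev);
    }
    spin_unlock(&yp->lock);
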
1044 struct yellowfin_private *yp = netdev_priv(dev);
1045 int entry = yp->cur_rx % RX_RING_SIZE;
1046 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1050 entry, yp->rx_ring[entry].result_status);
1052 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1053 yp->rx_ring[entry].result_status);
1058 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1059 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1067 pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1068 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1084 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1094 } else if ( !(yp->drv_flags & IsGigabit) &&
1104 } else if ((yp->flags & HasMACAddrBug) &&
1105 memcmp(le32_to_cpu(yp->rx_ring_dma +
1108 memcmp(le32_to_cpu(yp->rx_ring_dma +
1118 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1130 pci_unmap_single(yp->pci_dev,
1131 le32_to_cpu(yp->rx_ring[entry].addr),
1132 yp->rx_buf_sz,
1134 yp->rx_skbuff[entry] = NULL;
1142 pci_dma_sync_single_for_device(yp->pci_dev,
1144 yp->rx_buf_sz,
1152 entry = (++yp->cur_rx) % RX_RING_SIZE;
1156 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1157 entry = yp->dirty_rx % RX_RING_SIZE;
1158 if (yp->rx_skbuff[entry] == NULL) {
1159 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1162 yp->rx_skbuff[entry] = skb;
1165 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1166 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1168 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1169 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1171 yp->rx_ring[entry - 1].dbdma_cmd =
1172 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1174 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1176 | yp->rx_buf_sz);
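
Lines 1044-1176 are yellowfin_rx(): each frame is validated against its status word, small frames may be copied while large ones hand the mapped skb straight up the stack, and a second loop refills empty ring slots, re-arming the previous descriptor (lines 1168-1176) so the DMA engine never runs past an unfilled entry. The hand-off of a received frame, roughly; data_size is derived from the descriptor's status word in the real code:

    /* Pass the buffer up: unmap, size the skb to the received length,
     * and let the stack classify it. */
    pci_unmap_single(yp->pci_dev,
                     le32_to_cpu(yp->rx_ring[entry].addr),
                     yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
    yp->rx_skbuff[entry] = NULL;
    skb_put(rx_skb, data_size);
    rx_skb->protocol = eth_type_trans(rx_skb, dev);
    netif_rx(rx_skb);
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += data_size;
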
1194 struct yellowfin_private *yp = netdev_priv(dev);
1195 void __iomem *ioaddr = yp->base;
1206 yp->cur_tx, yp->dirty_tx,
1207 yp->cur_rx, yp->dirty_rx);
1217 del_timer(&yp->timer);
1222 (unsigned long long)yp->tx_ring_dma);
1225 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1226 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1227 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1228 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1231 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1232 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1235 (unsigned long long)yp->rx_ring_dma);
1238 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1239 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1240 yp->rx_ring[i].result_status);
1242 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1248 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1260 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1261 yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1262 if (yp->rx_skbuff[i]) {
1263 dev_kfree_skb(yp->rx_skbuff[i]);
1265 yp->rx_skbuff[i] = NULL;
1268 if (yp->tx_skbuff[i])
1269 dev_kfree_skb(yp->tx_skbuff[i]);
1270 yp->tx_skbuff[i] = NULL;
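
Lines 1194-1270 are yellowfin_close(): it stops the media timer, optionally dumps both rings for debugging, then parks every Rx descriptor, poisons its address, and frees all outstanding skbs on both rings. The teardown in outline, reassembled from lines 1217 and 1260-1270:

    del_timer(&yp->timer);

    for (i = 0; i < RX_RING_SIZE; i++) {
            yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
            yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);  /* invalid address */
            if (yp->rx_skbuff[i])
                    dev_kfree_skb(yp->rx_skbuff[i]);
            yp->rx_skbuff[i] = NULL;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
            if (yp->tx_skbuff[i])
                    dev_kfree_skb(yp->tx_skbuff[i]);
            yp->tx_skbuff[i] = NULL;
    }
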
1287 struct yellowfin_private *yp = netdev_priv(dev);
1288 void __iomem *ioaddr = yp->base;
1310 if (yp->drv_flags & HasMulticastBug) {