Results limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/staging/et131x/

Lines Matching defs:etdev

91 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
94 struct et131x_adapter *etdev);
95 static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
190 * @etdev: pointer to our private adapter structure
195 void ConfigTxDmaRegs(struct et131x_adapter *etdev)
197 struct txdma_regs __iomem *txdma = &etdev->regs->txdma;
200 writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
202 writel((u32) etdev->tx_ring.tx_desc_ring_pa,
209 writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
211 writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
213 *etdev->tx_ring.tx_status = 0;
216 etdev->tx_ring.send_idx = 0;
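The ConfigTxDmaRegs hits above (lines 195-216) show the 64-bit descriptor-ring and write-back status addresses being programmed as separate hi/lo 32-bit register writes. Below is a minimal user-space sketch of that split; reg_write() stands in for the kernel's writel(), dma_wb_base_lo is the name visible at line 211, and the other register names are illustrative rather than the driver's.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for writel(val, reg): write one 32-bit MMIO register. */
    static void reg_write(uint32_t val, const char *reg)
    {
        printf("%-14s <= 0x%08x\n", reg, (unsigned)val);
    }

    /* Split a 64-bit bus address across a hi/lo 32-bit register pair, the
     * pattern used above for tx_desc_ring_pa and tx_status_pa. */
    static void program_base(uint64_t pa, const char *hi, const char *lo)
    {
        reg_write((uint32_t)(pa >> 32), hi);
        reg_write((uint32_t)pa, lo);
    }

    int main(void)
    {
        program_base(0x0000000123456000ULL, "dma_pr_base_hi", "dma_pr_base_lo");
        program_base(0x0000000123457000ULL, "dma_wb_base_hi", "dma_wb_base_lo");
        return 0;
    }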
221 * @etdev: pointer to our adapter structure
223 void et131x_tx_dma_disable(struct et131x_adapter *etdev)
227 &etdev->regs->txdma.csr);
232 * @etdev: pointer to our adapter structure
236 void et131x_tx_dma_enable(struct et131x_adapter *etdev)
242 &etdev->regs->txdma.csr);
289 struct et131x_adapter *etdev = NULL;
291 etdev = netdev_priv(netdev);
300 if (etdev->tx_ring.used >= NUM_TCB) {
310 if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
315 etdev->net_stats.tx_dropped++;
317 status = et131x_send_packet(skb, etdev);
324 etdev->net_stats.tx_dropped++;
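The et131x_tx hits (lines 289-324) outline the transmit entry point: the frame is refused when every TCB is already in use (line 300), dropped with net_stats.tx_dropped incremented when the adapter is flagged as failed (lines 310-315), and dropped again when et131x_send_packet() fails unexpectedly (lines 317-324). A self-contained sketch of that three-way decision follows; the flag, counter, and helper names are made up, and the assumption that the out-of-TCB case is reported back as -ENOMEM so the stack retries is not shown in the listing.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_TCB 64                    /* illustrative TCB pool size        */

    struct adapter {
        bool send_failed;                 /* stands in for the FAIL_SEND flags */
        int used;                         /* TCBs currently in flight          */
        unsigned long tx_dropped;
    };

    /* Hypothetical lower-level send; returns 0 on success. */
    static int send_packet(struct adapter *a)
    {
        a->used++;
        return 0;
    }

    /* Sketch of the transmit entry point's three outcomes. */
    static int xmit(struct adapter *a)
    {
        int status;

        if (a->used >= NUM_TCB)
            return -ENOMEM;               /* no TCB free: let the stack retry  */

        if (a->send_failed) {
            a->tx_dropped++;              /* adapter/link bad: drop silently   */
            return 0;
        }

        status = send_packet(a);
        if (status != 0 && status != -ENOMEM)
            a->tx_dropped++;              /* unexpected error: count the drop  */
        return status;
    }

    int main(void)
    {
        struct adapter a = { .send_failed = false, .used = 0, .tx_dropped = 0 };
        printf("status=%d used=%d dropped=%lu\n", xmit(&a), a.used, a.tx_dropped);
        return 0;
    }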
334 * @etdev: a pointer to the device's private adapter structure
341 struct et131x_adapter *etdev)
353 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
355 tcb = etdev->tx_ring.tcb_qhead;
358 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
362 etdev->tx_ring.tcb_qhead = tcb->next;
364 if (etdev->tx_ring.tcb_qhead == NULL)
365 etdev->tx_ring.tcb_qtail = NULL;
367 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
385 status = nic_send_packet(etdev, tcb);
388 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
390 if (etdev->tx_ring.tcb_qtail)
391 etdev->tx_ring.tcb_qtail->next = tcb;
394 etdev->tx_ring.tcb_qhead = tcb;
396 etdev->tx_ring.tcb_qtail = tcb;
397 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
400 WARN_ON(etdev->tx_ring.used > NUM_TCB);
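et131x_send_packet() (lines 341-400) pops a free TCB from tx_ring.tcb_qhead under TCBReadyQLock and, if nic_send_packet() fails, pushes it back onto the tail of the same list (or onto the head when the list is empty). Below is a self-contained sketch of that head/tail singly linked ready queue; the spinlock is omitted, so it only illustrates the pointer handling.

    #include <stddef.h>
    #include <stdio.h>

    struct tcb {
        struct tcb *next;
        int id;
    };

    /* Ready queue: singly linked list with head and tail pointers,
     * mirroring tx_ring.tcb_qhead / tcb_qtail. */
    struct ready_q {
        struct tcb *qhead;
        struct tcb *qtail;
    };

    /* Pop a free TCB from the head (returns NULL if the pool is empty). */
    static struct tcb *tcb_get(struct ready_q *q)
    {
        struct tcb *tcb = q->qhead;

        if (!tcb)
            return NULL;
        q->qhead = tcb->next;
        if (!q->qhead)
            q->qtail = NULL;              /* queue just became empty */
        tcb->next = NULL;
        return tcb;
    }

    /* Return a TCB to the tail (used on send failure and on completion). */
    static void tcb_put(struct ready_q *q, struct tcb *tcb)
    {
        tcb->next = NULL;
        if (q->qtail)
            q->qtail->next = tcb;
        else
            q->qhead = tcb;               /* queue was empty */
        q->qtail = tcb;
    }

    int main(void)
    {
        struct tcb pool[2] = { { &pool[1], 0 }, { NULL, 1 } };
        struct ready_q q = { &pool[0], &pool[1] };
        struct tcb *t = tcb_get(&q);

        tcb_put(&q, t);
        printf("head=%d tail=%d\n", q.qhead->id, q.qtail->id);
        return 0;
    }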
406 * @etdev: pointer to our adapter
411 static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
465 pci_map_single(etdev->pdev,
484 pci_map_single(etdev->pdev,
503 pci_map_single(etdev->pdev,
523 pci_map_page(etdev->pdev,
534 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
535 if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
538 etdev->tx_ring.since_irq = 0;
547 tcb->index_start = etdev->tx_ring.send_idx;
550 spin_lock_irqsave(&etdev->SendHWLock, flags);
553 INDEX10(etdev->tx_ring.send_idx);
562 memcpy(etdev->tx_ring.tx_desc_ring +
563 INDEX10(etdev->tx_ring.send_idx), desc,
566 add_10bit(&etdev->tx_ring.send_idx, thiscopy);
568 if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
569 INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
570 etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
571 etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
575 memcpy(etdev->tx_ring.tx_desc_ring,
579 add_10bit(&etdev->tx_ring.send_idx, remainder);
582 if (INDEX10(etdev->tx_ring.send_idx) == 0) {
583 if (etdev->tx_ring.send_idx)
588 tcb->index = etdev->tx_ring.send_idx - 1;
590 spin_lock(&etdev->TCBSendQLock);
592 if (etdev->tx_ring.send_tail)
593 etdev->tx_ring.send_tail->next = tcb;
595 etdev->tx_ring.send_head = tcb;
597 etdev->tx_ring.send_tail = tcb;
601 etdev->tx_ring.used++;
603 spin_unlock(&etdev->TCBSendQLock);
606 writel(etdev->tx_ring.send_idx,
607 &etdev->regs->txdma.service_request);
612 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
614 &etdev->regs->global.watchdog_timer);
616 spin_unlock_irqrestore(&etdev->SendHWLock, flags);
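Inside nic_send_packet() (lines 411-616), send_idx is not a plain array index: the low 10 bits select a descriptor slot (INDEX10 / ET_DMA10_MASK) and bit 10 (ET_DMA10_WRAP) flips each time the index passes the end of the ring, which is what lines 568-571 do. The sketch below shows that index-plus-wrap-bit arithmetic, assuming the mask is 0x3FF, the wrap bit is 0x400, and the ring holds 512 descriptors; the helper name is made up.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA10_MASK  0x3FFu   /* low 10 bits: descriptor slot             */
    #define DMA10_WRAP  0x400u   /* bit 10: flips on every trip round        */
    #define RING_SIZE   512u     /* descriptors per TX ring (illustrative)   */

    #define INDEX10(v)  ((v) & DMA10_MASK)

    /* Advance the send index by n slots; when it passes the end of the ring,
     * bring it back to the start and flip the wrap bit, as lines 568-571 do. */
    static void advance_send_idx(uint32_t *idx, uint32_t n)
    {
        uint32_t slot = INDEX10(*idx) + n;
        uint32_t wrap = *idx & DMA10_WRAP;

        if (slot >= RING_SIZE) {
            slot -= RING_SIZE;
            wrap ^= DMA10_WRAP;           /* completed one trip around       */
        }
        *idx = slot | wrap;
    }

    int main(void)
    {
        uint32_t send_idx = 510;              /* two slots from the end       */

        advance_send_idx(&send_idx, 4);       /* crosses the ring boundary    */
        printf("slot=%u wrap=%u\n", (unsigned)INDEX10(send_idx),
               (send_idx & DMA10_WRAP) ? 1u : 0u);
        return 0;
    }

The wrap bit is what lets the hardware's completion index later be compared against a TCB's stamped index without confusing "one lap behind" with "already done".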
624 * @etdev: pointer to our adapter
630 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
635 struct net_device_stats *stats = &etdev->net_stats;
638 atomic_inc(&etdev->Stats.brdcstxmt);
640 atomic_inc(&etdev->Stats.multixmt);
642 atomic_inc(&etdev->Stats.unixmt);
652 desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
655 pci_unmap_single(etdev->pdev,
665 } while (desc != (etdev->tx_ring.tx_desc_ring +
674 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
676 etdev->Stats.opackets++;
678 if (etdev->tx_ring.tcb_qtail)
679 etdev->tx_ring.tcb_qtail->next = tcb;
682 etdev->tx_ring.tcb_qhead = tcb;
684 etdev->tx_ring.tcb_qtail = tcb;
686 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
687 WARN_ON(etdev->tx_ring.used < 0);
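et131x_free_send_packet() (lines 630-687) walks the descriptors a completed packet occupied, starting at the slot stamped in tcb->index_start (line 547) and wrapping at the end of the ring, unmapping each fragment with pci_unmap_single() before handing the TCB back to the ready queue. A rough sketch of that bounded ring walk, with the unmap replaced by a print and an illustrative ring size:

    #include <stdio.h>

    #define RING_SIZE 512                  /* illustrative */

    struct tx_desc {
        unsigned long dma_addr;
        unsigned int len;
    };

    /* Visit every descriptor a finished packet used, from 'start' up to and
     * including 'last', wrapping at the end of the ring.  In the driver each
     * visit is a pci_unmap_single() of that fragment's DMA mapping. */
    static void unmap_packet_descs(struct tx_desc *ring, int start, int last)
    {
        int i = start;

        for (;;) {
            printf("unmap desc %d: dma 0x%lx len %u\n",
                   i, ring[i].dma_addr, ring[i].len);
            if (i == last)
                break;
            i = (i + 1) % RING_SIZE;       /* wrap back to slot 0 at the end */
        }
    }

    int main(void)
    {
        static struct tx_desc ring[RING_SIZE];

        ring[510].len = 1024;
        ring[511].len = 512;
        ring[0].len = 60;
        unmap_packet_descs(ring, 510, 0);  /* a packet that spanned the wrap */
        return 0;
    }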
692 * @etdev: pointer to our adapter
696 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
703 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
705 tcb = etdev->tx_ring.send_head;
710 etdev->tx_ring.send_head = next;
713 etdev->tx_ring.send_tail = NULL;
715 etdev->tx_ring.used--;
717 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
720 et131x_free_send_packet(etdev, tcb);
722 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
724 tcb = etdev->tx_ring.send_head;
729 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
731 etdev->tx_ring.used = 0;
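et131x_free_busy_send_packets() (lines 696-731) drains the in-flight list at teardown with a detach-then-unlock pattern: take TCBSendQLock, unhook one TCB from send_head, release the lock, free the packet, then re-acquire the lock for the next entry, so the free never runs with the queue lock held. A self-contained pthreads sketch of that drain loop:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct tcb {
        struct tcb *next;
        int id;
    };

    static pthread_mutex_t send_q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct tcb *send_head;

    /* Stands in for et131x_free_send_packet(): it takes other locks of its
     * own, so it must run with send_q_lock released. */
    static void free_one(struct tcb *tcb)
    {
        printf("freeing tcb %d\n", tcb->id);
    }

    static void free_busy_send_packets(void)
    {
        pthread_mutex_lock(&send_q_lock);
        while (send_head) {
            struct tcb *tcb = send_head;   /* detach one entry ...        */

            send_head = tcb->next;
            pthread_mutex_unlock(&send_q_lock);
            free_one(tcb);                 /* ... process it unlocked     */
            pthread_mutex_lock(&send_q_lock);
        }
        pthread_mutex_unlock(&send_q_lock);
    }

    int main(void)
    {
        struct tcb b = { NULL, 1 }, a = { &b, 0 };

        send_head = &a;
        free_busy_send_packets();
        return 0;
    }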
736 * @etdev: pointer to our adapter
743 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
750 serviced = readl(&etdev->regs->txdma.NewServiceComplete);
756 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
758 tcb = etdev->tx_ring.send_head;
763 etdev->tx_ring.used--;
764 etdev->tx_ring.send_head = tcb->next;
766 etdev->tx_ring.send_tail = NULL;
768 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
769 et131x_free_send_packet(etdev, tcb);
770 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
773 tcb = etdev->tx_ring.send_head;
778 etdev->tx_ring.used--;
779 etdev->tx_ring.send_head = tcb->next;
781 etdev->tx_ring.send_tail = NULL;
783 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
784 et131x_free_send_packet(etdev, tcb);
785 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
788 tcb = etdev->tx_ring.send_head;
792 if (etdev->tx_ring.used <= NUM_TCB / 3)
793 netif_wake_queue(etdev->netdev);
795 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
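et131x_handle_send_interrupt() (lines 743-795) reads the hardware's NewServiceComplete index, which uses the same 10-bit-plus-wrap encoding as send_idx, and frees every TCB the hardware has moved past; the two similar loops at lines 758-788 cover the cases where the wrap bits differ and where they match, and once tx_ring.used falls to NUM_TCB / 3 the netif queue is woken (lines 792-793). The sketch below folds the two loop conditions into a single "is this TCB done?" test; it is an illustration of the comparison, not the driver's code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA10_MASK 0x3FFu
    #define DMA10_WRAP 0x400u

    /* True if the hardware's completion index 'serviced' has moved past a
     * TCB stamped with 'index'.  When the wrap bits match, the serviced slot
     * must be beyond the TCB's slot; when they differ, the hardware has gone
     * around the ring once more, so the TCB is done while its slot is still
     * ahead of the serviced slot. */
    static bool tcb_done(uint32_t serviced, uint32_t index)
    {
        bool same_wrap = ((serviced ^ index) & DMA10_WRAP) == 0;
        uint32_t s = serviced & DMA10_MASK;
        uint32_t i = index & DMA10_MASK;

        return same_wrap ? (s > i) : (s < i);
    }

    int main(void)
    {
        uint32_t tcb_index = 100;  /* packet's last descriptor, wrap bit clear */

        printf("%d\n", tcb_done(101, tcb_index));               /* passed: 1   */
        printf("%d\n", tcb_done(100, tcb_index));                /* not yet: 0  */
        printf("%d\n", tcb_done(50 | DMA10_WRAP, tcb_index));    /* wrapped: 1  */
        return 0;
    }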