Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/amso1100/

Lines Matching defs:c2_port

79 static void c2_reset(struct c2_port *c2_port);
93 static void c2_set_rxbufsize(struct c2_port *c2_port)
95 struct net_device *netdev = c2_port->netdev;
98 c2_port->rx_buf_size =
102 c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
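The matches at lines 93-102 are c2_set_rxbufsize() choosing the per-port receive buffer size; only the default branch (line 102) appears above. A minimal sketch of the helper, assuming the oversize-MTU branch pads with the usual ETH_HLEN and NET_IP_ALIGN terms (those are not in the matches):

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		/* assumed branch: size the buffer for a large-MTU frame */
		c2_port->rx_buf_size =
			netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
			NET_IP_ALIGN;
	else
		/* matched line 102: adapter header plus the default buffer */
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}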
207 static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
209 struct c2_dev *c2dev = c2_port->c2dev;
216 skb = dev_alloc_skb(c2_port->rx_buf_size);
219 c2_port->netdev->name);
226 skb->dev = c2_port->netdev;
228 maplen = c2_port->rx_buf_size;
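Lines 207-228 are c2_rx_alloc(), which allocates and DMA-maps one skb for a single ring element. A sketch built around the matched lines; the descriptor write-back at the end is summarized as an assumption:

static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;       /* line 209 */
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;

	skb = dev_alloc_skb(c2_port->rx_buf_size);   /* line 216 */
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			 c2_port->netdev->name);     /* line 219 */
		return -ENOMEM;
	}

	skb->dev = c2_port->netdev;                  /* line 226 */
	mapaddr = pci_map_single(c2dev->pcidev, skb->data,
				 c2_port->rx_buf_size, PCI_DMA_FROMDEVICE);
	maplen = c2_port->rx_buf_size;               /* line 228 */

	/* assumed: record skb/mapaddr/maplen in the element and post the
	 * buffer to the adapter's RXP descriptor before returning */
	return 0;
}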
256 static int c2_rx_fill(struct c2_port *c2_port)
258 struct c2_ring *rx_ring = &c2_port->rx_ring;
264 if (c2_rx_alloc(c2_port, elem)) {
275 static void c2_rx_clean(struct c2_port *c2_port)
277 struct c2_dev *c2dev = c2_port->c2dev;
278 struct c2_ring *rx_ring = &c2_port->rx_ring;
322 static void c2_tx_clean(struct c2_port *c2_port)
324 struct c2_ring *tx_ring = &c2_port->tx_ring;
330 spin_lock_irqsave(&c2_port->tx_lock, flags);
348 c2_port->netdev->stats.tx_dropped++;
359 c2_tx_free(c2_port->c2dev, elem);
364 c2_port->tx_avail = c2_port->tx_ring.count - 1;
365 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
367 if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
368 netif_wake_queue(c2_port->netdev);
370 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
379 struct c2_port *c2_port = netdev_priv(netdev);
380 struct c2_dev *c2dev = c2_port->c2dev;
381 struct c2_ring *tx_ring = &c2_port->tx_ring;
385 spin_lock(&c2_port->tx_lock);
395 if (netif_msg_tx_done(c2_port)) {
406 ++(c2_port->tx_avail);
412 && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
415 spin_unlock(&c2_port->tx_lock);
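Lines 322-415 cover the transmit-reclaim side: c2_tx_clean() resets tx_avail to count - 1 after a reset, while the completion path bumps tx_avail for each reclaimed descriptor and wakes the queue only once more than a worst-case frame (MAX_SKB_FRAGS + 1 descriptors) is free again. A condensed sketch of that completion-side bookkeeping under a hypothetical name; the real loop also checks a per-descriptor done flag from the adapter, which is omitted here:

static void c2_tx_reclaim_sketch(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;

	spin_lock(&c2_port->tx_lock);                 /* line 385 */

	/* assumed: everything between to_clean and to_use has completed */
	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		c2_tx_free(c2_port->c2dev, elem);     /* unmap and free the skb */
		++(c2_port->tx_avail);                /* line 406 */
	}
	tx_ring->to_clean = elem;

	/* wake the stack only when a full worst-case frame fits again */
	if (netif_queue_stopped(netdev) &&
	    c2_port->tx_avail > MAX_SKB_FRAGS + 1)    /* line 412 */
		netif_wake_queue(netdev);             /* cf. line 368 */

	spin_unlock(&c2_port->tx_lock);               /* line 415 */
}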
418 static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
428 elem - c2_port->rx_ring.start);
456 c2_port->netdev->stats.rx_dropped++;
461 struct c2_port *c2_port = netdev_priv(netdev);
462 struct c2_dev *c2dev = c2_port->c2dev;
463 struct c2_ring *rx_ring = &c2_port->rx_ring;
492 c2_rx_error(c2_port, elem);
500 if (c2_rx_alloc(c2_port, elem)) {
501 c2_rx_error(c2_port, elem);
585 struct c2_port *c2_port = netdev_priv(netdev);
586 struct c2_dev *c2dev = c2_port->c2dev;
594 if (netif_msg_ifup(c2_port))
598 c2_set_rxbufsize(c2_port);
601 rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
602 tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
604 c2_port->mem_size = tx_size + rx_size;
605 c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
606 &c2_port->dma);
607 if (c2_port->mem == NULL) {
613 memset(c2_port->mem, 0, c2_port->mem_size);
617 c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
624 if (c2_rx_fill(c2_port)) {
630 if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
631 c2_port->dma + rx_size,
638 c2_port->tx_avail = c2_port->tx_ring.count - 1;
639 c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
640 c2_port->tx_ring.start + c2dev->cur_tx;
644 BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
647 c2_reset(c2_port);
650 for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
679 c2_rx_clean(c2_port);
680 kfree(c2_port->rx_ring.start);
683 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
684 c2_port->dma);
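c2_up() (lines 585-684) carves both host rings out of one coherent DMA block: rx_size + tx_size bytes are allocated in a single pci_alloc_consistent() call, the RX ring is built at the start of the block and the TX ring at mem + rx_size with the matching bus offset. A sketch of that carve-out as a standalone helper; the trailing MMIO-ring arguments to the ring-alloc calls are assumptions, since the matches truncate them:

static int c2_alloc_rings_sketch(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned long rx_size, tx_size;
	int ret;

	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);  /* line 601 */
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);  /* line 602 */

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);            /* line 605 */
	if (c2_port->mem == NULL)
		return -ENOMEM;
	memset(c2_port->mem, 0, c2_port->mem_size);                    /* line 613 */

	/* RX ring at the start of the block, TX ring immediately after it */
	ret = c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			       c2dev->mmio_rxp_ring);   /* last arg assumed */
	if (ret)
		goto free_mem;
	ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
			       c2_port->dma + rx_size,
			       c2dev->mmio_txp_ring);   /* last arg assumed */
	if (ret)
		goto free_mem;
	return 0;

free_mem:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);              /* lines 683-684 */
	return ret;
}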
691 struct c2_port *c2_port = netdev_priv(netdev);
692 struct c2_dev *c2dev = c2_port->c2dev;
694 if (netif_msg_ifdown(c2_port))
713 c2_reset(c2_port);
718 c2_tx_clean(c2_port);
719 c2_rx_clean(c2_port);
722 kfree(c2_port->rx_ring.start);
723 kfree(c2_port->tx_ring.start);
724 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
725 c2_port->dma);
730 static void c2_reset(struct c2_port *c2_port)
732 struct c2_dev *c2dev = c2_port->c2dev;
758 struct c2_port *c2_port = netdev_priv(netdev);
759 struct c2_dev *c2dev = c2_port->c2dev;
760 struct c2_ring *tx_ring = &c2_port->tx_ring;
767 spin_lock_irqsave(&c2_port->tx_lock, flags);
769 if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
771 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
827 c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
829 if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
831 if (netif_msg_tx_queued(c2_port))
836 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
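The transmit entry point (lines 758-836) mirrors that reclaim logic: it refuses the frame if fewer than nr_frags + 1 descriptors remain, charges that many on success, and stops the queue once another worst-case frame might no longer fit. A condensed sketch of just the flow-control bookkeeping, under a hypothetical name and with the descriptor setup elided:

static netdev_tx_t c2_xmit_frame_sketch(struct sk_buff *skb,
					struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);             /* line 767 */

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);                        /* line 769 */
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);
		return NETDEV_TX_BUSY;        /* ask the stack to retry later */
	}

	/* assumed: map the head and each fragment into TX descriptors here */

	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);    /* line 827 */
	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {            /* line 829 */
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))                /* line 831 */
			pr_debug("%s: transmit queue full\n", netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);        /* line 836 */
	return NETDEV_TX_OK;
}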
845 struct c2_port *c2_port = netdev_priv(netdev);
847 if (netif_msg_timer(c2_port))
850 c2_tx_clean(c2_port);
885 struct c2_port *c2_port = NULL;
886 struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
889 pr_debug("c2_port etherdev alloc failed");
899 c2_port = netdev_priv(netdev);
900 c2_port->netdev = netdev;
901 c2_port->c2dev = c2dev;
902 c2_port->msg_enable = netif_msg_init(debug, default_msg);
903 c2_port->tx_ring.count = C2_NUM_TX_DESC;
904 c2_port->rx_ring.count = C2_NUM_RX_DESC;
906 spin_lock_init(&c2_port->tx_lock);
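Taken together, the matches touch a consistent set of per-port fields: the netdev/c2dev back-pointers and msg_enable set up at probe time (lines 899-906), the two rings and their counts, the TX availability counter and its lock, the shared DMA block from c2_up(), and the receive buffer size. A struct c2_port sketch reconstructed purely from those references; the real definition lives in the driver's header and may differ in ordering, types, or extra fields:

struct c2_port {
	u32			msg_enable;	/* line 902 */
	struct c2_dev		*c2dev;		/* line 901 */
	struct net_device	*netdev;	/* line 900 */

	spinlock_t		tx_lock;	/* guards the TX ring and tx_avail */
	u32			tx_avail;	/* free TX descriptors, lines 364/406/827 */
	struct c2_ring		tx_ring;	/* line 903 */
	struct c2_ring		rx_ring;	/* line 904 */

	void			*mem;		/* one DMA block holding both rings */
	dma_addr_t		dma;		/* its bus address, line 606 */
	unsigned long		mem_size;	/* tx_size + rx_size, line 604 */

	u32			rx_buf_size;	/* per-buffer RX size, line 102 */
};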