Lines Matching defs:tx_ring

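Every match below touches a field of struct hfi1_ipoib_circ_buf, the per-queue transmit ring. As a reading aid, here is a field-by-field reconstruction inferred only from the accesses in this listing; the types are guesses from the accessors, and the real definition in drivers/infiniband/hw/hfi1/ipoib.h may differ in order, width, and cacheline annotations:

	/* Reconstruction from the uses below -- not the verbatim definition. */
	struct hfi1_ipoib_circ_buf {
		void *items;		/* flat slot buffer, allocated at line 731 */
		u32 max_items;		/* ring capacity, a power of two (line 737) */
		u32 shift;		/* ilog2(slot size) for indexing (line 738) */
		u32 head;		/* reclaim index, release-published (line 178) */
		u32 tail;		/* fill index, release-published (lines 482, 545) */
		u32 avail;		/* producer-cached free-slot count (lines 359-371) */
		u64 sent_txreqs;	/* total posted (line 81) */
		u64 complete_txreqs;	/* total reclaimed (line 175) */
		atomic_t stops;		/* nested stop count (lines 56, 63) */
		atomic_t ring_full;	/* high-watermark latch (lines 83, 108) */
		atomic_t no_desc;	/* sdma descriptor-shortage latch (lines 636, 679) */
	};
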
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
50 txq->tx_ring.complete_txreqs);
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1)
63 if (atomic_dec_and_test(&txq->tx_ring.stops))
70 txq->tx_ring.max_items - 1);
76 txq->tx_ring.max_items) >> 1;
81 ++txq->tx_ring.sent_txreqs;
83 !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
108 atomic_xchg(&txq->tx_ring.ring_full, 0)) {
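Lines 56-108 implement queue backpressure. stops is a nest count: the subqueue is stopped on its 0->1 transition (line 56) and woken on 1->0 (line 63), so the ring_full latch here and the no_desc latch at lines 636/679 can each hold an independent stop. hfi1_ipoib_used() is presumably sent_txreqs - complete_txreqs (lines 49-50), and the watermarks come from the min_t() fragments at lines 70 and 76. A condensed sketch with stand-in helper names (used/ring_hwat/ring_lwat/stop_txq/wake_txq are not quoted from the source):

	/* Producer: after posting a request, latch ring_full once at the
	 * high watermark (min(tx_queue_len, max_items - 1), line 70). */
	static void check_queue_depth(struct hfi1_ipoib_txq *txq)
	{
		++txq->tx_ring.sent_txreqs;			/* line 81 */
		if (used(txq) >= ring_hwat(txq) &&
		    !atomic_xchg(&txq->tx_ring.ring_full, 1))	/* line 83 */
			stop_txq(txq);	/* stops 0->1 stops the subqueue */
	}

	/* Completion: once usage falls under the low watermark
	 * (min(tx_queue_len, max_items) >> 1, line 76), the latch holder wakes. */
	static void check_queue_stopped(struct hfi1_ipoib_txq *txq)
	{
		if (used(txq) < ring_lwat(txq) &&
		    atomic_xchg(&txq->tx_ring.ring_full, 0))	/* line 108 */
			wake_txq(txq);	/* stops 1->0 wakes the subqueue */
	}
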
136 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
140 for (i = 0; i < tx_ring->max_items; i++) {
141 tx = hfi1_txreq_from_idx(tx_ring, i);
147 tx_ring->head = 0;
148 tx_ring->tail = 0;
149 tx_ring->complete_txreqs = 0;
150 tx_ring->sent_txreqs = 0;
151 tx_ring->avail = hfi1_ipoib_ring_hwat(txq);
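hfi1_txreq_from_idx() appears throughout the listing. Since shift is set to ilog2(tx_item_size) (line 738) and items is a single flat allocation of tx_ring_size * tx_item_size bytes (line 731), the helper is presumably a shift-and-add into that buffer. A sketch of the assumed implementation:

	/* Assumed: slot idx starts at byte offset idx << shift inside items. */
	static inline struct ipoib_txreq *
	hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
	{
		return (struct ipoib_txreq *)(r->items + (idx << r->shift));
	}

(Arithmetic on void * is the GCC extension the kernel builds with.)
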
158 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
159 u32 head = tx_ring->head;
160 u32 max_tx = tx_ring->max_items;
162 struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);
173 tx = hfi1_txreq_from_idx(tx_ring, head);
175 tx_ring->complete_txreqs += work_done;
178 smp_store_release(&tx_ring->head, head);
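Lines 158-178 are the completion (NAPI poll) side: walk forward from head, reclaim each finished slot, then publish the new head exactly once with a release store, which pairs with the producer's smp_load_acquire at line 366. A minimal sketch; the per-slot tx->complete flag polled here is an assumption about how the walk decides to stop, since those lines do not mention tx_ring and are therefore absent from this listing:

	static int poll_tx_ring_sketch(struct hfi1_ipoib_txq *txq, int budget)
	{
		struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
		u32 head = tx_ring->head;			/* line 159 */
		struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);
		int work_done;

		for (work_done = 0; work_done < budget; work_done++) {
			if (!smp_load_acquire(&tx->complete))	/* assumed flag */
				break;
			tx->complete = 0;
			/* ... free skb, clean per-slot sdma state ... */
			head = CIRC_NEXT(head, tx_ring->max_items);
			tx = hfi1_txreq_from_idx(tx_ring, head); /* line 173 */
		}
		tx_ring->complete_txreqs += work_done;		/* line 175 */
		/* Slots are fully reclaimed before the new head is visible. */
		smp_store_release(&tx_ring->head, head);	/* line 178 */
		return work_done;
	}
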
330 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs));
355 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
356 u32 tail = tx_ring->tail;
359 if (unlikely(!tx_ring->avail)) {
366 head = smp_load_acquire(&tx_ring->head);
367 tx_ring->avail =
369 CIRC_CNT(head, tail, tx_ring->max_items));
371 tx_ring->avail--;
373 tx = hfi1_txreq_from_idx(tx_ring, tail);
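Lines 355-373 are the send-side slot acquisition. avail is a producer-local cache so the hot path never reads the consumer-written head; only when avail hits zero does the producer acquire-load head (pairing with the release at line 178) and recompute free space with CIRC_CNT, clamped to the high watermark. Note the role inversion relative to the usual circ_buf naming: sending consumes free slots at tail while completion produces them back at head (hence trace_hfi1_tx_consume and trace_hfi1_tx_produce below). Sketch, with the error path elided; the min_t() clamp on hidden line 368 is an inference from the dangling parenthesis at line 369 and the watermark initialization at line 739:

	static struct ipoib_txreq *acquire_tx_slot(struct hfi1_ipoib_txq *txq)
	{
		struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
		u32 tail = tx_ring->tail;			/* line 356 */

		if (unlikely(!tx_ring->avail)) {		/* line 359 */
			u32 head = smp_load_acquire(&tx_ring->head); /* line 366 */

			tx_ring->avail =
				min_t(u32, ring_hwat(txq),	/* stand-in, line 70 */
				      CIRC_CNT(head, tail, tx_ring->max_items));
		} else {
			tx_ring->avail--;			/* line 371 */
		}
		return hfi1_txreq_from_idx(tx_ring, tail);	/* line 373 */
	}
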
461 struct hfi1_ipoib_circ_buf *tx_ring;
479 tx_ring = &txq->tx_ring;
480 trace_hfi1_tx_consume(tx, tx_ring->tail);
482 smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
512 struct hfi1_ipoib_circ_buf *tx_ring;
542 tx_ring = &txq->tx_ring;
543 trace_hfi1_tx_consume(tx, tx_ring->tail);
545 smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
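Both send paths finish with the same consume step (lines 480-482 and 543-545): once the slot at tail is fully built, the new tail is published with a release store, ordering all writes to the slot before the index update becomes visible. CIRC_NEXT is not part of include/linux/circ_buf.h; given the CIRC_CNT use above, max_items must be a power of two, so the macro presumably wraps like this (an assumption, shown only for context):

	/* Assumed semantics: advance an index with power-of-two wraparound. */
	#define CIRC_NEXT(val, size)	(((val) + 1) & ((size) - 1))
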
636 if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
679 if (atomic_xchg(&txq->tx_ring.no_desc, 0))
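Lines 636 and 679 repeat the ring_full idiom for sdma descriptor exhaustion: the first path to observe the shortage latches no_desc and takes one reference on stops; the wakeup path clears the latch and drops it. Sketch, with the surrounding callback bodies elided and stop_txq/wake_txq the same stand-ins as above:

	if (!atomic_xchg(&txq->tx_ring.no_desc, 1))	/* line 636: sleep path */
		stop_txq(txq);		/* stops 0->1 stops the subqueue */

	if (atomic_xchg(&txq->tx_ring.no_desc, 0))	/* line 679: wakeup path */
		wake_txq(txq);		/* stops 1->0 wakes the subqueue */

Because stops is a counter rather than a flag, a queue stopped for both a full ring and a descriptor shortage stays stopped until both latches have been cleared.
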
687 struct hfi1_ipoib_circ_buf *tx_ring;
708 tx_ring = &txq->tx_ring;
720 atomic_set(&txq->tx_ring.stops, 0);
721 atomic_set(&txq->tx_ring.ring_full, 0);
722 atomic_set(&txq->tx_ring.no_desc, 0);
731 txq->tx_ring.items =
734 if (!txq->tx_ring.items)
737 txq->tx_ring.max_items = tx_ring_size;
738 txq->tx_ring.shift = ilog2(tx_item_size);
739 txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
740 tx_ring = &txq->tx_ring;
742 hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
745 if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
759 tx_ring = &txq->tx_ring;
761 kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
762 kvfree(tx_ring->items);
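Lines 731-762 size and populate the ring: items is one flat buffer of tx_ring_size slots of tx_item_size bytes each (the kvfree() at lines 762/809 implies a kvmalloc-family allocation; the exact call sits on hidden lines 732-733), then each slot gets its own sdma_hdr allocation, with the error path unwinding in reverse. A condensed sketch; the allocation flags and the exact unwind structure are assumptions:

	static int init_tx_ring_sketch(struct hfi1_ipoib_txq *txq,
				       u32 tx_ring_size, u32 tx_item_size)
	{
		struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
		u32 j;

		tx_ring->items = kvzalloc(array_size(tx_ring_size, tx_item_size),
					  GFP_KERNEL);		/* line 731 */
		if (!tx_ring->items)				/* line 734 */
			return -ENOMEM;
		tx_ring->max_items = tx_ring_size;		/* line 737 */
		tx_ring->shift = ilog2(tx_item_size);		/* line 738 */
		tx_ring->avail = ring_hwat(txq);		/* line 739 */

		for (j = 0; j < tx_ring_size; j++) {
			struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, j);

			tx->sdma_hdr = kzalloc(sizeof(*tx->sdma_hdr), GFP_KERNEL);
			if (!tx->sdma_hdr)			/* line 745 */
				goto err;
		}
		return 0;
	err:
		while (j--)					/* line 761 */
			kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
		kvfree(tx_ring->items);				/* line 762 */
		return -ENOMEM;
	}
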
783 txq->tx_ring.complete_txreqs++;
790 hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
791 txq->tx_ring.complete_txreqs));
800 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
807 for (j = 0; j < tx_ring->max_items; j++)
808 kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
809 kvfree(tx_ring->items);
849 atomic_read(&txq->tx_ring.stops),
850 atomic_read(&txq->tx_ring.no_desc),
851 atomic_read(&txq->tx_ring.ring_full));
857 txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs,
860 dev->tx_queue_len, txq->tx_ring.max_items);
862 txq->tx_ring.head, txq->tx_ring.tail);