Lines Matching defs:txr

165 struct tx_ring *txr;
191 txr = &que->txr;
193 err = drbr_enqueue(ifp, txr->br, m);
196 if (IXL_TX_TRYLOCK(txr)) {
197 ixl_mq_start_locked(ifp, txr);
198 IXL_TX_UNLOCK(txr);
206 ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
208 struct ixl_queue *que = txr->que;
219 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
222 drbr_advance(ifp, txr->br);
224 drbr_putback(ifp, txr->br, next);
227 drbr_advance(ifp, txr->br);
234 if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
247 struct tx_ring *txr = &que->txr;
251 IXL_TX_LOCK(txr);
252 if (!drbr_empty(ifp, txr->br))
253 ixl_mq_start_locked(ifp, txr);
254 IXL_TX_UNLOCK(txr);
267 struct tx_ring *txr = &que->txr;
269 IXL_TX_LOCK(txr);
270 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
272 IXL_TX_UNLOCK(txr);
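
The ixl_mq_start/ixl_mq_start_locked matches above (source lines 191-254) show the multiqueue transmit path: packets are enqueued on a per-queue buf_ring, and the locked start routine drains it with drbr_peek(), calling drbr_advance() once a packet has been handed to the hardware or drbr_putback() when it could not be sent. Below is a minimal userspace sketch of that peek/advance/putback discipline; the pktq type and the queue_*/try_xmit helpers are hypothetical stand-ins for the FreeBSD drbr_*/buf_ring API, not part of the driver.

#include <stdbool.h>
#include <stdio.h>

#define QLEN 8

/* Toy stand-in for a drbr/buf_ring: fixed array with a head index. */
struct pktq {
	const char *pkts[QLEN];
	int head;       /* next packet to peek */
	int count;      /* packets remaining */
};

/* Look at the head packet without removing it (like drbr_peek). */
static const char *queue_peek(struct pktq *q)
{
	return (q->count > 0) ? q->pkts[q->head] : NULL;
}

/* Commit the peeked packet: drop it from the queue (like drbr_advance). */
static void queue_advance(struct pktq *q)
{
	q->head = (q->head + 1) % QLEN;
	q->count--;
}

/* Transmit failed: leave the packet at the head (like drbr_putback). */
static void queue_putback(struct pktq *q, const char *pkt)
{
	q->pkts[q->head] = pkt;    /* head index is unchanged */
}

/* Pretend the hardware ring only has room for three more packets. */
static bool try_xmit(const char *pkt, int *descs_avail)
{
	if (*descs_avail == 0)
		return false;
	(*descs_avail)--;
	printf("transmitted %s\n", pkt);
	return true;
}

int main(void)
{
	struct pktq q = { { "p0", "p1", "p2", "p3", "p4" }, 0, 5 };
	int descs_avail = 3;
	const char *next;

	/* Same shape as the while ((next = drbr_peek(...)) != NULL) loop. */
	while ((next = queue_peek(&q)) != NULL) {
		if (!try_xmit(next, &descs_avail)) {
			queue_putback(&q, next);   /* ring full: retry later */
			break;
		}
		queue_advance(&q);                 /* packet is on the wire */
	}
	printf("%d packet(s) left queued\n", q.count);
	return 0;
}

The property the driver relies on is that a peeked packet stays at the head of the ring until it is explicitly advanced, so a failed transmit loses nothing and the deferred-start task can simply retry later.
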
319 struct tx_ring *txr = &que->txr;
339 first = txr->next_avail;
340 buf = &txr->buffers[first];
342 tag = txr->tx_tag;
346 tag = txr->tso_tag;
394 if (nsegs > txr->avail - 2) {
395 txr->no_desc++;
415 i = txr->next_avail;
419 buf = &txr->buffers[i];
421 txd = &txr->base[i];
443 txr->avail -= nsegs;
444 txr->next_avail = i;
451 txr->buffers[first].map = buf->map;
456 buf = &txr->buffers[first];
459 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
465 ++txr->total_packets;
466 wr32(hw, txr->tail, i);
469 atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
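
The ixl_xmit matches (source lines 319-469) sketch the producer side of the descriptor ring: refuse the packet if it needs more segments than txr->avail minus a small reserve, fill descriptors starting at txr->next_avail, subtract the consumed count from txr->avail, sync the descriptor DMA map, write the new producer index to the tail register (wr32(hw, txr->tail, i)) and arm the watchdog. The sketch below shows only the index arithmetic; toy_txring, toy_xmit and NDESC are made-up names, and the DMA sync, register write and watchdog arming are reduced to comments or plain stores.

#include <stdbool.h>
#include <stdio.h>

#define NDESC 16   /* hypothetical ring size; the real one is per-queue */

/* Producer-side state, mirroring txr->next_avail / txr->avail / txr->tail. */
struct toy_txring {
	unsigned next_avail;   /* first free descriptor slot */
	unsigned avail;        /* number of free descriptors */
	unsigned tail;         /* last value "written" to the tail register */
};

/*
 * Reserve nsegs descriptors for one packet.  Returns false (the driver
 * returns an error and bumps no_desc) when the ring is too full; otherwise
 * advances next_avail, shrinks avail and publishes the new producer index.
 */
static bool toy_xmit(struct toy_txring *txr, unsigned nsegs)
{
	unsigned i;

	/* Keep a small reserve, like the driver's "nsegs > avail - 2" check. */
	if (txr->avail < 2 || nsegs > txr->avail - 2)
		return false;

	i = txr->next_avail;
	for (unsigned n = 0; n < nsegs; n++) {
		/* in the driver: fill txr->base[i] and txr->buffers[i] here */
		i = (i + 1) % NDESC;
	}

	txr->avail -= nsegs;
	txr->next_avail = i;
	txr->tail = i;          /* stands in for wr32(hw, txr->tail, i) */
	return true;
}

int main(void)
{
	struct toy_txring txr = { .next_avail = 0, .avail = NDESC, .tail = 0 };

	printf("3-seg packet:  %s\n", toy_xmit(&txr, 3) ? "queued" : "no descriptors");
	printf("12-seg packet: %s\n", toy_xmit(&txr, 12) ? "queued" : "no descriptors");
	printf("avail=%u next_avail=%u tail=%u\n", txr.avail, txr.next_avail, txr.tail);
	return 0;
}
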
488 struct tx_ring *txr = &que->txr;
508 &txr->tx_tag))) {
525 &txr->tso_tag))) {
530 if (!(txr->buffers =
539 buf = txr->buffers;
541 buf->tag = txr->tx_tag;
557 free(txr->buffers, M_DEVBUF);
558 txr->buffers = NULL;
560 bus_dma_tag_destroy(txr->tso_tag);
561 txr->tso_tag = NULL;
563 bus_dma_tag_destroy(txr->tx_tag);
564 txr->tx_tag = NULL;
584 struct tx_ring *txr = &que->txr;
588 IXL_TX_LOCK(txr);
598 bzero((void *)txr->base,
602 txr->next_avail = 0;
603 txr->next_to_clean = 0;
606 txr->watchdog_timer = 0;
609 buf = txr->buffers;
636 txr->avail = que->num_tx_desc;
638 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
640 IXL_TX_UNLOCK(txr);
652 struct tx_ring *txr = &que->txr;
658 buf = &txr->buffers[i];
668 if (txr->buffers != NULL) {
669 free(txr->buffers, M_DEVBUF);
670 txr->buffers = NULL;
672 if (txr->tx_tag != NULL) {
673 bus_dma_tag_destroy(txr->tx_tag);
674 txr->tx_tag = NULL;
676 if (txr->tso_tag != NULL) {
677 bus_dma_tag_destroy(txr->tso_tag);
678 txr->tso_tag = NULL;
800 struct tx_ring *txr = &que->txr;
872 idx = txr->next_avail;
873 buf = &txr->buffers[idx];
874 TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
899 txr->avail--;
900 txr->next_avail = idx;
912 struct tx_ring *txr = &que->txr;
913 void *head = &txr->base[que->num_tx_desc];
926 struct tx_ring *txr = &que->txr;
931 mtx_assert(&txr->mtx, MA_OWNED);
940 if (txr->avail == que->num_tx_desc) {
941 atomic_store_rel_32(&txr->watchdog_timer, 0);
945 first = txr->next_to_clean;
946 buf = &txr->buffers[first];
947 tx_desc = (struct i40e_tx_desc *)&txr->base[first];
951 eop_desc = (struct i40e_tx_desc *)&txr->base[last];
954 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
978 ++txr->avail;
981 txr->bytes += /* for ITR adjustment */
983 txr->tx_bytes += /* for TX stats */
998 buf = &txr->buffers[first];
999 tx_desc = &txr->base[first];
1001 ++txr->packets;
1003 atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
1007 eop_desc = &txr->base[last];
1014 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
1017 txr->next_to_clean = first;
1022 if (txr->avail == que->num_tx_desc) {
1023 atomic_store_rel_32(&txr->watchdog_timer, 0);
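
The ixl_txeof matches (source lines 912-1023) are the consumer side: starting from txr->next_to_clean, walk the descriptors the hardware has completed, free each buffer, bump txr->avail per reclaimed slot, accumulate byte and packet counters, and clear the watchdog with atomic_store_rel_32 once avail equals the full ring size. In this head-writeback variant the completion point is read from one slot past the last real descriptor (&txr->base[que->num_tx_desc]). Here is a compact sketch of that next_to_clean/avail bookkeeping, with a hw_head argument standing in for the hardware's head writeback and all mbuf/DMA teardown omitted; toy_txring and toy_txeof are illustrative names only.

#include <stdio.h>

#define NDESC 16   /* hypothetical ring size */

struct toy_txring {
	unsigned next_to_clean;   /* first descriptor not yet reclaimed */
	unsigned avail;           /* free descriptors */
	unsigned watchdog_timer;  /* nonzero while work is pending */
};

/*
 * Reclaim descriptors the hardware has finished with.  "hw_head" plays the
 * role of the head-writeback value: every descriptor before it is done.
 */
static void toy_txeof(struct toy_txring *txr, unsigned hw_head, unsigned ndesc)
{
	unsigned first = txr->next_to_clean;

	while (first != hw_head) {
		/* in the driver: unmap and free the mbuf tied to this slot */
		txr->avail++;
		first = (first + 1) % ndesc;
	}
	txr->next_to_clean = first;

	/* All descriptors free again: disarm the watchdog. */
	if (txr->avail == ndesc)
		txr->watchdog_timer = 0;
}

int main(void)
{
	/* 6 descriptors are in flight; hardware reports 4 of them complete. */
	struct toy_txring txr = { .next_to_clean = 0, .avail = NDESC - 6,
	                          .watchdog_timer = 5 };

	toy_txeof(&txr, 4, NDESC);
	printf("avail=%u next_to_clean=%u watchdog=%u\n",
	    txr.avail, txr.next_to_clean, txr.watchdog_timer);

	toy_txeof(&txr, 6, NDESC);   /* the remaining two complete later */
	printf("avail=%u next_to_clean=%u watchdog=%u\n",
	    txr.avail, txr.next_to_clean, txr.watchdog_timer);
	return 0;
}

The second cleanup variant listed next (source lines 1045-1149) does the same avail/next_to_clean bookkeeping but works from the descriptors themselves rather than a written-back head value.
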
1045 struct tx_ring *txr = &que->txr;
1051 mtx_assert(&txr->mtx, MA_OWNED);
1054 if (txr->avail == que->num_tx_desc) {
1055 atomic_store_rel_32(&txr->watchdog_timer, 0);
1060 first = txr->next_to_clean;
1061 buf = &txr->buffers[first];
1062 tx_desc = &txr->base[first];
1075 eop_desc = &txr->base[last];
1078 bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD);
1106 txr->bytes += buf->m_head->m_pkthdr.len;
1107 txr->tx_bytes += buf->m_head->m_pkthdr.len;
1115 ++txr->avail;
1119 buf = &txr->buffers[first];
1120 tx_desc = &txr->base[first];
1122 ++txr->packets;
1124 atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
1132 eop_desc = &txr->base[last];
1140 bus_dmamap_sync(txr->dma.tag, txr->dma.map,
1143 txr->next_to_clean = first;
1148 if (txr->avail == que->num_tx_desc) {
1149 atomic_store_rel_32(&txr->watchdog_timer, 0);
2094 struct tx_ring *txr;
2107 txr = &(queues[q].txr);
2126 CTLFLAG_RD, &(txr->no_desc),
2129 CTLFLAG_RD, &(txr->total_packets),
2132 CTLFLAG_RD, &(txr->tx_bytes),
2147 CTLFLAG_RD, &(txr->itr), 0,
2151 CTLFLAG_RD, &(txr->watchdog_timer), 0,
2154 CTLFLAG_RD, &(txr->next_avail), 0,
2157 CTLFLAG_RD, &(txr->next_to_clean), 0,
2238 val = rd32(que->vsi->hw, que->txr.tail);
2297 struct tx_ring *txr;
2302 txr = &que->txr;
2308 if (atomic_cmpset_rel_32(&txr->watchdog_timer,
2310 timer = atomic_load_acq_32(&txr->watchdog_timer);
2320 atomic_store_rel_32(&txr->watchdog_timer, -1);
2333 atomic_cmpset_rel_32(&txr->watchdog_timer,
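
The final group of matches (source lines 2297-2333) is the transmit watchdog: the timer is armed with atomic_store_rel_32 when work is handed to the hardware, cleared when the ring drains, and a periodic handler uses atomic_load_acq_32/atomic_cmpset_rel_32 to count it down without taking the TX lock, storing -1 once it expires to mark the queue as hung. The sketch below reproduces that lock-free countdown with C11 atomics in place of the FreeBSD atomic(9) primitives; the names, the tick budget and the exact hang handling are illustrative assumptions, not the driver's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_WATCHDOG 5u           /* ticks allowed before declaring a hang */
#define TOY_HUNG     UINT32_MAX   /* sentinel, like the driver's store of -1 */

struct toy_txring {
	_Atomic uint32_t watchdog_timer;   /* 0 means no TX work pending */
};

/* Transmit path: (re)arm the countdown when descriptors go to hardware. */
static void toy_arm_watchdog(struct toy_txring *txr)
{
	atomic_store_explicit(&txr->watchdog_timer, TOY_WATCHDOG,
	    memory_order_release);
}

/*
 * Periodic tick: decrement the timer with compare-and-swap so the TX and
 * cleanup paths can rearm or clear it concurrently.  Returns true when the
 * countdown reaches zero, i.e. the queue looks hung.
 */
static bool toy_watchdog_tick(struct toy_txring *txr)
{
	uint32_t timer = atomic_load_explicit(&txr->watchdog_timer,
	    memory_order_acquire);

	while (timer != 0 && timer != TOY_HUNG) {
		if (atomic_compare_exchange_weak_explicit(&txr->watchdog_timer,
		    &timer, timer - 1, memory_order_acq_rel,
		    memory_order_acquire)) {
			if (timer == 1) {   /* just decremented to zero */
				atomic_store_explicit(&txr->watchdog_timer,
				    TOY_HUNG, memory_order_release);
				return true;
			}
			return false;
		}
		/* CAS failed: "timer" was reloaded with the current value. */
	}
	return false;
}

int main(void)
{
	struct toy_txring txr;

	atomic_init(&txr.watchdog_timer, 0);
	toy_arm_watchdog(&txr);
	for (int tick = 1; tick <= 6; tick++)
		printf("tick %d: %s\n", tick,
		    toy_watchdog_tick(&txr) ? "queue hung" : "ok");
	return 0;
}

The point of the compare-and-swap is that the periodic handler never takes the TX lock: if the transmit or cleanup path rewrites the timer between the load and the CAS, the CAS fails and the handler simply works with the fresh value.
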