Lines Matching defs:txr

100 ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
103 struct adapter *adapter = txr->adapter;
105 IXGBE_TX_LOCK_ASSERT(txr);
113 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
120 if (ixgbe_xmit(txr, &m_head)) {
142 struct tx_ring *txr = adapter->tx_rings;
145 IXGBE_TX_LOCK(txr);
146 ixgbe_legacy_start_locked(ifp, txr);
147 IXGBE_TX_UNLOCK(txr);
161 struct tx_ring *txr;
192 txr = &adapter->tx_rings[i];
195 err = drbr_enqueue(ifp, txr->br, m);
198 if (IXGBE_TX_TRYLOCK(txr)) {
199 ixgbe_mq_start_locked(ifp, txr);
200 IXGBE_TX_UNLOCK(txr);
202 taskqueue_enqueue(que->tq, &txr->txq_task);
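
The ixgbe_mq_start matches above show its dispatch pattern: the frame is enqueued on the ring's buf_ring, then the software queue is drained immediately if the TX lock can be taken without blocking, otherwise the drain is deferred to the queue's taskqueue. A minimal userspace analogue of that trylock-or-defer idea is sketched below; it uses pthread_mutex_trylock and plain counters as stand-ins, and none of the names are driver or kernel APIs.

    /* Userspace analogue of the trylock-or-defer dispatch seen in
     * ixgbe_mq_start(). All names are illustrative only. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int queued;      /* stands in for the per-ring buf_ring */
    static int deferred;    /* work handed off instead of done inline */

    static void drain_locked(void)            /* role of ixgbe_mq_start_locked() */
    {
        printf("draining %d queued frame(s)\n", queued);
        queued = 0;
    }

    static void mq_start(void)                /* role of ixgbe_mq_start() */
    {
        queued++;                             /* the drbr_enqueue() step */
        if (pthread_mutex_trylock(&tx_lock) == 0) {
            drain_locked();                   /* lock was free: drain now */
            pthread_mutex_unlock(&tx_lock);
        } else {
            deferred++;                       /* the taskqueue_enqueue() step */
        }
    }

    int main(void)
    {
        mq_start();                           /* lock free: drained inline */
        pthread_mutex_lock(&tx_lock);
        mq_start();                           /* lock held: drain deferred */
        pthread_mutex_unlock(&tx_lock);
        printf("deferred %d drain(s)\n", deferred);
        return 0;
    }
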
211 ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
218 if (txr->adapter->link_active == 0)
223 next = drbr_dequeue(ifp, txr->br);
225 if ((err = ixgbe_xmit(txr, &next)) != 0) {
227 err = drbr_enqueue(ifp, txr->br, next);
229 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
230 err = ixgbe_xmit(txr, &next);
233 drbr_advance(ifp, txr->br);
235 drbr_putback(ifp, txr->br, next);
240 drbr_advance(ifp, txr->br);
249 if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
258 next = drbr_dequeue(ifp, txr->br);
262 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
263 ixgbe_txeof(txr);
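
The ixgbe_mq_start_locked matches show the drain loop's peek/advance/putback discipline: the head of the software ring is inspected without removing it, a transmit is attempted, and the head is advanced only on success; on failure the entry stays queued for a later pass. The standalone sketch below models that discipline with a plain array; the queue, the peek/advance helpers, and try_xmit are illustrative stand-ins, not the kernel's drbr_* API.

    /* Self-contained model of the peek/advance/putback drain loop. */
    #include <stdio.h>
    #include <stdbool.h>

    #define QLEN 4

    static int queue[QLEN] = { 11, 22, 33, 44 };  /* software transmit queue */
    static int head;                              /* index of the head entry */

    static int *peek(void)            /* non-destructive look at the head */
    {
        return head < QLEN ? &queue[head] : NULL;
    }

    static void advance(void)         /* consume the head entry */
    {
        head++;
    }

    static bool try_xmit(int frame)   /* pretend the hardware ring holds 2 more */
    {
        static int room = 2;
        if (room == 0)
            return false;
        room--;
        printf("transmitted %d\n", frame);
        return true;
    }

    int main(void)
    {
        int *next;

        while ((next = peek()) != NULL) {
            if (!try_xmit(*next)) {
                /* ring full: the head stays queued for the next pass, the
                 * moral equivalent of drbr_putback() */
                printf("ring full, %d stays queued\n", *next);
                break;
            }
            advance();                /* success: move past the head */
        }
        printf("%d entries still queued\n", QLEN - head);
        return 0;
    }
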
276 struct tx_ring *txr = arg;
277 struct adapter *adapter = txr->adapter;
280 IXGBE_TX_LOCK(txr);
281 if (!drbr_empty(ifp, txr->br))
282 ixgbe_mq_start_locked(ifp, txr);
283 IXGBE_TX_UNLOCK(txr);
293 struct tx_ring *txr = adapter->tx_rings;
296 for (int i = 0; i < adapter->num_queues; i++, txr++) {
297 IXGBE_TX_LOCK(txr);
298 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
300 IXGBE_TX_UNLOCK(txr);
315 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
317 struct adapter *adapter = txr->adapter;
342 first = txr->next_avail_desc;
343 txbuf = &txr->tx_buffers[first];
350 error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs,
377 txr->no_tx_dma_setup++;
380 txr->no_tx_dma_setup++;
388 if (txr->tx_avail < (nsegs + 2)) {
389 txr->no_desc_avail++;
390 bus_dmamap_unload(txr->txtag, map);
399 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
408 (txr->atr_sample) && (!adapter->fdir_reinit)) {
409 ++txr->atr_count;
410 if (txr->atr_count >= atr_sample_rate) {
411 ixgbe_atr(txr, m_head);
412 txr->atr_count = 0;
417 i = txr->next_avail_desc;
422 txbuf = &txr->tx_buffers[i];
423 txd = &txr->tx_base[i];
428 txd->read.cmd_type_len = htole32(txr->txd_cmd |
432 if (++i == txr->num_desc)
437 txr->tx_avail -= nsegs;
438 txr->next_avail_desc = i;
447 txr->tx_buffers[first].map = txbuf->map;
449 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
452 txbuf = &txr->tx_buffers[first];
455 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
461 ++txr->total_packets;
462 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
465 if (txr->busy == 0)
466 txr->busy = 1;
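
The ixgbe_xmit matches show the bookkeeping it maintains on the descriptor ring: the transmit is refused unless tx_avail covers the segment count plus some slack, the write index wraps with "if (++i == num_desc) i = 0", and tx_avail is debited by the segments consumed. A small self-contained model of that index and credit arithmetic follows; the structure and function names are illustrative only, not driver code.

    /* Minimal model of the ring-index and tx_avail bookkeeping in ixgbe_xmit(). */
    #include <stdio.h>

    #define NUM_DESC  8
    #define MIN_SLACK 2                    /* mirrors the "nsegs + 2" headroom */

    struct ring {
        int next_avail;                    /* next descriptor slot to write */
        int avail;                         /* free descriptors remaining */
    };

    /* Consume nsegs descriptor slots, or refuse if the ring is too full. */
    static int ring_xmit(struct ring *r, int nsegs)
    {
        if (r->avail < nsegs + MIN_SLACK)
            return -1;                     /* the "no_desc_avail" path */
        int i = r->next_avail;
        for (int s = 0; s < nsegs; s++) {
            /* a real driver would fill tx_base[i] here */
            if (++i == NUM_DESC)           /* wrap exactly as the driver does */
                i = 0;
        }
        r->avail -= nsegs;
        r->next_avail = i;
        return 0;
    }

    int main(void)
    {
        struct ring r = { .next_avail = 0, .avail = NUM_DESC };
        printf("3-segment frame: %s\n", ring_xmit(&r, 3) == 0 ? "sent" : "refused");
        printf("7-segment frame: %s\n", ring_xmit(&r, 7) == 0 ? "sent" : "refused");
        printf("next_avail=%d avail=%d\n", r.next_avail, r.avail);
        return 0;
    }
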
480 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
482 struct adapter *adapter = txr->adapter;
504 &txr->txtag);
510 txr->tx_buffers =
513 if (txr->tx_buffers == NULL) {
520 txbuf = txr->tx_buffers;
522 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
541 ixgbe_setup_transmit_ring(struct tx_ring *txr)
543 struct adapter *adapter = txr->adapter;
551 IXGBE_TX_LOCK(txr);
559 slot = netmap_reset(na, NR_TX, txr->me, 0);
563 bzero((void *)txr->tx_base,
566 txr->next_avail_desc = 0;
567 txr->next_to_clean = 0;
570 txbuf = txr->tx_buffers;
571 for (int i = 0; i < txr->num_desc; i++, txbuf++) {
573 bus_dmamap_sync(txr->txtag, txbuf->map,
575 bus_dmamap_unload(txr->txtag, txbuf->map);
592 int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
593 netmap_load_map(na, txr->txtag,
604 txr->atr_sample = atr_sample_rate;
607 txr->tx_avail = adapter->num_tx_desc;
609 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
611 IXGBE_TX_UNLOCK(txr);
620 struct tx_ring *txr = adapter->tx_rings;
622 for (int i = 0; i < adapter->num_queues; i++, txr++)
623 ixgbe_setup_transmit_ring(txr);
634 struct tx_ring *txr = adapter->tx_rings;
636 for (int i = 0; i < adapter->num_queues; i++, txr++) {
637 IXGBE_TX_LOCK(txr);
638 ixgbe_free_transmit_buffers(txr);
639 ixgbe_dma_free(adapter, &txr->txdma);
640 IXGBE_TX_UNLOCK(txr);
641 IXGBE_TX_LOCK_DESTROY(txr);
652 ixgbe_free_transmit_buffers(struct tx_ring *txr)
654 struct adapter *adapter = txr->adapter;
660 if (txr->tx_buffers == NULL)
663 tx_buffer = txr->tx_buffers;
666 bus_dmamap_sync(txr->txtag, tx_buffer->map,
668 bus_dmamap_unload(txr->txtag, tx_buffer->map);
672 bus_dmamap_destroy(txr->txtag, tx_buffer->map);
676 bus_dmamap_unload(txr->txtag, tx_buffer->map);
677 bus_dmamap_destroy(txr->txtag, tx_buffer->map);
681 if (txr->br != NULL)
682 buf_ring_free(txr->br, M_DEVBUF);
683 if (txr->tx_buffers != NULL) {
684 free(txr->tx_buffers, M_DEVBUF);
685 txr->tx_buffers = NULL;
687 if (txr->txtag != NULL) {
688 bus_dma_tag_destroy(txr->txtag);
689 txr->txtag = NULL;
699 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
712 int ctxd = txr->next_avail_desc;
723 return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
732 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
742 } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
848 if (++ctxd == txr->num_desc)
850 txr->next_avail_desc = ctxd;
851 --txr->tx_avail;
863 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
928 ctxd = txr->next_avail_desc;
929 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
958 if (++ctxd == txr->num_desc)
961 txr->tx_avail--;
962 txr->next_avail_desc = ctxd;
966 ++txr->tso_tx;
980 ixgbe_txeof(struct tx_ring *txr)
982 struct adapter *adapter = txr->adapter;
988 mtx_assert(&txr->tx_mtx, MA_OWNED);
994 struct netmap_kring *kring = na->tx_rings[txr->me];
995 txd = txr->tx_base;
996 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1015 netmap_tx_irq(adapter->ifp, txr->me);
1021 if (txr->tx_avail == txr->num_desc) {
1022 txr->busy = 0;
1027 work = txr->next_to_clean;
1028 buf = &txr->tx_buffers[work];
1029 txd = &txr->tx_base[work];
1030 work -= txr->num_desc; /* The distance to ring end */
1031 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1043 txr->bytes += buf->m_head->m_pkthdr.len;
1044 bus_dmamap_sync(txr->txtag, buf->map,
1046 bus_dmamap_unload(txr->txtag, buf->map);
1051 ++txr->tx_avail;
1060 work -= txr->num_desc;
1061 buf = txr->tx_buffers;
1062 txd = txr->tx_base;
1065 txr->bytes += buf->m_head->m_pkthdr.len;
1066 bus_dmamap_sync(txr->txtag, buf->map,
1068 bus_dmamap_unload(txr->txtag, buf->map);
1072 ++txr->tx_avail;
1076 ++txr->packets;
1085 work -= txr->num_desc;
1086 buf = txr->tx_buffers;
1087 txd = txr->tx_base;
1092 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1095 work += txr->num_desc;
1096 txr->next_to_clean = work;
1106 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1107 ++txr->busy;
1113 txr->busy = 1;
1115 if (txr->tx_avail == txr->num_desc)
1116 txr->busy = 0;
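
The ixgbe_txeof matches show its ring-walk trick: "work" starts at next_to_clean, is biased negative by subtracting num_desc so that reaching zero signals a wrap back to the start of the buffer and descriptor arrays, and adding num_desc at the end converts it back into the new next_to_clean index. The standalone program below reproduces just that arithmetic on a plain array; the names are illustrative, not driver code.

    /* Standalone model of the negative-offset ring walk used by ixgbe_txeof(). */
    #include <stdio.h>

    #define NUM_DESC 8

    int main(void)
    {
        int ring[NUM_DESC];
        for (int i = 0; i < NUM_DESC; i++)
            ring[i] = 100 + i;             /* dummy descriptor contents */

        int next_to_clean = 5;             /* cleanup last stopped at slot 5 */
        int limit = 6;                     /* clean at most 6 descriptors */

        int work = next_to_clean;
        int *slot = &ring[work];
        work -= NUM_DESC;                  /* now negative: distance to ring end */

        do {
            printf("cleaning slot %d (contents %d)\n", work + NUM_DESC, *slot);
            ++slot;
            ++work;
            if (work == 0) {               /* ran off the end: rewind to start */
                work -= NUM_DESC;
                slot = ring;
            }
        } while (--limit);

        work += NUM_DESC;                  /* back to a plain array index */
        printf("next_to_clean is now %d\n", work);
        return 0;
    }
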
2049 struct tx_ring *txr;
2092 txr = &adapter->tx_rings[i];
2093 txr->adapter = adapter;
2094 txr->br = NULL;
2096 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2098 txr->num_desc = adapter->num_tx_desc;
2101 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2102 device_get_nameunit(dev), txr->me);
2103 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2105 if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2112 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2113 bzero((void *)txr->tx_base, tsize);
2116 if (ixgbe_allocate_transmit_buffers(txr)) {
2124 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2125 M_WAITOK, &txr->tx_mtx);
2126 if (txr->br == NULL) {
2180 que->txr = &adapter->tx_rings[i];
2190 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2191 ixgbe_dma_free(adapter, &txr->txdma);