Lines Matching defs:txr

945 struct tx_ring *txr = adapter->tx_rings;
956 for (int i = 0; i < adapter->num_queues; i++, txr++) {
957 EM_TX_LOCK(txr);
959 if (!drbr_empty(ifp, txr->br))
960 em_mq_start_locked(ifp, txr);
963 em_start_locked(ifp, txr);
965 EM_TX_UNLOCK(txr);
976 em_start_locked(if_t ifp, struct tx_ring *txr)
981 EM_TX_LOCK_ASSERT(txr);
992 if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
993 em_txeof(txr);
994 if (txr->tx_avail < EM_MAX_SCATTER) {
1005 if (em_xmit(txr, &m_head)) {
1013 if (txr->busy == EM_TX_IDLE)
1014 txr->busy = EM_TX_BUSY;
1028 struct tx_ring *txr = adapter->tx_rings;
1031 EM_TX_LOCK(txr);
1032 em_start_locked(ifp, txr);
1033 EM_TX_UNLOCK(txr);
1053 struct tx_ring *txr = adapter->tx_rings;
1061 txr = &adapter->tx_rings[i];
1063 error = drbr_enqueue(ifp, txr->br, m);
1067 if (EM_TX_TRYLOCK(txr)) {
1068 em_mq_start_locked(ifp, txr);
1069 EM_TX_UNLOCK(txr);
1071 taskqueue_enqueue(txr->tq, &txr->tx_task);
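
The lines at 1061-1071 above are the driver's deferral decision in em_mq_start(): the mbuf has already been enqueued on the ring's buf_ring, so the caller either drains the queue now (if it can take the TX lock without blocking) or hands the drain to the ring's taskqueue. Below is a minimal user-space sketch of that enqueue-then-trylock shape, assuming hypothetical queue_push(), drain_locked() and deferred_kick() helpers and a pthread mutex standing in for EM_TX_LOCK; it illustrates the pattern and is not driver code.

/*
 * Illustrative sketch only: the enqueue-then-trylock dispatch seen at
 * lines 1061-1071, rebuilt with POSIX threads and invented helpers so
 * it compiles outside the kernel.
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct queue { int dummy; } queue_t;

static pthread_mutex_t tx_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins for drbr_enqueue(), the locked drain loop and
 * taskqueue_enqueue(). */
static bool queue_push(queue_t *q, void *pkt) { (void)q; (void)pkt; return true; }
static void drain_locked(queue_t *q) { (void)q; /* transmit until empty or ring full */ }
static void deferred_kick(queue_t *q) { (void)q; /* schedule the drain on a worker */ }

int
mq_start(queue_t *q, void *pkt)
{
	/* Always enqueue first, so nothing is lost if another thread
	 * already owns the transmit lock. */
	if (!queue_push(q, pkt))
		return -1;

	if (pthread_mutex_trylock(&tx_mtx) == 0) {
		/* We won the lock: drain the software queue now. */
		drain_locked(q);
		pthread_mutex_unlock(&tx_mtx);
	} else {
		/* Someone else is transmitting: defer instead of spinning. */
		deferred_kick(q);
	}
	return 0;
}
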
1077 em_mq_start_locked(if_t ifp, struct tx_ring *txr)
1079 struct adapter *adapter = txr->adapter;
1083 EM_TX_LOCK_ASSERT(txr);
1091 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
1092 if ((err = em_xmit(txr, &next)) != 0) {
1095 drbr_advance(ifp, txr->br);
1102 drbr_putback(ifp, txr->br, next);
1106 drbr_advance(ifp, txr->br);
1117 if ((enq > 0) && (txr->busy == EM_TX_IDLE))
1118 txr->busy = EM_TX_BUSY;
1120 if (txr->tx_avail < EM_MAX_SCATTER)
1121 em_txeof(txr);
1122 if (txr->tx_avail < EM_MAX_SCATTER) {
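
Lines 1091-1106 show the single-consumer drain loop over the buf_ring in em_mq_start_locked(): peek at the head, attempt the transmit, then either advance past the packet (sent, or dropped by em_xmit) or leave it at the head and stop when no descriptors are available. The sketch below reproduces that peek/advance/putback control flow over a toy ring; ring_peek(), hw_xmit() and the toy_ring layout are invented for the example and are not the drbr API.

/*
 * Sketch of the peek/advance/putback drain loop at lines 1091-1106,
 * over a toy single-consumer ring.  All names are stand-ins.
 */
#include <stddef.h>

#define RING_SIZE 64

struct toy_ring {
	void		*slot[RING_SIZE];
	unsigned	head;	/* consumer index */
	unsigned	tail;	/* producer index */
};

static void *
ring_peek(struct toy_ring *r)
{
	return (r->head == r->tail) ? NULL : r->slot[r->head % RING_SIZE];
}

static void ring_advance(struct toy_ring *r) { r->head++; }

/* Returns 0 on success, >0 if the packet was consumed/freed by the failed
 * attempt, <0 if the hardware ring is full and the packet is untouched. */
static int hw_xmit(void *pkt) { (void)pkt; return 0; }

static void
drain(struct toy_ring *r)
{
	void *pkt;

	while ((pkt = ring_peek(r)) != NULL) {
		int err = hw_xmit(pkt);
		if (err > 0) {
			/* Packet already dropped by the attempt: skip it. */
			ring_advance(r);
			continue;
		}
		if (err < 0) {
			/* No descriptors: leave the packet at the head
			 * (the "putback" case) and retry later. */
			break;
		}
		/* Sent: consume the slot. */
		ring_advance(r);
	}
}
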
1135 struct tx_ring *txr = adapter->tx_rings;
1138 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1139 EM_TX_LOCK(txr);
1140 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
1142 EM_TX_UNLOCK(txr);
1517 struct tx_ring *txr = adapter->tx_rings;
1542 EM_TX_LOCK(txr);
1543 em_txeof(txr);
1545 if (!drbr_empty(ifp, txr->br))
1546 em_mq_start_locked(ifp, txr);
1549 em_start_locked(ifp, txr);
1551 EM_TX_UNLOCK(txr);
1610 struct tx_ring *txr = adapter->tx_rings;
1616 EM_TX_LOCK(txr);
1617 em_txeof(txr);
1619 if (!drbr_empty(ifp, txr->br))
1620 em_mq_start_locked(ifp, txr);
1623 em_start_locked(ifp, txr);
1625 EM_TX_UNLOCK(txr);
1645 struct tx_ring *txr = arg;
1646 struct adapter *adapter = txr->adapter;
1649 ++txr->tx_irq;
1650 EM_TX_LOCK(txr);
1651 em_txeof(txr);
1653 if (!drbr_empty(ifp, txr->br))
1654 em_mq_start_locked(ifp, txr);
1657 em_start_locked(ifp, txr);
1661 E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1662 EM_TX_UNLOCK(txr);
1747 struct tx_ring *txr = context;
1748 struct adapter *adapter = txr->adapter;
1751 EM_TX_LOCK(txr);
1752 em_txeof(txr);
1754 if (!drbr_empty(ifp, txr->br))
1755 em_mq_start_locked(ifp, txr);
1758 em_start_locked(ifp, txr);
1760 E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1761 EM_TX_UNLOCK(txr);
1769 struct tx_ring *txr = adapter->tx_rings;
1782 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1783 EM_TX_LOCK(txr);
1785 if (!drbr_empty(ifp, txr->br))
1786 em_mq_start_locked(ifp, txr);
1789 em_start_locked(ifp, txr);
1791 EM_TX_UNLOCK(txr);
1915 em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1917 struct adapter *adapter = txr->adapter;
2060 first = txr->next_avail_desc;
2061 tx_buffer = &txr->tx_buffers[first];
2066 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
2106 if ((!do_tso) && (txr->tx_tso == TRUE)) {
2109 txr->tx_tso = FALSE;
2112 if (txr->tx_avail < (nsegs + EM_MAX_SCATTER)) {
2113 txr->no_desc_avail++;
2114 bus_dmamap_unload(txr->txtag, map);
2121 em_tso_setup(txr, m_head, ip_off, ip, tp,
2126 em_transmit_checksum_setup(txr, m_head,
2136 i = txr->next_avail_desc;
2143 tx_buffer = &txr->tx_buffers[i];
2144 ctxd = &txr->tx_base[i];
2162 txr->tx_avail--;
2163 ctxd = &txr->tx_base[i];
2164 tx_buffer = &txr->tx_buffers[i];
2187 txr->next_avail_desc = i;
2188 txr->tx_avail -= nsegs;
2199 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
2212 tx_buffer = &txr->tx_buffers[first];
2219 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2221 E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
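
Lines 2136-2188 and 2221 are the heart of em_xmit(): one descriptor is filled per DMA segment, the producer index wraps at the ring size, and the tail register is only written once the whole chain has been posted. A compact stand-alone sketch of that segment loop follows, assuming simplified seg/desc structures and FLAG_EOP/FLAG_RS stand-ins for the real command bits; it is not the driver's descriptor layout.

/*
 * Sketch of the per-segment descriptor loop at lines 2136-2188: one
 * descriptor per segment, wrap at the ring size, EOP/RS only on the
 * last descriptor of the packet.
 */
#include <stdint.h>

#define NDESC		256
#define FLAG_EOP	0x1
#define FLAG_RS		0x2

struct seg  { uint64_t addr; uint32_t len; };
struct desc { uint64_t addr; uint32_t len; uint32_t flags; };

struct txring {
	struct desc	ring[NDESC];
	unsigned	next_avail;
	unsigned	avail;
};

/* Returns the new tail index to write to the hardware, or -1 if there
 * is not enough room for the whole packet. */
static int
post_packet(struct txring *r, const struct seg *segs, unsigned nsegs)
{
	if (nsegs == 0 || r->avail < nsegs)
		return -1;			/* leave the ring untouched */

	unsigned i = r->next_avail;
	for (unsigned s = 0; s < nsegs; s++) {
		struct desc *d = &r->ring[i];
		d->addr  = segs[s].addr;
		d->len   = segs[s].len;
		d->flags = 0;
		if (++i == NDESC)
			i = 0;			/* wrap the producer index */
	}
	/* Only the final descriptor of the packet carries EOP/RS. */
	unsigned last = (i + NDESC - 1) % NDESC;
	r->ring[last].flags = FLAG_EOP | FLAG_RS;

	r->next_avail = i;
	r->avail -= nsegs;
	return (int)i;				/* new tail value */
}
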
2331 struct tx_ring *txr = adapter->tx_rings;
2358 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2359 if (txr->busy == EM_TX_HUNG)
2361 if (txr->busy >= EM_TX_MAXTRIES)
2362 txr->busy = EM_TX_HUNG;
2364 if (txr->tx_avail <= EM_MAX_SCATTER)
2365 taskqueue_enqueue(txr->tq, &txr->tx_task);
2377 txr->me);
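
Lines 2358-2365 (together with 4231-4234 further down) implement the transmit watchdog: a ring that makes no cleanup progress has txr->busy incremented each timer tick, EM_TX_MAXTRIES ticks promote it to EM_TX_HUNG, and a hung ring causes the interface to be reinitialized. The small program below models that counter; ring_state, on_clean() and watchdog_tick() are invented names, and only the IDLE/BUSY/MAXTRIES/HUNG progression is taken from the listing.

/*
 * Sketch of the busy/hung bookkeeping visible at lines 2358-2365 and
 * 4231-4234.  Types and constants here are simplified stand-ins.
 */
#include <stdio.h>

enum { TX_IDLE = 0, TX_BUSY = 1, TX_MAXTRIES = 8, TX_HUNG = 255 };

struct ring_state {
	int	busy;		/* IDLE, HUNG, or ticks without progress */
	int	cleaned;	/* descriptors reclaimed since the last tick */
};

/* Called from the completion path: any progress resets the counter. */
static void
on_clean(struct ring_state *rs, int ndesc)
{
	rs->cleaned += ndesc;
	if (ndesc > 0)
		rs->busy = TX_BUSY;	/* also clears HUNG */
	else if (rs->busy != TX_HUNG)
		rs->busy++;		/* another tick with no progress */
}

/* Called from the periodic timer: escalate or report hung. */
static int
watchdog_tick(struct ring_state *rs)
{
	if (rs->busy == TX_HUNG)
		return -1;		/* caller should reinit the interface */
	if (rs->busy >= TX_MAXTRIES)
		rs->busy = TX_HUNG;
	rs->cleaned = 0;
	return 0;
}

int
main(void)
{
	struct ring_state rs = { .busy = TX_BUSY, .cleaned = 0 };

	/* Simulate ticks with no completions until the ring is hung. */
	for (int tick = 0; tick < 12; tick++) {
		on_clean(&rs, 0);
		if (watchdog_tick(&rs) != 0) {
			printf("hung after %d ticks\n", tick + 1);
			break;
		}
	}
	return 0;
}
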
2391 struct tx_ring *txr = adapter->tx_rings;
2465 for (int i = 0; i < adapter->num_queues; i++, txr++)
2466 txr->busy = EM_TX_IDLE;
2485 struct tx_ring *txr = adapter->tx_rings;
2498 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2499 EM_TX_LOCK(txr);
2500 txr->busy = EM_TX_IDLE;
2501 EM_TX_UNLOCK(txr);
2579 struct tx_ring *txr = adapter->tx_rings;
2606 TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2607 txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2608 taskqueue_thread_enqueue, &txr->tq);
2609 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2636 struct tx_ring *txr = adapter->tx_rings;
2693 for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) {
2696 txr->res = bus_alloc_resource_any(dev,
2698 if (txr->res == NULL) {
2704 if ((error = bus_setup_intr(dev, txr->res,
2706 txr, &txr->tag)) != 0) {
2711 bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i);
2713 txr->msix = vector;
2718 bus_bind_intr(dev, txr->res, cpu_id);
2720 TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2721 txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2722 taskqueue_thread_enqueue, &txr->tq);
2723 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)",
2731 txr->ims = 1 << (22 + i);
2732 adapter->ims |= txr->ims;
2733 adapter->ivars |= (8 | txr->msix) << (8 + (i * 4));
2771 struct tx_ring *txr;
2780 txr = &adapter->tx_rings[i];
2782 if (txr == NULL)
2784 rid = txr->msix +1;
2785 if (txr->tag != NULL) {
2786 bus_teardown_intr(dev, txr->res, txr->tag);
2787 txr->tag = NULL;
2789 if (txr->res != NULL)
2791 rid, txr->res);
2942 struct tx_ring *txr = adapter->tx_rings;
2950 txd = &txr->tx_base[txr->next_avail_desc++];
2951 if (txr->next_avail_desc == adapter->num_tx_desc)
2952 txr->next_avail_desc = 0;
2955 txd->buffer_addr = txr->txdma.dma_paddr;
2962 E1000_WRITE_REG(hw, E1000_TDT(0), txr->next_avail_desc);
3429 struct tx_ring *txr = NULL;
3462 txr = &adapter->tx_rings[i];
3463 txr->adapter = adapter;
3464 txr->me = i;
3467 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3468 device_get_nameunit(dev), txr->me);
3469 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
3472 &txr->txdma, BUS_DMA_NOWAIT)) {
3478 txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
3479 bzero((void *)txr->tx_base, tsize);
3481 if (em_allocate_transmit_buffers(txr)) {
3489 txr->br = buf_ring_alloc(4096, M_DEVBUF,
3490 M_WAITOK, &txr->tx_mtx);
3506 device_get_nameunit(dev), txr->me);
3534 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
3535 em_dma_free(adapter, &txr->txdma);
3539 buf_ring_free(txr->br, M_DEVBUF);
3555 em_allocate_transmit_buffers(struct tx_ring *txr)
3557 struct adapter *adapter = txr->adapter;
3576 &txr->txtag))) {
3581 if (!(txr->tx_buffers =
3590 txbuf = txr->tx_buffers;
3592 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
3612 em_setup_transmit_ring(struct tx_ring *txr)
3614 struct adapter *adapter = txr->adapter;
3623 EM_TX_LOCK(txr);
3625 slot = netmap_reset(na, NR_TX, txr->me, 0);
3628 bzero((void *)txr->tx_base,
3631 txr->next_avail_desc = 0;
3632 txr->next_to_clean = 0;
3635 txbuf = txr->tx_buffers;
3638 bus_dmamap_sync(txr->txtag, txbuf->map,
3640 bus_dmamap_unload(txr->txtag, txbuf->map);
3646 int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
3651 txr->tx_base[i].buffer_addr = htole64(paddr);
3653 netmap_load_map(na, txr->txtag, txbuf->map, addr);
3662 txr->tx_avail = adapter->num_tx_desc;
3663 txr->busy = EM_TX_IDLE;
3666 txr->last_hw_offload = 0;
3667 txr->last_hw_ipcss = 0;
3668 txr->last_hw_ipcso = 0;
3669 txr->last_hw_tucss = 0;
3670 txr->last_hw_tucso = 0;
3672 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3674 EM_TX_UNLOCK(txr);
3685 struct tx_ring *txr = adapter->tx_rings;
3687 for (int i = 0; i < adapter->num_queues; i++, txr++)
3688 em_setup_transmit_ring(txr);
3701 struct tx_ring *txr = adapter->tx_rings;
3707 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3708 u64 bus_addr = txr->txdma.dma_paddr;
3724 txr->busy = EM_TX_IDLE;
3824 struct tx_ring *txr = adapter->tx_rings;
3826 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3827 EM_TX_LOCK(txr);
3828 em_free_transmit_buffers(txr);
3829 em_dma_free(adapter, &txr->txdma);
3830 EM_TX_UNLOCK(txr);
3831 EM_TX_LOCK_DESTROY(txr);
3843 em_free_transmit_buffers(struct tx_ring *txr)
3845 struct adapter *adapter = txr->adapter;
3850 if (txr->tx_buffers == NULL)
3854 txbuf = &txr->tx_buffers[i];
3856 bus_dmamap_sync(txr->txtag, txbuf->map,
3858 bus_dmamap_unload(txr->txtag,
3863 bus_dmamap_destroy(txr->txtag,
3868 bus_dmamap_unload(txr->txtag,
3870 bus_dmamap_destroy(txr->txtag,
3876 if (txr->br != NULL)
3877 buf_ring_free(txr->br, M_DEVBUF);
3879 if (txr->tx_buffers != NULL) {
3880 free(txr->tx_buffers, M_DEVBUF);
3881 txr->tx_buffers = NULL;
3883 if (txr->txtag != NULL) {
3884 bus_dma_tag_destroy(txr->txtag);
3885 txr->txtag = NULL;
3908 em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
3911 struct adapter *adapter = txr->adapter;
3921 cur = txr->next_avail_desc;
3934 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3962 if (txr->last_hw_offload == offload) {
3964 if (txr->last_hw_ipcss == ipcss &&
3965 txr->last_hw_ipcso == ipcso &&
3966 txr->last_hw_tucss == tucss &&
3967 txr->last_hw_tucso == tucso)
3970 if (txr->last_hw_tucss == tucss &&
3971 txr->last_hw_tucso == tucso)
3975 txr->last_hw_offload = offload;
3976 txr->last_hw_tucss = tucss;
3977 txr->last_hw_tucso = tucso;
3984 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
4009 if (txr->last_hw_offload == offload) {
4011 if (txr->last_hw_ipcss == ipcss &&
4012 txr->last_hw_ipcso == ipcso &&
4013 txr->last_hw_tucss == tucss &&
4014 txr->last_hw_tucso == tucso)
4017 if (txr->last_hw_tucss == tucss &&
4018 txr->last_hw_tucso == tucso)
4022 txr->last_hw_offload = offload;
4023 txr->last_hw_tucss = tucss;
4024 txr->last_hw_tucso = tucso;
4031 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
4038 txr->last_hw_ipcss = ipcss;
4039 txr->last_hw_ipcso = ipcso;
4045 tx_buffer = &txr->tx_buffers[cur];
4052 txr->tx_avail--;
4053 txr->next_avail_desc = cur;
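
Lines 3962-4039 show why the txr->last_hw_* fields exist: a new checksum context descriptor is only written when the offload parameters (ipcss/ipcso/tucss/tucso) differ from the ones already programmed, saving a descriptor per packet in the common case. A hedged sketch of that caching check follows; csum_ctx, tx_state and maybe_program_context() are invented stand-ins, not driver API.

/*
 * Sketch of the context-descriptor caching at lines 3962-4039: only
 * (re)program a checksum context when the parameters change.
 */
#include <stdbool.h>
#include <string.h>

struct csum_ctx {
	unsigned char ipcss, ipcso, tucss, tucso;
};

struct tx_state {
	struct csum_ctx	last;		/* last context written to the ring */
	bool		valid;
	unsigned	ctx_writes;
};

/* Returns true if a context descriptor had to be (re)programmed. */
static bool
maybe_program_context(struct tx_state *t, const struct csum_ctx *want)
{
	if (t->valid && memcmp(&t->last, want, sizeof(*want)) == 0)
		return false;		/* reuse the context already in the ring */

	/* ...would fill in and queue a context descriptor here... */
	t->last = *want;
	t->valid = true;
	t->ctx_writes++;
	return true;
}
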
4063 em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
4066 struct adapter *adapter = txr->adapter;
4086 cur = txr->next_avail_desc;
4087 tx_buffer = &txr->tx_buffers[cur];
4088 TXD = (struct e1000_context_desc *) &txr->tx_base[cur];
4128 txr->tx_avail--;
4129 txr->next_avail_desc = cur;
4130 txr->tx_tso = TRUE;
4142 em_txeof(struct tx_ring *txr)
4144 struct adapter *adapter = txr->adapter;
4150 EM_TX_LOCK_ASSERT(txr);
4152 if (netmap_tx_irq(ifp, txr->me))
4157 if (txr->tx_avail == adapter->num_tx_desc) {
4158 txr->busy = EM_TX_IDLE;
4163 first = txr->next_to_clean;
4164 tx_desc = &txr->tx_base[first];
4165 tx_buffer = &txr->tx_buffers[first];
4167 eop_desc = &txr->tx_base[last];
4179 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4188 ++txr->tx_avail;
4192 bus_dmamap_sync(txr->txtag,
4195 bus_dmamap_unload(txr->txtag,
4205 tx_buffer = &txr->tx_buffers[first];
4206 tx_desc = &txr->tx_base[first];
4212 eop_desc = &txr->tx_base[last];
4219 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4222 txr->next_to_clean = first;
4231 if (txr->busy != EM_TX_HUNG)
4232 ++txr->busy;
4234 txr->busy = EM_TX_BUSY; /* note this clears HUNG */
4243 if (txr->tx_avail >= EM_MAX_SCATTER) {
4248 if (txr->tx_avail == adapter->num_tx_desc)
4249 txr->busy = EM_TX_IDLE;
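
The em_txeof() lines (4157-4249) reclaim descriptors by walking from next_to_clean to each packet's end-of-packet descriptor and freeing the packet only once the hardware has reported that descriptor done; a ring that drains completely drops back to EM_TX_IDLE. Below is a simplified reconstruction of that scan under assumed toy types (toy_desc, toy_buf, reclaim()); the real code also syncs and unloads DMA maps and frees mbufs along the way.

/*
 * Sketch of the completion scan at lines 4157-4222.  Each software
 * buffer remembers the index of its packet's last descriptor; a packet
 * is reclaimed only once that descriptor's done bit is set.  Types and
 * field names are stand-ins, not the driver's.
 */
#include <stdbool.h>

#define NDESC 256

struct toy_desc { bool done; };		/* "descriptor done" set by hardware */
struct toy_buf  { int eop; };		/* index of the packet's EOP desc, -1 if none */

struct toy_txring {
	struct toy_desc	desc[NDESC];
	struct toy_buf	buf[NDESC];
	unsigned	next_to_clean;
	unsigned	avail;
};

/* Returns the number of descriptors reclaimed. */
static unsigned
reclaim(struct toy_txring *r)
{
	unsigned first = r->next_to_clean;
	unsigned freed = 0;
	int last = r->buf[first].eop;

	while (last >= 0 && r->desc[last].done) {
		/* Free every descriptor of this packet, EOP included. */
		for (;;) {
			r->avail++;
			freed++;
			bool at_eop = (first == (unsigned)last);
			r->buf[first].eop = -1;
			first = (first + 1) % NDESC;
			if (at_eop)
				break;
		}
		/* Move on to the next pending packet, if any. */
		last = r->buf[first].eop;
	}

	r->next_to_clean = first;
	return freed;
}
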
5702 struct tx_ring *txr = adapter->tx_rings;
5751 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
5759 E1000_TDH(txr->me),
5764 E1000_TDT(txr->me),
5768 CTLFLAG_RD, &txr->tx_irq,
5771 CTLFLAG_RD, &txr->no_desc_avail,
6199 struct tx_ring *txr = adapter->tx_rings;
6212 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
6217 device_printf(dev, "Tx Queue Status = %d\n", txr->busy);
6219 txr->tx_avail);
6221 txr->no_desc_avail);