Lines matching refs:txq (each entry gives the source file's own line number, then the matching line)

146 static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
147 static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
148 static void sfxge_tx_qunblock(struct sfxge_txq *txq);
149 static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
154 sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
158 &txq->stmp[txq->ptr_mask]))
159 *pstmp = &txq->stmp[0];
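
The sfxge_next_stmp hunk above (lines 154-159), together with ptr_mask = entries - 1 at line 1829 and expressions such as "completed++ & txq->ptr_mask" at line 232, follows the usual power-of-two descriptor-ring convention: counters run freely and the slot is the counter ANDed with the mask. A minimal standalone sketch of just that indexing, with illustrative names (RING_ENTRIES, RING_MASK), not driver code:

    #include <assert.h>
    #include <stdio.h>

    #define RING_ENTRIES 8U                   /* must be a power of two */
    #define RING_MASK    (RING_ENTRIES - 1U)  /* plays the role of txq->ptr_mask */

    int
    main(void)
    {
        unsigned int added = 0, completed = 0;

        /* Counters only ever grow; masking them selects the ring slot. */
        for (int i = 0; i < 10; i++) {
            printf("post     -> slot %u\n", added++ & RING_MASK);
            printf("complete -> slot %u\n", completed++ & RING_MASK);
        }

        /* Queue depth is plain subtraction, no modulo needed. */
        assert(added - completed == 0);
        return (0);
    }
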
165 sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf,
185 if (new_hw_cksum_flags == txq->hw_cksum_flags)
188 desc = &txq->pend_desc[txq->n_pend_desc];
189 efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc);
190 txq->hw_cksum_flags = new_hw_cksum_flags;
191 txq->n_pend_desc++;
193 sfxge_next_stmp(txq, pstmp);
199 sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf,
207 if (this_tag == txq->hw_vlan_tci)
210 desc = &txq->pend_desc[txq->n_pend_desc];
211 efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc);
212 txq->hw_vlan_tci = this_tag;
213 txq->n_pend_desc++;
215 sfxge_next_stmp(txq, pstmp);
221 sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
227 completed = txq->completed;
228 while (completed != txq->pending) {
232 id = completed++ & txq->ptr_mask;
234 stmp = &txq->stmp[id];
236 bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
248 txq->completed = completed;
252 if (txq->blocked) {
255 level = txq->added - txq->completed;
256 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
257 sfxge_tx_qunblock(txq);
274 sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
283 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
285 stdp = &txq->dpl;
325 sfxge_tx_qreap(struct sfxge_txq *txq)
327 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
329 txq->reaped = txq->completed;
333 sfxge_tx_qlist_post(struct sfxge_txq *txq)
340 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
342 KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
343 KASSERT(txq->n_pend_desc <= txq->max_pkt_desc,
344 ("txq->n_pend_desc too large"));
345 KASSERT(!txq->blocked, ("txq->blocked"));
347 old_added = txq->added;
350 rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc,
351 txq->reaped, &txq->added);
358 KASSERT(txq->added - old_added == txq->n_pend_desc,
361 level = txq->added - txq->reaped;
362 KASSERT(level <= txq->entries, ("overfilled TX queue"));
365 txq->n_pend_desc = 0;
371 block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc;
378 sfxge_tx_qreap(txq);
379 level = txq->added - txq->reaped;
383 txq->blocked = 1;
390 sfxge_tx_qreap(txq);
391 level = txq->added - txq->reaped;
394 txq->blocked = 0;
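
Lines 347-394 above keep queue depth as a difference of free-running counters (added minus reaped) and set txq->blocked once another worst-case packet may no longer fit under the posting limit. A rough standalone sketch of that bookkeeping; the field names, the limit value and the exact comparison are assumptions here, since EFX_TXQ_LIMIT() and SFXGE_TXQ_UNBLOCK_LEVEL() are not shown in this listing:

    #include <stdbool.h>
    #include <stdio.h>

    struct level_sketch {
        unsigned int limit;          /* usable descriptors (illustrative value below) */
        unsigned int max_pkt_desc;   /* worst-case descriptors for one packet */
        unsigned int added, reaped;  /* free-running counters, as in the driver */
        bool blocked;
    };

    static void
    post_sketch(struct level_sketch *q, unsigned int ndesc)
    {
        unsigned int level, block_level;

        q->added += ndesc;
        level = q->added - q->reaped;               /* cf. lines 361 and 379 */
        block_level = q->limit - q->max_pkt_desc;   /* cf. line 371 */
        if (level >= block_level)
            q->blocked = true;
    }

    int
    main(void)
    {
        /* Illustrative numbers only; the real limit comes from EFX_TXQ_LIMIT(). */
        struct level_sketch q = { .limit = 496, .max_pkt_desc = 34 };

        while (!q.blocked)
            post_sketch(&q, q.max_pkt_desc);
        printf("blocked at level %u of %u usable descriptors\n",
            q.added - q.reaped, q.limit);
        return (0);
    }
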
398 static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
414 KASSERT(!txq->blocked, ("txq->blocked"));
422 (txq->tso_fw_assisted == 0))
433 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
439 id = txq->added & txq->ptr_mask;
440 stmp = &txq->stmp[id];
441 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
449 ++txq->collapses;
451 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
459 bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);
463 hw_cksum_flags_prev = txq->hw_cksum_flags;
464 hw_vlan_tci_prev = txq->hw_vlan_tci;
471 n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp);
472 n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp);
475 rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg,
479 stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];
487 desc = &txq->pend_desc[i + n_extra_descs];
489 efx_tx_qdesc_dma_create(txq->common,
497 sfxge_next_stmp(txq, &stmp);
499 txq->n_pend_desc = n_dma_seg + n_extra_descs;
517 sfxge_tx_qlist_post(txq);
522 txq->hw_vlan_tci = hw_vlan_tci_prev;
523 txq->hw_cksum_flags = hw_cksum_flags_prev;
524 bus_dmamap_unload(txq->packet_dma_tag, *used_map);
528 ++txq->drops;
537 sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
547 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
549 sc = txq->sc;
550 stdp = &txq->dpl;
551 pushed = txq->added;
553 if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
555 prefetch_read_many(txq->common);
576 rc = sfxge_tx_queue_mbuf(txq, mbuf);
583 if (txq->blocked)
587 if (txq->added - pushed >= SFXGE_TX_BATCH) {
588 efx_tx_qpush(txq->common, txq->added, pushed);
589 pushed = txq->added;
607 if (txq->added != pushed)
608 efx_tx_qpush(txq->common, txq->added, pushed);
610 KASSERT(txq->blocked || stdp->std_get_count == 0,
619 * NOTE: drops the txq mutex!
622 sfxge_tx_qdpl_service(struct sfxge_txq *txq)
624 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
627 if (SFXGE_TX_QDPL_PENDING(txq))
628 sfxge_tx_qdpl_swizzle(txq);
630 if (!txq->blocked)
631 sfxge_tx_qdpl_drain(txq);
633 SFXGE_TXQ_UNLOCK(txq);
634 } while (SFXGE_TX_QDPL_PENDING(txq) &&
635 SFXGE_TXQ_TRYLOCK(txq));
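
The sfxge_tx_qdpl_service shape at lines 622-635 (note the warning at line 619 that it drops the txq mutex) drains under the lock, releases the lock, and only loops again if more work is pending and the lock can be re-taken without sleeping; a caller that loses the trylock relies on the current owner picking the work up. A small pthread-based sketch of that loop, with placeholder names, assuming this reading of the listing:

    #include <pthread.h>

    struct dpl_sketch {
        pthread_mutex_t lock;
        int pending;              /* stands in for SFXGE_TX_QDPL_PENDING() */
    };

    static void
    drain_locked(struct dpl_sketch *d)
    {
        d->pending = 0;           /* placeholder for the real swizzle + drain */
    }

    /* Caller holds d->lock; returns with it released. */
    static void
    service(struct dpl_sketch *d)
    {
        do {
            if (d->pending)
                drain_locked(d);
            pthread_mutex_unlock(&d->lock);
        } while (d->pending && pthread_mutex_trylock(&d->lock) == 0);
    }

    int
    main(void)
    {
        struct dpl_sketch d = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = 1 };

        pthread_mutex_lock(&d.lock);
        service(&d);              /* lock is released on return */
        return (0);
    }
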
642 sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf)
646 stdp = &txq->dpl;
650 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
653 txq->get_overflow++;
659 txq->get_non_tcp_overflow++;
679 sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf)
689 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
691 stdp = &txq->dpl;
703 atomic_add_long(&txq->put_overflow, 1);
714 * Called from if_transmit - will try to grab the txq lock and enqueue to the
718 sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
722 if (!SFXGE_LINK_UP(txq->sc)) {
723 atomic_add_long(&txq->netdown_drops, 1);
728 * Try to grab the txq lock. If we are able to get the lock,
732 if (SFXGE_TXQ_TRYLOCK(txq)) {
734 sfxge_tx_qdpl_swizzle(txq);
736 rc = sfxge_tx_qdpl_put_locked(txq, m);
739 sfxge_tx_qdpl_service(txq);
742 rc = sfxge_tx_qdpl_put_unlocked(txq, m);
751 if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) {
752 sfxge_tx_qdpl_service(txq);
757 SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
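
The comments at lines 714 and 728 describe the if_transmit entry point: grab the txq lock opportunistically, enqueue under the lock and service the deferred list when that works, otherwise enqueue lock-free and only service if a later trylock succeeds. A compact pthread sketch of that split, reusing the service() shape from the previous sketch (all names illustrative):

    #include <pthread.h>

    struct txq_sketch {
        pthread_mutex_t lock;
        int pending;
    };

    /* Placeholders for the real deferred-list operations. */
    static int
    put_locked(struct txq_sketch *q, void *pkt)
    {
        (void)pkt;
        q->pending = 1;
        return (0);
    }

    static int
    put_unlocked(struct txq_sketch *q, void *pkt)
    {
        (void)pkt;
        q->pending = 1;         /* the driver does this lock-free, with atomics */
        return (0);
    }

    static void
    service(struct txq_sketch *q)
    {
        q->pending = 0;
        pthread_mutex_unlock(&q->lock);     /* service() always drops the lock */
    }

    static int
    packet_add(struct txq_sketch *q, void *pkt)
    {
        int rc;

        if (pthread_mutex_trylock(&q->lock) == 0) {
            rc = put_locked(q, pkt);
            if (rc != 0) {
                pthread_mutex_unlock(&q->lock);
                return (rc);
            }
            service(q);                     /* lock dropped here */
            return (0);
        }

        rc = put_unlocked(q, pkt);          /* slow path: no lock held */
        if (rc == 0 && pthread_mutex_trylock(&q->lock) == 0)
            service(q);                     /* lock dropped here */
        return (rc);
    }

    int
    main(void)
    {
        struct txq_sketch q = { .lock = PTHREAD_MUTEX_INITIALIZER };

        return (packet_add(&q, (void *)0));
    }
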
763 sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
765 struct sfxge_tx_dpl *stdp = &txq->dpl;
768 SFXGE_TXQ_LOCK(txq);
770 sfxge_tx_qdpl_swizzle(txq);
780 SFXGE_TXQ_UNLOCK(txq);
792 sfxge_tx_qdpl_flush(sc->txq[i]);
880 struct sfxge_txq *txq;
927 txq = sc->txq[index];
929 txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
931 txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
934 rc = sfxge_tx_packet_add(txq, m);
1007 static int tso_init(struct sfxge_txq *txq)
1009 struct sfxge_softc *sc = txq->sc;
1014 txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
1018 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
1027 sfxge_dma_free(&txq->tsoh_buffer[i]);
1028 free(txq->tsoh_buffer, M_SFXGE);
1029 txq->tsoh_buffer = NULL;
1033 static void tso_fini(struct sfxge_txq *txq)
1037 if (txq->tsoh_buffer != NULL) {
1038 for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
1039 sfxge_dma_free(&txq->tsoh_buffer[i]);
1040 free(txq->tsoh_buffer, M_SFXGE);
1044 static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
1048 const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp);
1055 tso->fw_assisted = txq->tso_fw_assisted;
1140 efx_tx_qdesc_dma_create(txq->common,
1157 static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
1201 desc = &txq->pend_desc[txq->n_pend_desc++];
1202 efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc);
1221 static int tso_start_new_packet(struct sfxge_txq *txq,
1237 desc = &txq->pend_desc[txq->n_pend_desc];
1238 efx_tx_qdesc_tso2_create(txq->common,
1245 txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS;
1246 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
1247 id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask;
1258 desc = &txq->pend_desc[txq->n_pend_desc++];
1259 efx_tx_qdesc_tso_create(txq->common,
1264 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
1265 id = (id + 1) & txq->ptr_mask;
1273 txq->n_pend_desc++;
1274 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
1275 id = (id + 1) & txq->ptr_mask;
1282 header = (txq->tsoh_buffer[page_index].esm_base +
1284 dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
1286 map = txq->tsoh_buffer[page_index].esm_map;
1288 KASSERT(txq->stmp[id].flags == 0,
1291 struct sfxge_tx_mapping *stmp = &txq->stmp[id];
1297 rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
1304 bus_dmamap_unload(txq->packet_dma_tag,
1313 txq->tso_long_headers++;
1347 bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);
1350 desc = &txq->pend_desc[txq->n_pend_desc++];
1351 efx_tx_qdesc_dma_create(txq->common,
1356 id = (id + 1) & txq->ptr_mask;
1361 txq->tso_packets++;
1368 sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
1376 tso_start(txq, &tso, dma_seg, mbuf);
1387 id = (txq->added + n_extra_descs) & txq->ptr_mask;
1388 if (__predict_false(tso_start_new_packet(txq, &tso, &id)))
1392 tso_fill_packet_with_fragment(txq, &tso);
1394 KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
1395 id = (id + 1) & txq->ptr_mask;
1419 if (txq->n_pend_desc + n_fatso_opt_desc +
1420 1 /* header */ + n_dma_seg > txq->max_pkt_desc) {
1421 txq->tso_pdrop_too_many++;
1424 if (__predict_false(tso_start_new_packet(txq, &tso,
1426 txq->tso_pdrop_no_rsrc++;
1432 txq->tso_bursts++;
1437 sfxge_tx_qunblock(struct sfxge_txq *txq)
1442 sc = txq->sc;
1443 evq = sc->evq[txq->evq_index];
1447 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
1450 SFXGE_TXQ_LOCK(txq);
1452 if (txq->blocked) {
1455 level = txq->added - txq->completed;
1456 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
1458 sfxge_tx_qreap(txq);
1459 txq->blocked = 0;
1463 sfxge_tx_qdpl_service(txq);
1468 sfxge_tx_qflush_done(struct sfxge_txq *txq)
1471 txq->flush_state = SFXGE_FLUSH_DONE;
1477 struct sfxge_txq *txq;
1483 txq = sc->txq[index];
1484 evq = sc->evq[txq->evq_index];
1487 SFXGE_TXQ_LOCK(txq);
1489 KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
1490 ("txq->init_state != SFXGE_TXQ_STARTED"));
1492 txq->init_state = SFXGE_TXQ_INITIALIZED;
1494 if (txq->flush_state != SFXGE_FLUSH_DONE) {
1495 txq->flush_state = SFXGE_FLUSH_PENDING;
1498 SFXGE_TXQ_UNLOCK(txq);
1501 if (efx_tx_qflush(txq->common) != 0) {
1504 txq->flush_state = SFXGE_FLUSH_DONE;
1510 if (txq->flush_state != SFXGE_FLUSH_PENDING)
1515 SFXGE_TXQ_LOCK(txq);
1517 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
1518 ("txq->flush_state == SFXGE_FLUSH_FAILED"));
1520 if (txq->flush_state != SFXGE_FLUSH_DONE) {
1524 txq->flush_state = SFXGE_FLUSH_DONE;
1528 txq->blocked = 0;
1529 txq->pending = txq->added;
1531 sfxge_tx_qcomplete(txq, evq);
1532 KASSERT(txq->completed == txq->added,
1533 ("txq->completed != txq->added"));
1535 sfxge_tx_qreap(txq);
1536 KASSERT(txq->reaped == txq->completed,
1537 ("txq->reaped != txq->completed"));
1539 txq->added = 0;
1540 txq->pending = 0;
1541 txq->completed = 0;
1542 txq->reaped = 0;
1545 efx_tx_qdestroy(txq->common);
1546 txq->common = NULL;
1548 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1551 txq->hw_cksum_flags = 0;
1554 SFXGE_TXQ_UNLOCK(txq);
1618 struct sfxge_txq *txq;
1629 txq = sc->txq[index];
1630 esmp = &txq->mem;
1631 evq = sc->evq[txq->evq_index];
1633 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1634 ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1639 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
1645 switch (txq->type) {
1664 label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type;
1668 sc->txq_entries, txq->buf_base_id, flags, evq->common,
1669 &txq->common, &desc_index)) != 0) {
1678 sc->txq_entries, txq->buf_base_id, flags, evq->common,
1679 &txq->common, &desc_index)) != 0)
1684 txq->added = txq->pending = txq->completed = txq->reaped = desc_index;
1686 SFXGE_TXQ_LOCK(txq);
1689 efx_tx_qenable(txq->common);
1691 txq->init_state = SFXGE_TXQ_STARTED;
1692 txq->flush_state = SFXGE_FLUSH_REQUIRED;
1693 txq->tso_fw_assisted = tso_fw_assisted;
1695 txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type,
1698 txq->hw_vlan_tci = 0;
1700 txq->hw_cksum_flags = flags &
1703 SFXGE_TXQ_UNLOCK(txq);
1708 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1753 sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node)
1755 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev);
1769 (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset),
1782 struct sfxge_txq *txq;
1785 txq = sc->txq[index];
1787 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1788 ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1790 if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
1791 tso_fini(txq);
1794 free(txq->pend_desc, M_SFXGE);
1797 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1798 free(txq->stmp, M_SFXGE);
1801 sfxge_dma_free(&txq->mem);
1803 sc->txq[index] = NULL;
1805 SFXGE_TXQ_LOCK_DESTROY(txq);
1807 free(txq, M_SFXGE);
1818 struct sfxge_txq *txq;
1826 txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
1827 txq->sc = sc;
1828 txq->entries = sc->txq_entries;
1829 txq->ptr_mask = txq->entries - 1;
1831 sc->txq[txq_index] = txq;
1832 esmp = &txq->mem;
1842 &txq->buf_base_id);
1850 &txq->packet_dma_tag) != 0) {
1851 device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
1857 txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries,
1861 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
1864 rc = bus_dmamap_create(txq->packet_dma_tag, 0,
1865 &txq->stmp[nmaps].map);
1879 (rc = tso_init(txq)) != 0)
1883 stdp = &txq->dpl;
1889 SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);
1912 rc = sfxge_txq_stat_init(txq, txq_node);
1916 txq->type = type;
1917 txq->evq_index = evq_index;
1918 txq->init_state = SFXGE_TXQ_INITIALIZED;
1926 free(txq->pend_desc, M_SFXGE);
1929 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1930 free(txq->stmp, M_SFXGE);
1931 bus_dma_tag_destroy(txq->packet_dma_tag);
1950 sum += *(unsigned long *)((caddr_t)sc->txq[index] +
1980 struct sfxge_txq *txq;
1984 txq = sc->txq[index];
1986 * In theory, txq->put_overflow and txq->netdown_drops
1988 * obtained under txq lock, but it is just statistics.
1990 drops += txq->drops + txq->get_overflow +
1991 txq->get_non_tcp_overflow +
1992 txq->put_overflow + txq->netdown_drops +
1993 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
2057 OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");