1/*-
2 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
3 * All rights reserved.
4 *
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 29 unchanged lines hidden ---

38 *
39 * So, event queue plus label mapping to Tx queue index is:
40 * if event queue index is 0, TxQ-index = TxQ-label, in [0..SFXGE_TXQ_NTYPES)
41 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
42 * See sfxge_get_txq_by_label() in sfxge_ev.c
43 */
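
A minimal sketch of the mapping described in the comment above. Field names (evq->index, evq->sc->txq[]) are assumed for illustration; the authoritative implementation is sfxge_get_txq_by_label() in sfxge_ev.c.

static struct sfxge_txq *
sfxge_get_txq_by_label_sketch(struct sfxge_evq *evq, enum sfxge_txq_type label)
{
	unsigned int index;

	/* EvQ 0 serves one TxQ per type; every other EvQ serves one TxQ. */
	index = (evq->index == 0) ?
	    (unsigned int)label : (SFXGE_TXQ_NTYPES + evq->index - 1);
	return (evq->sc->txq[index]);
}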
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 277895 2015-01-29 19:11:37Z arybchik $");
46__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 278221 2015-02-04 20:03:57Z arybchik $");
47
48#include <sys/types.h>
49#include <sys/mbuf.h>
50#include <sys/smp.h>
51#include <sys/socket.h>
52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54

--- 58 unchanged lines hidden ---

113static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
114 const bus_dma_segment_t *dma_seg, int n_dma_seg);
115
116void
117sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
118{
119 unsigned int completed;
120
121 mtx_assert(&evq->lock, MA_OWNED);
121 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
122
123 completed = txq->completed;
124 while (completed != txq->pending) {
125 struct sfxge_tx_mapping *stmp;
126 unsigned int id;
127
128 id = completed++ & txq->ptr_mask;
129

--- 43 unchanged lines hidden ---
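
This revision replaces direct mtx(9) calls with per-object lock macros. A plausible expansion, assuming the macros (defined in sfxge.h) are thin wrappers around the pre-existing mutexes:

/* Assumed thin wrappers around mtx(9); the real definitions are in sfxge.h. */
#define	SFXGE_TXQ_LOCK_INIT(_txq, _name)	\
	mtx_init(&(_txq)->lock, (_name), NULL, MTX_DEF)
#define	SFXGE_TXQ_LOCK_DESTROY(_txq)	mtx_destroy(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK(_txq)		mtx_lock(&(_txq)->lock)
#define	SFXGE_TXQ_TRYLOCK(_txq)		mtx_trylock(&(_txq)->lock)
#define	SFXGE_TXQ_UNLOCK(_txq)		mtx_unlock(&(_txq)->lock)
#define	SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq)	\
	mtx_assert(&(_txq)->lock, MA_OWNED)
#define	SFXGE_EVQ_LOCK(_evq)		mtx_lock(&(_evq)->lock)
#define	SFXGE_EVQ_UNLOCK(_evq)		mtx_unlock(&(_evq)->lock)
#define	SFXGE_EVQ_LOCK_ASSERT_OWNED(_evq)	\
	mtx_assert(&(_evq)->lock, MA_OWNED)

Note the semantic shift visible in the diff: before this change SFXGE_TXQ_LOCK(txq) evaluated to the mutex pointer (hence the deleted mtx_lock(SFXGE_TXQ_LOCK(txq)) call sites), while after it performs the lock operation itself. Centralizing the operations behind macros means a later change of lock type touches one header instead of every call site.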

173{
174 struct sfxge_tx_dpl *stdp;
175 struct mbuf *mbuf, *get_next, **get_tailp;
176 volatile uintptr_t *putp;
177 uintptr_t put;
178 unsigned int count;
179 unsigned int non_tcp_count;
180
181 mtx_assert(&txq->lock, MA_OWNED);
181 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
182
183 stdp = &txq->dpl;
184
185 /* Acquire the put list. */
186 putp = &stdp->std_put;
187 put = atomic_readandclear_ptr(putp);
188 mbuf = (void *)put;
189

--- 26 unchanged lines hidden ---

216 stdp->std_get_non_tcp_count += non_tcp_count;
217}
218
219#endif /* SFXGE_HAVE_MQ */
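
sfxge_tx_qdpl_swizzle() above detaches the producers' "put list" with a single atomic read-and-clear; the hidden body then walks the detached chain. A simplified sketch of the detach-and-reverse step, assuming producers push newest-first so the chain must be reversed to recover FIFO order:

/*
 * Simplified sketch: atomically detach the put list, then reverse it.
 * (The real swizzle also counts non-TCP packets and appends the
 * result to the get list.)
 */
static struct mbuf *
sfxge_put_list_detach_sketch(struct sfxge_tx_dpl *stdp)
{
	struct mbuf *mbuf, *prev, *next;

	mbuf = (struct mbuf *)atomic_readandclear_ptr(&stdp->std_put);
	prev = NULL;
	while (mbuf != NULL) {
		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = prev;		/* reverse the link */
		prev = mbuf;
		mbuf = next;
	}
	return (prev);		/* oldest packet first */
}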
220
221static void
222sfxge_tx_qreap(struct sfxge_txq *txq)
223{
224 mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
224 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
225
226 txq->reaped = txq->completed;
227}
228
229static void
230sfxge_tx_qlist_post(struct sfxge_txq *txq)
231{
232 unsigned int old_added;
233 unsigned int level;
234 int rc;
235
236 mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);
236 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
237
238 KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
239 KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
240 ("txq->n_pend_desc too large"));
241 KASSERT(!txq->blocked, ("txq->blocked"));
242
243 old_added = txq->added;
244

--- 158 unchanged lines hidden ---

403 struct sfxge_softc *sc;
404 struct sfxge_tx_dpl *stdp;
405 struct mbuf *mbuf, *next;
406 unsigned int count;
407 unsigned int non_tcp_count;
408 unsigned int pushed;
409 int rc;
410
411 mtx_assert(&txq->lock, MA_OWNED);
411 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
412
413 sc = txq->sc;
414 stdp = &txq->dpl;
415 pushed = txq->added;
416
417 prefetch_read_many(sc->enp);
418 prefetch_read_many(txq->common);
419

--- 59 unchanged lines hidden ---

479/*
480 * Service the deferred packet list.
481 *
482 * NOTE: drops the txq mutex!
483 */
484static inline void
485sfxge_tx_qdpl_service(struct sfxge_txq *txq)
486{
487 mtx_assert(&txq->lock, MA_OWNED);
487 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
488
489 do {
490 if (SFXGE_TX_QDPL_PENDING(txq))
491 sfxge_tx_qdpl_swizzle(txq);
492
493 if (!txq->blocked)
494 sfxge_tx_qdpl_drain(txq);
495
496 mtx_unlock(&txq->lock);
496 SFXGE_TXQ_UNLOCK(txq);
497 } while (SFXGE_TX_QDPL_PENDING(txq) &&
498 mtx_trylock(&txq->lock));
498 SFXGE_TXQ_TRYLOCK(txq));
499}
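
A hypothetical caller, illustrating the contract stated in the NOTE above: the service routine is entered with the txq lock held and always returns with it dropped.

static void
sfxge_tx_kick_sketch(struct sfxge_txq *txq)
{
	if (SFXGE_TXQ_TRYLOCK(txq)) {
		sfxge_tx_qdpl_service(txq);
		/* The txq lock has been dropped by the service routine. */
	}
}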
500
501/*
502 * Put a packet on the deferred packet list.
503 *
504 * If we are called with the txq lock held, we put the packet on the "get
505 * list", otherwise we atomically push it on the "put list". The swizzle
506 * function takes care of ordering.

--- 7 unchanged lines hidden (view full) ---

514{
515 struct sfxge_tx_dpl *stdp;
516
517 stdp = &txq->dpl;
518
519 KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));
520
521 if (locked) {
522 mtx_assert(&txq->lock, MA_OWNED);
522 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
523
524 sfxge_tx_qdpl_swizzle(txq);
525
526 if (stdp->std_get_count >= stdp->std_get_max) {
527 txq->get_overflow++;
528 return (ENOBUFS);
529 }
530 if (sfxge_is_mbuf_non_tcp(mbuf)) {

--- 52 unchanged lines hidden ---
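
When the caller does not hold the txq lock, the packet is pushed onto the "put list" without taking it. A simplified sketch of that push as a compare-and-swap loop on std_put (assumed shape; the real code in the hidden lines also tracks the list length and fails with ENOBUFS past std_put_max):

/*
 * Simplified lock-free push onto the put list head. The detached
 * chain therefore comes out newest-first; see the swizzle sketch.
 */
static void
sfxge_put_list_push_sketch(struct sfxge_tx_dpl *stdp, struct mbuf *mbuf)
{
	volatile uintptr_t *putp = &stdp->std_put;
	uintptr_t old, new;

	new = (uintptr_t)mbuf;
	do {
		old = *putp;
		mbuf->m_nextpkt = (struct mbuf *)old;	/* chain to old head */
	} while (atomic_cmpset_ptr(putp, old, new) == 0);
}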

583 goto fail;
584 }
585
586 /*
587 * Try to grab the txq lock. If we are able to get the lock,
588 * the packet will be appended to the "get list" of the deferred
589 * packet list. Otherwise, it will be pushed on the "put list".
590 */
591 locked = mtx_trylock(&txq->lock);
591 locked = SFXGE_TXQ_TRYLOCK(txq);
592
593 if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
594 if (locked)
595 mtx_unlock(&txq->lock);
595 SFXGE_TXQ_UNLOCK(txq);
596 rc = ENOBUFS;
597 goto fail;
598 }
599
600 /*
601 * Try to grab the lock again.
602 *
603 * If we are able to get the lock, we need to process the deferred
604 * packet list. If we are not able to get the lock, another thread
605 * is processing the list.
606 */
607 if (!locked)
608 locked = mtx_trylock(&txq->lock);
608 locked = SFXGE_TXQ_TRYLOCK(txq);
609
610 if (locked) {
611 /* Try to service the list. */
612 sfxge_tx_qdpl_service(txq);
613 /* Lock has been dropped. */
614 }
615
616 return (0);

--- 4 unchanged lines hidden ---

621}
622
623static void
624sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
625{
626 struct sfxge_tx_dpl *stdp = &txq->dpl;
627 struct mbuf *mbuf, *next;
628
629 mtx_lock(&txq->lock);
629 SFXGE_TXQ_LOCK(txq);
630
631 sfxge_tx_qdpl_swizzle(txq);
632 for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
633 next = mbuf->m_nextpkt;
634 m_freem(mbuf);
635 }
636 stdp->std_get = NULL;
637 stdp->std_get_count = 0;
638 stdp->std_get_non_tcp_count = 0;
639 stdp->std_getp = &stdp->std_get;
640
641 mtx_unlock(&txq->lock);
641 SFXGE_TXQ_UNLOCK(txq);
642}
643
644void
645sfxge_if_qflush(struct ifnet *ifp)
646{
647 struct sfxge_softc *sc;
648 int i;
649

--- 98 unchanged lines hidden ---

748 efx_tx_qpush(txq->common, txq->added);
749 }
750}
751
752void sfxge_if_start(struct ifnet *ifp)
753{
754 struct sfxge_softc *sc = ifp->if_softc;
755
756 mtx_lock(&sc->tx_lock);
756 SFXGE_TXQ_LOCK(sc->txq[0]);
757 sfxge_if_start_locked(ifp);
758 mtx_unlock(&sc->tx_lock);
758 SFXGE_TXQ_UNLOCK(sc->txq[0]);
759}
760
761static inline void
762sfxge_tx_qdpl_service(struct sfxge_txq *txq)
763{
764 struct sfxge_softc *sc = txq->sc;
765 struct ifnet *ifp = sc->ifnet;
764 struct ifnet *ifp = txq->sc->ifnet;
765
767 mtx_assert(&sc->tx_lock, MA_OWNED);
766 SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);
767 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
768 sfxge_if_start_locked(ifp);
770 mtx_unlock(&sc->tx_lock);
769 SFXGE_TXQ_UNLOCK(txq);
770}
771
772#endif /* SFXGE_HAVE_MQ */
773
774/*
775 * Software "TSO". Not quite as good as doing it in hardware, but
776 * still faster than segmenting in the stack.
777 */
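
As a rough illustration of the arithmetic involved (hypothetical helper, not driver code): a packet of pkt_len bytes carrying hdr_len bytes of protocol headers segments into ceil((pkt_len - hdr_len) / mss) wire packets, each prefixed with a fixed-up copy of the headers.

/* Hypothetical helper; howmany() is the ceiling division from <sys/param.h>. */
static unsigned int
sfxge_tso_nsegs_sketch(unsigned int pkt_len, unsigned int hdr_len,
    unsigned int mss)
{
	return (howmany(pkt_len - hdr_len, mss));
}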

--- 334 unchanged lines hidden ---

1112sfxge_tx_qunblock(struct sfxge_txq *txq)
1113{
1114 struct sfxge_softc *sc;
1115 struct sfxge_evq *evq;
1116
1117 sc = txq->sc;
1118 evq = sc->evq[txq->evq_index];
1119
1121 mtx_assert(&evq->lock, MA_OWNED);
1120 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
1121
1122 if (txq->init_state != SFXGE_TXQ_STARTED)
1123 return;
1124
1126 mtx_lock(SFXGE_TXQ_LOCK(txq));
1125 SFXGE_TXQ_LOCK(txq);
1126
1127 if (txq->blocked) {
1128 unsigned int level;
1129
1130 level = txq->added - txq->completed;
1131 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
1132 txq->blocked = 0;
1133 }

--- 14 unchanged lines hidden ---

1148{
1149 struct sfxge_txq *txq;
1150 struct sfxge_evq *evq;
1151 unsigned int count;
1152
1153 txq = sc->txq[index];
1154 evq = sc->evq[txq->evq_index];
1155
1157 mtx_lock(SFXGE_TXQ_LOCK(txq));
1156 SFXGE_TXQ_LOCK(txq);
1157
1158 KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
1159 ("txq->init_state != SFXGE_TXQ_STARTED"));
1160
1161 txq->init_state = SFXGE_TXQ_INITIALIZED;
1162 txq->flush_state = SFXGE_FLUSH_PENDING;
1163
1164 /* Flush the transmit queue. */
1165 efx_tx_qflush(txq->common);
1166
1168 mtx_unlock(SFXGE_TXQ_LOCK(txq));
1167 SFXGE_TXQ_UNLOCK(txq);
1168
1169 count = 0;
1170 do {
1171 /* Spin for 100ms. */
1172 DELAY(100000);
1173
1174 if (txq->flush_state != SFXGE_FLUSH_PENDING)
1175 break;
1176 } while (++count < 20);
1177
1179 mtx_lock(&evq->lock);
1180 mtx_lock(SFXGE_TXQ_LOCK(txq));
1178 SFXGE_EVQ_LOCK(evq);
1179 SFXGE_TXQ_LOCK(txq);
1180
1181 KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
1182 ("txq->flush_state == SFXGE_FLUSH_FAILED"));
1183
1184 txq->flush_state = SFXGE_FLUSH_DONE;
1185
1186 txq->blocked = 0;
1187 txq->pending = txq->added;

--- 13 unchanged lines hidden ---

1201
1202 /* Destroy the common code transmit queue. */
1203 efx_tx_qdestroy(txq->common);
1204 txq->common = NULL;
1205
1206 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1207 EFX_TXQ_NBUFS(sc->txq_entries));
1208
1210 mtx_unlock(&evq->lock);
1211 mtx_unlock(SFXGE_TXQ_LOCK(txq));
1209 SFXGE_EVQ_UNLOCK(evq);
1210 SFXGE_TXQ_UNLOCK(txq);
1211}
1212
1213static int
1214sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
1215{
1216 struct sfxge_txq *txq;
1217 efsys_mem_t *esmp;
1218 uint16_t flags;

--- 32 unchanged lines hidden ---

1251 }
1252
1253 /* Create the common code transmit queue. */
1254 if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
1255 sc->txq_entries, txq->buf_base_id, flags, evq->common,
1256 &txq->common)) != 0)
1257 goto fail;
1258
1260 mtx_lock(SFXGE_TXQ_LOCK(txq));
1259 SFXGE_TXQ_LOCK(txq);
1260
1261 /* Enable the transmit queue. */
1262 efx_tx_qenable(txq->common);
1263
1264 txq->init_state = SFXGE_TXQ_STARTED;
1265
1267 mtx_unlock(SFXGE_TXQ_LOCK(txq));
1266 SFXGE_TXQ_UNLOCK(txq);
1267
1268 return (0);
1269
1270fail:
1271 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1272 EFX_TXQ_NBUFS(sc->txq_entries));
1273 return (rc);
1274}

--- 81 unchanged lines hidden ---

1356 free(txq->stmp, M_SFXGE);
1357
1358 /* Release DMA memory mapping. */
1359 sfxge_dma_free(&txq->mem);
1360
1361 sc->txq[index] = NULL;
1362
1363#ifdef SFXGE_HAVE_MQ
1365 mtx_destroy(&txq->lock);
1364 SFXGE_TXQ_LOCK_DESTROY(txq);
1365#endif
1366
1367 free(txq, M_SFXGE);
1368}
1369
1370static int
1371sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
1372 enum sfxge_txq_type type, unsigned int evq_index)

--- 89 unchanged lines hidden ---

1462
1463 /* Initialize the deferred packet list. */
1464 stdp = &txq->dpl;
1465 stdp->std_put_max = sfxge_tx_dpl_put_max;
1466 stdp->std_get_max = sfxge_tx_dpl_get_max;
1467 stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
1468 stdp->std_getp = &stdp->std_get;
1469
1471 mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
1470 SFXGE_TXQ_LOCK_INIT(txq, "txq");
1471
1472 SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
1473 SYSCTL_CHILDREN(txq_node), OID_AUTO,
1474 "dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
1475 &stdp->std_get_count, 0, "");
1476 SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
1477 SYSCTL_CHILDREN(txq_node), OID_AUTO,
1478 "dpl_get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,

--- 155 unchanged lines hidden ---