sfxge_tx.c: 272325 (old) vs 272328 (new)
1/*-
2 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
3 * All rights reserved.
4 *
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 29 unchanged lines hidden ---

38 *
39 * So, event queue plus label mapping to Tx queue index is:
40 * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES)
41 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
42 * See sfxge_get_txq_by_label() sfxge_ev.c
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 272325 2014-09-30 20:18:10Z gnn $");
46__FBSDID("$FreeBSD: head/sys/dev/sfxge/sfxge_tx.c 272328 2014-09-30 20:36:07Z gnn $");
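As context for the hunks below, the file's header comment describes how the event-queue index and transmit-queue label map onto a TX queue index. The following is a hedged sketch of that mapping, for illustration only; it is not the real sfxge_get_txq_by_label() from sfxge_ev.c, and it assumes SFXGE_TXQ_NTYPES is 3 (non-checksum, IP checksum, IP/TCP/UDP checksum).

/* Hedged illustration of the EvQ-label to TxQ-index mapping. */
#define SKETCH_TXQ_NTYPES 3	/* assumed value of SFXGE_TXQ_NTYPES */

static unsigned int
sketch_txq_index(unsigned int evq_index, unsigned int txq_label)
{
	if (evq_index == 0)
		return (txq_label);	/* label lies in [0..SKETCH_TXQ_NTYPES) */

	/* Every other event queue carries a single TSO/checksum TX queue. */
	return (SKETCH_TXQ_NTYPES + evq_index - 1);
}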
47
48#include <sys/types.h>
49#include <sys/mbuf.h>
50#include <sys/smp.h>
51#include <sys/socket.h>
52#include <sys/sysctl.h>
53
54#include <net/bpf.h>

--- 15 unchanged lines hidden ---

70 * large number of descriptors for TSO. With minimum MSS and
71 * maximum mbuf length we might need more than a ring-ful of
72 * descriptors, but this should not happen in practice except
73 * due to deliberate attack. In that case we will truncate
74 * the output at a packet boundary. Allow for a reasonable
75 * minimum MSS of 512.
76 */
77#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
78#define SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
78#define SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC)
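The block-level macro now takes the ring size as a parameter instead of hard-coding SFXGE_NDESCS. A worked example under assumed values (SFXGE_TX_MAPPING_MAX_SEG is defined outside this excerpt; 16 and a 1024-entry ring are used purely for illustration):

/*
 * Hedged arithmetic, assuming SFXGE_TX_MAPPING_MAX_SEG == 16 and a
 * 1024-entry ring:
 *	SFXGE_TSO_MAX_DESC          = (65535 / 512) * 2 + 16 - 1
 *	                            = 127 * 2 + 15 = 269
 *	SFXGE_TXQ_BLOCK_LEVEL(1024) = 1024 - 269 = 755
 * i.e. posting blocks once fewer than one worst-case TSO burst's worth
 * of descriptors remains free.
 */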
79
80/* Forward declarations. */
81static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
82static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
83static void sfxge_tx_qunblock(struct sfxge_txq *txq);
84static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
85 const bus_dma_segment_t *dma_seg, int n_dma_seg);
86

--- 9 unchanged lines hidden ---

96
97 mtx_assert(&evq->lock, MA_OWNED);
98
99 completed = txq->completed;
100 while (completed != txq->pending) {
101 struct sfxge_tx_mapping *stmp;
102 unsigned int id;
103
104 id = completed++ & (SFXGE_NDESCS - 1);
104 id = completed++ & txq->ptr_mask;
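This hunk swaps the compile-time mask (SFXGE_NDESCS - 1) for the per-queue txq->ptr_mask. The idiom relies on the ring size being a power of two, so masking a free-running counter yields the ring slot; a minimal sketch with illustrative names, not driver code:

/* Hedged sketch of the ring-index masking used throughout this file. */
static unsigned int
sketch_ring_slot(unsigned int counter, unsigned int entries)
{
	unsigned int ptr_mask = entries - 1;	/* valid only if entries is 2^n */

	return (counter & ptr_mask);		/* e.g. 1030 & 1023 == 6 */
}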
105
106 stmp = &txq->stmp[id];
107 if (stmp->flags & TX_BUF_UNMAP) {
108 bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
109 if (stmp->flags & TX_BUF_MBUF) {
110 struct mbuf *m = stmp->u.mbuf;
111 do
112 m = m_free(m);

--- 7 unchanged lines hidden ---

120 txq->completed = completed;
121
122 /* Check whether we need to unblock the queue. */
123 mb();
124 if (txq->blocked) {
125 unsigned int level;
126
127 level = txq->added - txq->completed;
128 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
128 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
129 sfxge_tx_qunblock(txq);
130 }
131}
132
133#ifdef SFXGE_HAVE_MQ
134
135/*
136 * Reorder the put list and append it to the get list.

--- 76 unchanged lines hidden ---

213 /* If efx_tx_qpost() had to refragment, our information about
214 * buffers to free may be associated with the wrong
215 * descriptors.
216 */
217 KASSERT(txq->added - old_added == txq->n_pend_desc,
218 ("efx_tx_qpost() refragmented descriptors"));
219
220 level = txq->added - txq->reaped;
221 KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));
221 KASSERT(level <= txq->entries, ("overfilled TX queue"));
222
223 /* Clear the fragment list. */
224 txq->n_pend_desc = 0;
225
226 /* Have we reached the block level? */
227 if (level < SFXGE_TXQ_BLOCK_LEVEL)
227 if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
228 return;
229
230 /* Reap, and check again */
231 sfxge_tx_qreap(txq);
232 level = txq->added - txq->reaped;
233 if (level < SFXGE_TXQ_BLOCK_LEVEL)
233 if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
234 return;
235
236 txq->blocked = 1;
237
238 /*
239 * Avoid a race with completion interrupt handling that could leave
240 * the queue blocked.
241 */
242 mb();
243 sfxge_tx_qreap(txq);
244 level = txq->added - txq->reaped;
245 if (level < SFXGE_TXQ_BLOCK_LEVEL) {
245 if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
246 mb();
247 txq->blocked = 0;
248 }
249}
250
251static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
252{
253 bus_dmamap_t *used_map;

--- 12 unchanged lines hidden ---

266 prefetch_read_many(mbuf->m_data);
267
268 if (txq->init_state != SFXGE_TXQ_STARTED) {
269 rc = EINTR;
270 goto reject;
271 }
272
273 /* Load the packet for DMA. */
274 id = txq->added & (SFXGE_NDESCS - 1);
274 id = txq->added & txq->ptr_mask;
275 stmp = &txq->stmp[id];
276 rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
277 mbuf, dma_seg, &n_dma_seg, 0);
278 if (rc == EFBIG) {
279 /* Try again. */
280 struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
281 SFXGE_TX_MAPPING_MAX_SEG);
282 if (new_mbuf == NULL)

--- 30 unchanged lines hidden ---

313 desc->eb_eop = 1;
314 break;
315 }
316 desc->eb_eop = 0;
317 i++;
318
319 stmp->flags = 0;
320 if (__predict_false(stmp ==
321 &txq->stmp[SFXGE_NDESCS - 1]))
321 &txq->stmp[txq->ptr_mask]))
322 stmp = &txq->stmp[0];
323 else
324 stmp++;
325 }
326 txq->n_pend_desc = n_dma_seg;
327 }
328
329 /*

--- 427 unchanged lines hidden ---

757 * allocated from the heap.
758 */
759#define TSOH_STD_SIZE 128
760
761/* At most half the descriptors in the queue at any time will refer to
762 * a TSO header buffer, since they must always be followed by a
763 * payload descriptor referring to an mbuf.
764 */
765#define TSOH_COUNT (SFXGE_NDESCS / 2u)
765#define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u)
766#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
767#define TSOH_PAGE_COUNT ((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
767#define TSOH_PAGE_COUNT(_txq_entries) \
768 ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
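With TSOH_STD_SIZE of 128 and a 4 KB page (an assumption; PAGE_SIZE is architecture-dependent), the parameterised macros work out as below for a hypothetical 1024-entry ring:

/*
 * Hedged arithmetic for a 4096-byte page and 1024 TX queue entries:
 *	TSOH_PER_PAGE         = 4096 / 128 = 32
 *	TSOH_COUNT(1024)      = 1024 / 2   = 512
 *	TSOH_PAGE_COUNT(1024) = (512 + 31) / 32 = 16 pages
 */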
768
769static int tso_init(struct sfxge_txq *txq)
770{
771 struct sfxge_softc *sc = txq->sc;
773 unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
772 int i, rc;
773
774 /* Allocate TSO header buffers */
775 txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
777 txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
776 M_SFXGE, M_WAITOK);
777
778 for (i = 0; i < TSOH_PAGE_COUNT; i++) {
780 for (i = 0; i < tsoh_page_count; i++) {
779 rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
780 if (rc != 0)
781 goto fail;
782 }
783
784 return (0);
785
786fail:

--- 4 unchanged lines hidden ---

791 return (rc);
792}
793
794static void tso_fini(struct sfxge_txq *txq)
795{
796 int i;
797
798 if (txq->tsoh_buffer != NULL) {
799 for (i = 0; i < TSOH_PAGE_COUNT; i++)
801 for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
800 sfxge_dma_free(&txq->tsoh_buffer[i]);
801 free(txq->tsoh_buffer, M_SFXGE);
802 }
803}
804
805static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
806{
807 struct ether_header *eh = mtod(mbuf, struct ether_header *);

--- 197 unchanged lines hidden ---

1005 ++dma_seg;
1006 tso.in_len = dma_seg->ds_len;
1007 tso.dma_addr = dma_seg->ds_addr;
1008 } else {
1009 tso.in_len = dma_seg->ds_len - tso.header_len;
1010 tso.dma_addr = dma_seg->ds_addr + tso.header_len;
1011 }
1012
1013 id = txq->added & (SFXGE_NDESCS - 1);
1015 id = txq->added & txq->ptr_mask;
1014 if (__predict_false(tso_start_new_packet(txq, &tso, id)))
1015 return -1;
1017 return (-1);
1016
1017 while (1) {
1018 id = (id + 1) & (SFXGE_NDESCS - 1);
1020 id = (id + 1) & txq->ptr_mask;
1019 tso_fill_packet_with_fragment(txq, &tso);
1020
1021 /* Move onto the next fragment? */
1022 if (tso.in_len == 0) {
1023 --n_dma_seg;
1024 if (n_dma_seg == 0)
1025 break;
1026 ++dma_seg;

--- 6 unchanged lines hidden ---

1033 /* If the queue is now full due to tiny MSS,
1034 * or we can't create another header, discard
1035 * the remainder of the input mbuf but do not
1036 * roll back the work we have done.
1037 */
1038 if (txq->n_pend_desc >
1039 SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
1040 break;
1041 next_id = (id + 1) & (SFXGE_NDESCS - 1);
1043 next_id = (id + 1) & txq->ptr_mask;
1042 if (__predict_false(tso_start_new_packet(txq, &tso,
1043 next_id)))
1044 break;
1045 id = next_id;
1046 }
1047 }
1048
1049 txq->tso_bursts++;

--- 15 unchanged lines hidden ---

1065 return;
1066
1067 mtx_lock(SFXGE_TXQ_LOCK(txq));
1068
1069 if (txq->blocked) {
1070 unsigned int level;
1071
1072 level = txq->added - txq->completed;
1073 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
1075 if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
1074 txq->blocked = 0;
1075 }
1076
1077 sfxge_tx_qdpl_service(txq);
1078 /* note: lock has been dropped */
1079}
1080
1081void

--- 59 unchanged lines hidden ---

1141 txq->completed = 0;
1142 txq->reaped = 0;
1143
1144 /* Destroy the common code transmit queue. */
1145 efx_tx_qdestroy(txq->common);
1146 txq->common = NULL;
1147
1148 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1149 EFX_TXQ_NBUFS(SFXGE_NDESCS));
1151 EFX_TXQ_NBUFS(sc->txq_entries));
1150
1151 mtx_unlock(&evq->lock);
1152 mtx_unlock(SFXGE_TXQ_LOCK(txq));
1153}
1154
1155static int
1156sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
1157{

--- 9 unchanged lines hidden ---

1167
1168 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1169 ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1170 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
1171 ("evq->init_state != SFXGE_EVQ_STARTED"));
1172
1173 /* Program the buffer table. */
1174 if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
1175 EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
1176 return rc;
1177 EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
1178 return (rc);
1177
1178 /* Determine the kind of queue we are creating. */
1179 switch (txq->type) {
1180 case SFXGE_TXQ_NON_CKSUM:
1181 flags = 0;
1182 break;
1183 case SFXGE_TXQ_IP_CKSUM:
1184 flags = EFX_CKSUM_IPV4;

--- 4 unchanged lines hidden ---

1189 default:
1190 KASSERT(0, ("Impossible TX queue"));
1191 flags = 0;
1192 break;
1193 }
1194
1195 /* Create the common code transmit queue. */
1196 if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
1197 SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
1199 sc->txq_entries, txq->buf_base_id, flags, evq->common,
1198 &txq->common)) != 0)
1199 goto fail;
1200
1201 mtx_lock(SFXGE_TXQ_LOCK(txq));
1202
1203 /* Enable the transmit queue. */
1204 efx_tx_qenable(txq->common);
1205
1206 txq->init_state = SFXGE_TXQ_STARTED;
1207
1208 mtx_unlock(SFXGE_TXQ_LOCK(txq));
1209
1210 return (0);
1211
1212fail:
1213 efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
1214 EFX_TXQ_NBUFS(SFXGE_NDESCS));
1215 return rc;
1216 EFX_TXQ_NBUFS(sc->txq_entries));
1217 return (rc);
1216}
1217
1218void
1219sfxge_tx_stop(struct sfxge_softc *sc)
1220{
1221 const efx_nic_cfg_t *encp;
1222 int index;
1223

--- 51 unchanged lines hidden ---

1275
1276/**
1277 * Destroy a transmit queue.
1278 */
1279static void
1280sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
1281{
1282 struct sfxge_txq *txq;
1283 unsigned int nmaps = SFXGE_NDESCS;
1285 unsigned int nmaps;
1284
1285 txq = sc->txq[index];
1286
1287 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
1288 ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
1289
1290 if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
1291 tso_fini(txq);
1292
1293 /* Free the context arrays. */
1294 free(txq->pend_desc, M_SFXGE);
1297 nmaps = sc->txq_entries;
1295 while (nmaps-- != 0)
1296 bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
1297 free(txq->stmp, M_SFXGE);
1298
1299 /* Release DMA memory mapping. */
1300 sfxge_dma_free(&txq->mem);
1301
1302 sc->txq[index] = NULL;

--- 15 unchanged lines hidden ---

1318 struct sfxge_tx_dpl *stdp;
1319#endif
1320 efsys_mem_t *esmp;
1321 unsigned int nmaps;
1322 int rc;
1323
1324 txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
1325 txq->sc = sc;
1329 txq->entries = sc->txq_entries;
1330 txq->ptr_mask = txq->entries - 1;
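txq->ptr_mask is only a valid ring mask when sc->txq_entries is a power of two. Below is a hypothetical sanity check of the kind this initialisation relies on; it is a sketch, not part of this revision. powerof2() is the standard FreeBSD macro from <sys/param.h>.

#include <sys/param.h>		/* powerof2() */
#include <sys/errno.h>		/* EINVAL */

/* Hedged sketch: reject a TX ring size that would break ptr_mask. */
static int
sketch_validate_txq_entries(unsigned int entries)
{
	if (entries == 0 || !powerof2(entries))
		return (EINVAL);
	return (0);
}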
1326
1327 sc->txq[txq_index] = txq;
1328 esmp = &txq->mem;
1329
1330 evq = sc->evq[evq_index];
1331
1332 /* Allocate and zero DMA space for the descriptor ring. */
1333 if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
1338 if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
1334 return (rc);
1335 (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));
1340 (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
1336
1337 /* Allocate buffer table entries. */
1338 sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
1343 sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
1339 &txq->buf_base_id);
1340
1341 /* Create a DMA tag for packet mappings. */
1342 if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
1343 MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
1344 NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
1345 &txq->packet_dma_tag) != 0) {
1346 device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
1347 rc = ENOMEM;
1348 goto fail;
1349 }
1350
1351 /* Allocate pending descriptor array for batching writes. */
1352 txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
1357 txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
1353 M_SFXGE, M_ZERO | M_WAITOK);
1354
1355 /* Allocate and initialise mbuf DMA mapping array. */
1356 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
1361 txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
1357 M_SFXGE, M_ZERO | M_WAITOK);
1358 for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
1363 for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
1359 rc = bus_dmamap_create(txq->packet_dma_tag, 0,
1360 &txq->stmp[nmaps].map);
1361 if (rc != 0)
1362 goto fail2;
1363 }
1364
1365 if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
1366 (rc = tso_init(txq)) != 0)

--- 142 unchanged lines hidden ---