ef10_tx.c: r300607 -> r300840

/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sfxge/common/ef10_tx.c 300840 2016-05-27 11:44:40Z arybchik $");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
        do { \
                (_etp)->et_stat[_stat]++; \
                _NOTE(CONSTANTCONDITION) \
        } while (B_FALSE)
#else
#define EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

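/*
 * Issue the MC_CMD_INIT_TXQ MCDI request to create a TX queue on the MC:
 * pass the queue size, owning event queue, label, instance and offload
 * flags, together with one 64-bit DMA address per EFX_BUF_SIZE page of the
 * memory region backing the descriptor ring.
 */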
static __checkReturn efx_rc_t
efx_mcdi_init_txq(
        __in efx_nic_t *enp,
        __in uint32_t size,
        __in uint32_t target_evq,
        __in uint32_t label,
        __in uint32_t instance,
        __in uint16_t flags,
        __in efsys_mem_t *esmp)
{
        efx_mcdi_req_t req;
        uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
                            MC_CMD_INIT_TXQ_OUT_LEN)];
        efx_qword_t *dma_addr;
        uint64_t addr;
        int npages;
        int i;
        efx_rc_t rc;

        EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
            EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));

        npages = EFX_TXQ_NBUFS(size);
        if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
                rc = EINVAL;
                goto fail1;
        }

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_INIT_TXQ;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

        MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
            INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
            INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
            (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
            INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
            (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
            INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
            INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
            INIT_TXQ_IN_CRC_MODE, 0,
            INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
        MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

        dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
        addr = EFSYS_MEM_ADDR(esmp);

        for (i = 0; i < npages; i++) {
                EFX_POPULATE_QWORD_2(*dma_addr,
                    EFX_DWORD_1, (uint32_t)(addr >> 32),
                    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

                dma_addr++;
                addr += EFX_BUF_SIZE;
        }

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {
                rc = req.emr_rc;
                goto fail2;
        }

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

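/*
 * Issue the MC_CMD_FINI_TXQ MCDI request to tear the TX queue down on the
 * MC. MC_CMD_ERR_EALREADY is not treated as a failure, so finalising a
 * queue that the MC has already torn down is harmless.
 */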
static __checkReturn efx_rc_t
efx_mcdi_fini_txq(
        __in efx_nic_t *enp,
        __in uint32_t instance)
{
        efx_mcdi_req_t req;
        uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
                            MC_CMD_FINI_TXQ_OUT_LEN)];
        efx_rc_t rc;

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_FINI_TXQ;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

        MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

        efx_mcdi_execute_quiet(enp, &req);

        if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
                rc = req.emr_rc;
                goto fail1;
        }

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn efx_rc_t
ef10_tx_init(
        __in efx_nic_t *enp)
{
        _NOTE(ARGUNUSED(enp))
        return (0);
}

                        void
ef10_tx_fini(
        __in efx_nic_t *enp)
{
        _NOTE(ARGUNUSED(enp))
}

        __checkReturn efx_rc_t
ef10_tx_qcreate(
        __in efx_nic_t *enp,
        __in unsigned int index,
        __in unsigned int label,
        __in efsys_mem_t *esmp,
        __in size_t n,
        __in uint32_t id,
        __in uint16_t flags,
        __in efx_evq_t *eep,
        __in efx_txq_t *etp,
        __out unsigned int *addedp)
{
        efx_qword_t desc;
        efx_rc_t rc;

        _NOTE(ARGUNUSED(id))

        if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
            esmp)) != 0)
                goto fail1;

        /*
         * A previous user of this TX queue may have written a descriptor to the
         * TX push collector, but not pushed the doorbell (e.g. after a crash).
         * The next doorbell write would then push the stale descriptor.
         *
         * Ensure the (per network port) TX push collector is cleared by writing
         * a no-op TX option descriptor. See bug29981 for details.
         */
        *addedp = 1;
        EFX_POPULATE_QWORD_4(desc,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
            ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
            (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
            ESF_DZ_TX_OPTION_IP_CSUM,
            (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);

        EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
        ef10_tx_qpush(etp, *addedp, 0);

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qdestroy(
        __in efx_txq_t *etp)
{
        /* FIXME */
        _NOTE(ARGUNUSED(etp))
        /* FIXME */
}

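/*
 * PIO (programmed I/O) transmit support. A block is sub-allocated from one
 * of the adapter's PIO buffers and linked to this TXQ; packet data is then
 * written directly into the block through the write-combined BAR mapping
 * (see ef10_tx_qpio_write()) and sent by posting a PIO option descriptor
 * that refers to it (see ef10_tx_qpio_post()).
 */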
        __checkReturn efx_rc_t
ef10_tx_qpio_enable(
        __in efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;
        efx_piobuf_handle_t handle;
        efx_rc_t rc;

        if (etp->et_pio_size != 0) {
                rc = EALREADY;
                goto fail1;
        }

        /* Sub-allocate a PIO block from a piobuf */
        if ((rc = ef10_nic_pio_alloc(enp,
            &etp->et_pio_bufnum,
            &handle,
            &etp->et_pio_blknum,
            &etp->et_pio_offset,
            &etp->et_pio_size)) != 0) {
                goto fail2;
        }
        EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

        /* Link the piobuf to this TXQ */
        if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
                goto fail3;
        }

        /*
         * et_pio_offset is the offset of the sub-allocated block within the
         * hardware PIO buffer. It is used as the buffer address in the PIO
         * option descriptor.
         *
         * et_pio_write_offset is the offset of the sub-allocated block from the
         * start of the write-combined memory mapping, and is used for writing
         * data into the PIO buffer.
         */
        etp->et_pio_write_offset =
            (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
            ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

        return (0);

fail3:
        EFSYS_PROBE(fail3);
        ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
        etp->et_pio_size = 0;
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qpio_disable(
        __in efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;

        if (etp->et_pio_size != 0) {
                /* Unlink the piobuf from this TXQ */
                ef10_nic_pio_unlink(enp, etp->et_index);

                /* Free the sub-allocated PIO block */
                ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
                etp->et_pio_size = 0;
                etp->et_pio_write_offset = 0;
        }
}

        __checkReturn efx_rc_t
ef10_tx_qpio_write(
        __in efx_txq_t *etp,
        __in_ecount(length) uint8_t *buffer,
        __in size_t length,
        __in size_t offset)
{
        efx_nic_t *enp = etp->et_enp;
        efsys_bar_t *esbp = enp->en_esbp;
        uint32_t write_offset;
        uint32_t write_offset_limit;
        efx_qword_t *eqp;
        efx_rc_t rc;

        EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

        if (etp->et_pio_size == 0) {
                rc = ENOENT;
                goto fail1;
        }
        if (offset + length > etp->et_pio_size) {
                rc = ENOSPC;
                goto fail2;
        }

        /*
         * Writes to PIO buffers must be 64 bit aligned, and multiples of
         * 64 bits.
         */
        write_offset = etp->et_pio_write_offset + offset;
        write_offset_limit = write_offset + length;
        eqp = (efx_qword_t *)buffer;
        while (write_offset < write_offset_limit) {
                EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
                eqp++;
                write_offset += sizeof (efx_qword_t);
        }

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn efx_rc_t
ef10_tx_qpio_post(
        __in efx_txq_t *etp,
        __in size_t pkt_length,
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        efx_qword_t pio_desc;
        unsigned int id;
        size_t offset;
        unsigned int added = *addedp;
        efx_rc_t rc;


        if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

        if (etp->et_pio_size == 0) {
                rc = ENOENT;
                goto fail2;
        }

        id = added++ & etp->et_mask;
        offset = id * sizeof (efx_qword_t);

        EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
            unsigned int, id, uint32_t, etp->et_pio_offset,
            size_t, pkt_length);

        EFX_POPULATE_QWORD_5(pio_desc,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE, 1,
            ESF_DZ_TX_PIO_CONT, 0,
            ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
            ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

        EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

        EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

        *addedp = added;
        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}
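
/*
 * Illustrative sketch only (not part of this file): one way a caller might
 * combine the PIO helpers above for a small packet. The queue indices
 * ("completed"/"addedp"), the padded copy of the packet and the helper name
 * below are hypothetical caller state; ef10_tx_qpio_write() requires the
 * written length to be a multiple of sizeof (efx_qword_t).
 */
static __checkReturn efx_rc_t
ef10_tx_pio_send_sketch(
        __in efx_txq_t *etp,
        __in uint8_t *padded_pkt,       /* packet copy, padded to qwords */
        __in size_t write_len,          /* padded length, multiple of 8 */
        __in size_t pkt_len,            /* actual packet length */
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        efx_rc_t rc;

        /* Copy the padded packet into this queue's PIO block (offset 0). */
        if ((rc = ef10_tx_qpio_write(etp, padded_pkt, write_len, 0)) != 0)
                return (rc);

        /* Queue a PIO option descriptor that refers to the copied data. */
        if ((rc = ef10_tx_qpio_post(etp, pkt_len, completed, addedp)) != 0)
                return (rc);

        /* Ring the doorbell, pushing the descriptor that was just added. */
        ef10_tx_qpush(etp, *addedp, *addedp - 1);

        return (0);
}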

        __checkReturn efx_rc_t
ef10_tx_qpost(
        __in efx_txq_t *etp,
        __in_ecount(n) efx_buffer_t *eb,
        __in unsigned int n,
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        unsigned int added = *addedp;
        unsigned int i;
        efx_rc_t rc;

        if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

        for (i = 0; i < n; i++) {
                efx_buffer_t *ebp = &eb[i];
                efsys_dma_addr_t addr = ebp->eb_addr;
                size_t size = ebp->eb_size;
                boolean_t eop = ebp->eb_eop;
                unsigned int id;
                size_t offset;
                efx_qword_t qword;

                /* Fragments must not span 4k boundaries. */
                EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));

                id = added++ & etp->et_mask;
                offset = id * sizeof (efx_qword_t);

                EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
                    unsigned int, id, efsys_dma_addr_t, addr,
                    size_t, size, boolean_t, eop);

                EFX_POPULATE_QWORD_5(qword,
                    ESF_DZ_TX_KER_TYPE, 0,
                    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
                    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
                    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
                    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

                EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
        }

        EFX_TX_QSTAT_INCR(etp, TX_POST);

        *addedp = added;
        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}
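
/*
 * Illustrative sketch only (not part of this file): posting a two-fragment
 * packet with ef10_tx_qpost() and then ringing the doorbell. The fragment
 * addresses and lengths come from the caller's DMA mapping; the helper name
 * is hypothetical.
 */
static __checkReturn efx_rc_t
ef10_tx_dma_send_sketch(
        __in efx_txq_t *etp,
        __in efsys_dma_addr_t hdr_addr,
        __in size_t hdr_size,
        __in efsys_dma_addr_t body_addr,
        __in size_t body_size,
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        efx_buffer_t eb[2];
        unsigned int old_added = *addedp;
        efx_rc_t rc;

        eb[0].eb_addr = hdr_addr;
        eb[0].eb_size = hdr_size;
        eb[0].eb_eop = B_FALSE;         /* more fragments follow */

        eb[1].eb_addr = body_addr;
        eb[1].eb_size = body_size;
        eb[1].eb_eop = B_TRUE;          /* end of packet */

        if ((rc = ef10_tx_qpost(etp, eb, 2, completed, addedp)) != 0)
                return (rc);

        /* Push the first newly added descriptor along with the doorbell. */
        ef10_tx_qpush(etp, *addedp, old_added);

        return (0);
}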

/*
 * This improves performance by pushing a TX descriptor at the same time as the
 * doorbell. The descriptor must be added to the TXQ, so that it can be used if
 * the hardware decides not to use the pushed descriptor.
 */
                        void
ef10_tx_qpush(
        __in efx_txq_t *etp,
        __in unsigned int added,
        __in unsigned int pushed)
{
        efx_nic_t *enp = etp->et_enp;
        unsigned int wptr;
        unsigned int id;
        size_t offset;
        efx_qword_t desc;
        efx_oword_t oword;

        wptr = added & etp->et_mask;
        id = pushed & etp->et_mask;
        offset = id * sizeof (efx_qword_t);

        EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
        EFX_POPULATE_OWORD_3(oword,
            ERF_DZ_TX_DESC_WPTR, wptr,
            ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
            ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

        /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
        EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
        EFSYS_PIO_WRITE_BARRIER();
        EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
            &oword);
}

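/*
 * Copy caller-built descriptors (see the ef10_tx_qdesc_*_create() functions
 * below) into the TX ring without interpreting them. The caller rings the
 * doorbell separately with ef10_tx_qpush().
 */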
        __checkReturn efx_rc_t
ef10_tx_qdesc_post(
        __in efx_txq_t *etp,
        __in_ecount(n) efx_desc_t *ed,
        __in unsigned int n,
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        unsigned int added = *addedp;
        unsigned int i;
        efx_rc_t rc;

        if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
                rc = ENOSPC;
                goto fail1;
        }

        for (i = 0; i < n; i++) {
                efx_desc_t *edp = &ed[i];
                unsigned int id;
                size_t offset;

                id = added++ & etp->et_mask;
                offset = id * sizeof (efx_desc_t);

                EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
        }

        EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
            unsigned int, added, unsigned int, n);

        EFX_TX_QSTAT_INCR(etp, TX_POST);

        *addedp = added;
        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qdesc_dma_create(
        __in efx_txq_t *etp,
        __in efsys_dma_addr_t addr,
        __in size_t size,
        __in boolean_t eop,
        __out efx_desc_t *edp)
{
        /* Fragments must not span 4k boundaries. */
        EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);

        EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
            efsys_dma_addr_t, addr,
            size_t, size, boolean_t, eop);

        EFX_POPULATE_QWORD_5(edp->ed_eq,
            ESF_DZ_TX_KER_TYPE, 0,
            ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
            ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
            ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
            ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

                        void
ef10_tx_qdesc_tso_create(
        __in efx_txq_t *etp,
        __in uint16_t ipv4_id,
        __in uint32_t tcp_seq,
        __in uint8_t tcp_flags,
        __out efx_desc_t *edp)
{
        EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
            uint16_t, ipv4_id, uint32_t, tcp_seq,
            uint8_t, tcp_flags);

        EFX_POPULATE_QWORD_5(edp->ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
            ESF_DZ_TX_TSO_IP_ID, ipv4_id,
            ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

                        void
ef10_tx_qdesc_tso2_create(
        __in efx_txq_t *etp,
        __in uint16_t ipv4_id,
        __in uint32_t tcp_seq,
        __in uint16_t tcp_mss,
        __out_ecount(count) efx_desc_t *edp,
        __in int count)
{
        EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
            uint16_t, ipv4_id, uint32_t, tcp_seq,
            uint16_t, tcp_mss);

        EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

        EFX_POPULATE_QWORD_5(edp[0].ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_OPTION_TYPE,
            ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
            ESF_DZ_TX_TSO_IP_ID, ipv4_id,
            ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
        EFX_POPULATE_QWORD_4(edp[1].ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_TSO,
            ESF_DZ_TX_TSO_OPTION_TYPE,
            ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
            ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}
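
/*
 * Illustrative sketch only (not part of this file): composing a FATSOv2
 * send from the descriptor helpers above. The TXQ is assumed to have been
 * created with EFX_TXQ_FATSOV2 (so TSOv2 was enabled in INIT_TXQ), the
 * packet is assumed to be a single DMA fragment, and the helper name is
 * hypothetical. The caller still rings the doorbell with ef10_tx_qpush().
 */
static __checkReturn efx_rc_t
ef10_tx_tso2_send_sketch(
        __in efx_txq_t *etp,
        __in uint16_t ipv4_id,
        __in uint32_t tcp_seq,
        __in uint16_t tcp_mss,
        __in efsys_dma_addr_t pkt_addr,
        __in size_t pkt_size,
        __in unsigned int completed,
        __inout unsigned int *addedp)
{
        efx_desc_t desc[EFX_TX_FATSOV2_OPT_NDESCS + 1];

        /* TSO option descriptors (FATSO2A and FATSO2B). */
        ef10_tx_qdesc_tso2_create(etp, ipv4_id, tcp_seq, tcp_mss,
            &desc[0], EFX_TX_FATSOV2_OPT_NDESCS);

        /* One DMA descriptor covering the packet data. */
        ef10_tx_qdesc_dma_create(etp, pkt_addr, pkt_size, B_TRUE,
            &desc[EFX_TX_FATSOV2_OPT_NDESCS]);

        return (ef10_tx_qdesc_post(etp, desc, EFX_TX_FATSOV2_OPT_NDESCS + 1,
            completed, addedp));
}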

                        void
ef10_tx_qdesc_vlantci_create(
        __in efx_txq_t *etp,
        __in uint16_t tci,
        __out efx_desc_t *edp)
{
        EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
            uint16_t, tci);

        EFX_POPULATE_QWORD_4(edp->ed_eq,
            ESF_DZ_TX_DESC_IS_OPT, 1,
            ESF_DZ_TX_OPTION_TYPE,
            ESE_DZ_TX_OPTION_DESC_VLAN,
            ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
            ESF_DZ_TX_VLAN_TAG1, tci);
}


        __checkReturn efx_rc_t
ef10_tx_qpace(
        __in efx_txq_t *etp,
        __in unsigned int ns)
{
        efx_rc_t rc;

        /* FIXME */
        _NOTE(ARGUNUSED(etp, ns))
        if (B_FALSE) {
                rc = ENOTSUP;
                goto fail1;
        }
        /* FIXME */

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

        __checkReturn efx_rc_t
ef10_tx_qflush(
        __in efx_txq_t *etp)
{
        efx_nic_t *enp = etp->et_enp;
        efx_rc_t rc;

        if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
                goto fail1;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

        return (rc);
}

                        void
ef10_tx_qenable(
        __in efx_txq_t *etp)
{
        /* FIXME */
        _NOTE(ARGUNUSED(etp))
        /* FIXME */
}

#if EFSYS_OPT_QSTATS
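/*
 * Transfer the software counters accumulated via EFX_TX_QSTAT_INCR() into
 * the caller's statistics array and reset the per-queue counts.
 */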
                        void
ef10_tx_qstats_update(
        __in efx_txq_t *etp,
        __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
        unsigned int id;

        for (id = 0; id < TX_NQSTATS; id++) {
                efsys_stat_t *essp = &stat[id];

                EFSYS_STAT_INCR(essp, etp->et_stat[id]);
                etp->et_stat[id] = 0;
        }
}

#endif  /* EFSYS_OPT_QSTATS */

#endif  /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */