/*-
 * Copyright (c) 2012-2015 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/sfxge/common/hunt_tx.c 294381 2016-01-20 07:51:23Z arybchik $");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_HUNTINGTON

#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif

static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t size,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(EFX_TXQ_MAXNDESCS(&enp->en_nic_cfg)));

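	/*
	 * The DMA addresses for all of the queue's buffers must fit into a
	 * single INIT_TXQ request.
	 */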
	npages = EFX_TXQ_NBUFS(size);
	if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);

	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

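	/*
	 * Fill in the DMA address array: one 64-bit address per EFX_BUF_SIZE
	 * buffer, written as low and high dwords.
	 */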
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
			    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute(enp, &req);

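	/*
	 * EALREADY is not treated as an error here: it indicates that the
	 * TXQ has already been destroyed (e.g. if the MC has rebooted).
	 */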
	if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_qword_t desc;
	efx_rc_t rc;


	if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
	    esmp)) != 0)
		goto fail1;

	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	EFX_POPULATE_QWORD_4(desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

		void
ef10_tx_qdestroy(
	__in	efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
	etp->et_pio_size = 0;
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}
	if (offset + length > etp->et_pio_size)	{
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64-bit aligned, and a multiple of
	 * 64 bits in length.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in			efx_txq_t *etp,
	__in			size_t pkt_length,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;


	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}

	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
		    unsigned int, id, uint32_t, etp->et_pio_offset,
		    size_t, pkt_length);

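	/*
	 * Build a PIO option descriptor: the byte count is the packet length
	 * and the buffer address is the offset of the sub-allocated block
	 * within the hardware PIO buffer.
	 */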
	EFX_POPULATE_QWORD_5(pio_desc,
			ESF_DZ_TX_DESC_IS_OPT, 1,
			ESF_DZ_TX_OPTION_TYPE, 1,
			ESF_DZ_TX_PIO_CONT, 0,
			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qpost(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_buffer_t *eb,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

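	/*
	 * Build one DMA descriptor per buffer fragment; descriptor ring
	 * indices wrap via et_mask.
	 */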
	for (i = 0; i < n; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* Fragments must not span 4k boundaries. */
		EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * This improves performance by pushing a TX descriptor at the same time as the
 * doorbell. The descriptor must also be added to the TXQ, so that it can be
 * used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;

	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

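	/*
	 * Read the pushed descriptor back from the TXQ's host memory so that
	 * it can be written into the doorbell register along with the new
	 * write pointer.
	 */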
	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
	EFX_POPULATE_OWORD_3(oword,
	    ERF_DZ_TX_DESC_WPTR, wptr,
	    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
	    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
	EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
	EFSYS_PIO_WRITE_BARRIER();
	EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
				    &oword);
}

	__checkReturn	efx_rc_t
ef10_tx_qdesc_post(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_desc_t *ed,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	for (i = 0; i < n; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
		    unsigned int, added, unsigned int, n);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	/* Fragments must not span 4k boundaries. */
	EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
		    efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}

	void
hunt_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t  tcp_flags,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}

	void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
		    uint16_t, ipv4_id, uint32_t, tcp_seq,
		    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

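	/*
	 * FATSOv2 uses a pair of TSO option descriptors: descriptor A carries
	 * the IP ID and TCP sequence number, and descriptor B carries the
	 * TCP MSS.
	 */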
	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_TSO,
			    ESF_DZ_TX_TSO_OPTION_TYPE,
			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}

	void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t  tci,
	__out	efx_desc_t *edp)
{
	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
		    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
			    ESF_DZ_TX_DESC_IS_OPT, 1,
			    ESF_DZ_TX_OPTION_TYPE,
			    ESE_DZ_TX_OPTION_DESC_VLAN,
			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
			    ESF_DZ_TX_VLAN_TAG1, tci);
}


	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	/* FIXME */
	_NOTE(ARGUNUSED(etp))
	/* FIXME */
}

#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */

#endif /* EFSYS_OPT_HUNTINGTON */