ef10_ev.c revision 310939
1/*-
2 * Copyright (c) 2012-2016 Solarflare Communications Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 *    this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 *    this list of conditions and the following disclaimer in the documentation
12 *    and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: stable/11/sys/dev/sfxge/common/ef10_ev.c 310939 2016-12-31 11:21:49Z arybchik $");
33
34#include "efx.h"
35#include "efx_impl.h"
36#if EFSYS_OPT_MON_STATS
37#include "mcdi_mon.h"
38#endif
39
40#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
41
#if EFSYS_OPT_QSTATS
/*
 * Increment the given per-event-queue statistic counter.  Compiles away
 * to nothing when event queue statistics support is not built in.
 */
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
51
52
/*
 * Forward declarations of the per-event-type handlers that
 * ef10_ev_qcreate() installs into the event queue's dispatch table.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
87
88
/*
 * Set the moderation timer of an event queue using the
 * MC_CMD_SET_EVQ_TMR MCDI command.
 *
 * instance is the event queue index; mode is an MCDI TIMER_MODE value;
 * timer_ns is used for both the load and reload periods, in
 * nanoseconds.  Called from ef10_ev_qmoderate() when the bug61265
 * workaround is enabled.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Check that the firmware returned a complete response */
	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
134
/*
 * Create an event queue using the original MC_CMD_INIT_EVQ request
 * format (used by ef10_ev_qcreate() when INIT_EVQ v2 is not supported).
 *
 * nevs is the number of event queue entries; esmp is the host memory
 * backing the queue (EFX_EVQ_NBUFS(nevs) buffers of EFX_BUF_SIZE each);
 * irq is the function-relative interrupt vector; us is the moderation
 * timer period in microseconds (0 disables the timer); flags carries
 * the EFX_EVQ_FLAGS_TYPE_* selection.  low_latency selects event cut
 * through when the queue type is AUTO.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int ev_cut_through;
	efx_rc_t rc;

	/* The request length varies with the number of EVQ buffers */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested together
	 * (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled no RX batching will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the requested period to hardware timer ticks */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	/* Populate the 64-bit DMA address of each event queue buffer */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
267
268
/*
 * Create an event queue using the v2 MC_CMD_INIT_EVQ request format,
 * which passes the queue type to the firmware so it can choose the
 * latency/throughput settings itself when the type is AUTO.
 *
 * Parameters are as for efx_mcdi_init_evq(), except that no
 * low_latency hint is needed.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	/* The request length varies with the number of EVQ buffers */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	/* Map the EFX queue type onto the MCDI queue type */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the requested period to hardware timer ticks */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	/* Populate the 64-bit DMA address of each event queue buffer */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	/* Report the flag settings that the firmware actually chose */
	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
393
/*
 * Tear down the event queue with the given index via the
 * MC_CMD_FINI_EVQ MCDI command.
 *
 * Uses efx_mcdi_execute_quiet() so that a failure is not logged as an
 * error; the only caller (ef10_ev_qdestroy) ignores the return value.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
427
428
429
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module initialization is required. */
	_NOTE(ARGUNUSED(enp))
	return (0);
}
437
			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module teardown is required. */
	_NOTE(ARGUNUSED(enp))
}
444
	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	/*
	 * Create and initialize an EF10 event queue.
	 *
	 * n must be a power of 2 between EFX_EVQ_MINNEVS and
	 * EFX_EVQ_MAXNEVS; index must be below the NIC's EVQ limit; us
	 * (moderation period in microseconds) must not exceed
	 * enc_evq_timer_max_us.  The handler table in eep is populated
	 * with the ef10_ev_* handlers, then the queue is created via
	 * whichever INIT_EVQ request format the firmware supports.
	 */
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	irq = index;	/* INIT_EVQ expects function-relative vector number */

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut through and to disable RX batching.  If event
		 * queue type in flags is auto, we let the firmware decide the
		 * settings to use. If the adapter has a low latency license,
		 * it will choose the best settings for low latency, otherwise
		 * it will choose the best settings for throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If event queue type in flags is auto, we favour throughput
		 * if the adapter is running virtualization supporting firmware
		 * (i.e. the full featured firmware variant)
		 * and latency otherwise. The Ethernet Virtual Bridging
		 * capability is used to make this decision. (Note though that
		 * the low latency firmware variant is also best for
		 * throughput and corresponding type should be specified
		 * to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
540
541			void
542ef10_ev_qdestroy(
543	__in		efx_evq_t *eep)
544{
545	efx_nic_t *enp = eep->ee_enp;
546
547	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
548	    enp->en_family == EFX_FAMILY_MEDFORD);
549
550	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
551}
552
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	/*
	 * Acknowledge processed events by writing the updated read
	 * pointer (derived from count) for this queue to hardware.
	 * Always returns 0.
	 */
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	/* The read pointer wraps at the queue size */
	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * With the bug35388 workaround the read pointer is written
		 * through the indirect access register in two parts of
		 * ERF_DD_EVQ_IND_RPTR_WIDTH bits each, high part first.
		 * The static asserts check that the supported queue sizes
		 * fit this two-part encoding.
		 */
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		/* Write the high bits of the read pointer ... */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		/* ... then the low bits */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		/* Write the read pointer directly to the EVQ RPTR register */
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
593
594static	__checkReturn	efx_rc_t
595efx_mcdi_driver_event(
596	__in		efx_nic_t *enp,
597	__in		uint32_t evq,
598	__in		efx_qword_t data)
599{
600	efx_mcdi_req_t req;
601	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
602			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
603	efx_rc_t rc;
604
605	req.emr_cmd = MC_CMD_DRIVER_EVENT;
606	req.emr_in_buf = payload;
607	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
608	req.emr_out_buf = payload;
609	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
610
611	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
612
613	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
614	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
615	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
616	    EFX_QWORD_FIELD(data, EFX_DWORD_1));
617
618	efx_mcdi_execute(enp, &req);
619
620	if (req.emr_rc != 0) {
621		rc = req.emr_rc;
622		goto fail1;
623	}
624
625	return (0);
626
627fail1:
628	EFSYS_PROBE1(fail1, efx_rc_t, rc);
629
630	return (rc);
631}
632
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	/*
	 * Post a driver-generated (software) event carrying 16 bits of
	 * user data to the event queue.  On EF10 this is done via the
	 * MC_CMD_DRIVER_EVENT MCDI command; MCDI failure is ignored.
	 */
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	/* Build a DRV_GEN event; ef10_ev_drv_gen() decodes the data */
	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
648
	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	/*
	 * Set the event queue's interrupt moderation period to us
	 * microseconds (0 disables the timer).
	 *
	 * Depending on NIC quirks the timer is programmed via the
	 * SET_EVQ_TMR MCDI command (bug61265 workaround), the indirect
	 * timer access register (bug35388 workaround), or the EVQ timer
	 * register directly.
	 */
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		/* The MCDI command takes the period in nanoseconds */
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			/* Program the timer via the indirect access register */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/* Program the EVQ timer register directly */
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
722
723
724#if EFSYS_OPT_QSTATS
725			void
726ef10_ev_qstats_update(
727	__in				efx_evq_t *eep,
728	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
729{
730	unsigned int id;
731
732	for (id = 0; id < EV_NQSTATS; id++) {
733		efsys_stat_t *essp = &stat[id];
734
735		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
736		eep->ee_stat[id] = 0;
737	}
738}
739#endif /* EFSYS_OPT_QSTATS */
740
741
/*
 * Handle an RX completion event.
 *
 * Decodes the packet classification and error bits into EFX_PKT_*,
 * EFX_CKSUM_* and EFX_DISCARD flags, advances the per-label RXQ
 * descriptor read pointer, and delivers the completion (with the index
 * of the last descriptor consumed) to the eec_rx callback.  Returns
 * the callback's abort indication, or B_FALSE if the event is dropped.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	/* Decode the L3/L4 classification into packet/checksum flags */
	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}
907
908static	__checkReturn	boolean_t
909ef10_ev_tx(
910	__in		efx_evq_t *eep,
911	__in		efx_qword_t *eqp,
912	__in		const efx_ev_callbacks_t *eecp,
913	__in_opt	void *arg)
914{
915	efx_nic_t *enp = eep->ee_enp;
916	uint32_t id;
917	uint32_t label;
918	boolean_t should_abort;
919
920	EFX_EV_QSTAT_INCR(eep, EV_TX);
921
922	/* Discard events after RXQ/TXQ errors */
923	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
924		return (B_FALSE);
925
926	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
927		/* Drop this event */
928		return (B_FALSE);
929	}
930
931	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
932	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
933	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
934
935	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
936
937	EFSYS_ASSERT(eecp->eec_tx != NULL);
938	should_abort = eecp->eec_tx(arg, label, id);
939
940	return (should_abort);
941}
942
/*
 * Handle a driver event by dispatching on its sub-code to the timer,
 * wake-up or start-up callback.  Unknown sub-codes are only reported
 * via a probe.  Returns the callback's abort indication (B_FALSE if no
 * callback was invoked).
 */
static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
992
993static	__checkReturn	boolean_t
994ef10_ev_drv_gen(
995	__in		efx_evq_t *eep,
996	__in		efx_qword_t *eqp,
997	__in		const efx_ev_callbacks_t *eecp,
998	__in_opt	void *arg)
999{
1000	uint32_t data;
1001	boolean_t should_abort;
1002
1003	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1004	should_abort = B_FALSE;
1005
1006	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
1007	if (data >= ((uint32_t)1 << 16)) {
1008		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1009		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1010		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1011
1012		return (B_TRUE);
1013	}
1014
1015	EFSYS_ASSERT(eecp->eec_software != NULL);
1016	should_abort = eecp->eec_software(arg, (uint16_t)data);
1017
1018	return (should_abort);
1019}
1020
/*
 * Dispatch an MCDI event to the appropriate handler or driver callback.
 *
 * Covers MCDI command completion and proxy-authorization responses,
 * link/sensor/firmware notifications, MC reboot/assertion death
 * notifications, and RXQ/TXQ error and flush events.  Returns B_TRUE
 * if event queue processing should be aborted.
 */
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		/* MC firmware has asserted */
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an asynchronous MCDI request */
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		/* Decode the new link mode and notify the driver */
		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not been seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
1220
1221		void
1222ef10_ev_rxlabel_init(
1223	__in		efx_evq_t *eep,
1224	__in		efx_rxq_t *erp,
1225	__in		unsigned int label)
1226{
1227	efx_evq_rxq_state_t *eersp;
1228
1229	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1230	eersp = &eep->ee_rxq_state[label];
1231
1232	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
1233
1234	eersp->eers_rx_read_ptr = 0;
1235	eersp->eers_rx_mask = erp->er_mask;
1236}
1237
1238		void
1239ef10_ev_rxlabel_fini(
1240	__in		efx_evq_t *eep,
1241	__in		unsigned int label)
1242{
1243	efx_evq_rxq_state_t *eersp;
1244
1245	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1246	eersp = &eep->ee_rxq_state[label];
1247
1248	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1249
1250	eersp->eers_rx_read_ptr = 0;
1251	eersp->eers_rx_mask = 0;
1252}
1253
1254#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
1255