/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/sfxge/common/ef10_ev.c 342516 2018-12-26 10:25:01Z arybchik $");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
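
/*
 * Illustrative note (not part of the original source): with
 * EFSYS_OPT_QSTATS enabled, a call site such as
 *
 *	EFX_EV_QSTAT_INCR(eep, EV_RX);
 *
 * expands to an increment of the per-queue counter eep->ee_stat[EV_RX];
 * with the option disabled, the macro compiles to nothing, so statistics
 * calls carry no cost in production builds.
 */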

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be an unallocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN);
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled, no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN);
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
		MC_CMD_FINI_EVQ_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the EVQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
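
/*
 * Illustrative sketch (not part of the original source): a caller that
 * must tolerate an MC reboot would typically treat EALREADY as success:
 *
 *	rc = efx_mcdi_fini_evq(enp, eep->ee_index);
 *	if ((rc != 0) && (rc != EALREADY))
 *		goto fail1;
 *
 * ef10_ev_qdestroy() below simply discards the return value instead.
 */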



	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford, the low latency license is required to enable
		 * RX and event cut through and to disable RX batching.  If
		 * the event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization-
		 * supporting firmware (i.e. the full-featured firmware
		 * variant) and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
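
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * consumer creating an interrupting, auto-typed event queue of
 * EFX_EVQ_MINNEVS entries with a 50us moderation timer might call
 * (normally via the generic efx_ev_qcreate() wrapper):
 *
 *	rc = ef10_ev_qcreate(enp, 0, esmp, EFX_EVQ_MINNEVS, 0, 50,
 *	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT | EFX_EVQ_FLAGS_TYPE_AUTO, eep);
 *
 * where esmp and eep are assumed to be a DMA region and an efx_evq_t
 * prepared by the caller.
 */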

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
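
/*
 * Illustrative sketch (not part of the original source): after a consumer
 * has processed events up to |count| (typically via efx_ev_qpoll()), it
 * re-primes the queue so further interrupts or wake-ups can be raised:
 *
 *	if ((rc = ef10_ev_qprime(eep, count)) != 0)
 *		goto fail1;
 */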

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
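
/*
 * Illustrative sketch (not part of the original source): posting a 16-bit
 * software event that will later be delivered to the eec_software callback
 * by ef10_ev_drv_gen() below:
 *
 *	ef10_ev_qpost(eep, 0x1234);
 *
 * Values wider than 16 bits are reported as bad events by
 * ef10_ev_drv_gen() and abort event processing.
 */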

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */
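
/*
 * Illustrative sketch (not part of the original source): with
 * EFSYS_OPT_QSTATS enabled, a driver would periodically drain the
 * per-queue counters into its own EV_NQSTATS-sized array:
 *
 *	efsys_stat_t ev_stats[EV_NQSTATS];
 *
 *	ef10_ev_qstats_update(eep, ev_stats);
 *
 * Each call adds the accumulated counts to ev_stats[] and zeroes the
 * queue's internal counters.
 */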


static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;
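
	/*
	 * Worked example (illustrative, assuming a 4-bit low-bits field for
	 * clarity): a stored read pointer ending in 0xe and next_read_lbits
	 * of 0x2 give desc_count = (0x2 - 0xe) & 0xf = 4, so the masked
	 * subtraction correctly handles wrap-around of the hardware
	 * pointer's low bits.
	 */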

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = erp->er_mask;
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */