ql_isr.c revision 330555
1/*
2 * Copyright (c) 2013-2016 Qlogic Corporation
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File: ql_isr.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_isr.c 330555 2018-03-06 23:12:32Z davidcs $");
35
36
37#include "ql_os.h"
38#include "ql_hw.h"
39#include "ql_def.h"
40#include "ql_inline.h"
41#include "ql_ver.h"
42#include "ql_glbl.h"
43#include "ql_dbg.h"
44
45static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
46		uint32_t r_idx);
47
/*
 * Name: qla_rcv_error
 * Function: marks the receive path stopped and initiates adapter recovery.
 *	Called when a status descriptor or rx buffer is found inconsistent
 *	(invalid handle, NULL mbuf, bad opcode).
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	/* set stop_rcv first: the receive loops poll it and bail out
	 * before recovery proceeds */
	ha->stop_rcv = 1;
	QL_INITIATE_RECOVERY(ha);
}
54
55
56/*
57 * Name: qla_rx_intr
58 * Function: Handles normal ethernet frames received
59 */
60static void
61qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
62{
63	qla_rx_buf_t		*rxb;
64	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
65	struct ifnet		*ifp = ha->ifp;
66	qla_sds_t		*sdsp;
67	struct ether_vlan_header *eh;
68	uint32_t		i, rem_len = 0;
69	uint32_t		r_idx = 0;
70	qla_rx_ring_t		*rx_ring;
71	struct lro_ctrl		*lro;
72
73	lro = &ha->hw.sds[sds_idx].lro;
74
75	if (ha->hw.num_rds_rings > 1)
76		r_idx = sds_idx;
77
78	ha->hw.rds[r_idx].count++;
79
80	sdsp = &ha->hw.sds[sds_idx];
81	rx_ring = &ha->rx_ring[r_idx];
82
83	for (i = 0; i < sgc->num_handles; i++) {
84		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
85
86		QL_ASSERT(ha, (rxb != NULL),
87			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
88			sds_idx));
89
90		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
91			/* log the error */
92			device_printf(ha->pci_dev,
93				"%s invalid rxb[%d, %d, 0x%04x]\n",
94				__func__, sds_idx, i, sgc->handle[i]);
95			qla_rcv_error(ha);
96			return;
97		}
98
99		mp = rxb->m_head;
100		if (i == 0)
101			mpf = mp;
102
103		QL_ASSERT(ha, (mp != NULL),
104			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
105			sds_idx));
106
107		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
108
109		rxb->m_head = NULL;
110		rxb->next = sdsp->rxb_free;
111		sdsp->rxb_free = rxb;
112		sdsp->rx_free++;
113
114		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
115			/* log the error */
116			device_printf(ha->pci_dev,
117				"%s mp  == NULL [%d, %d, 0x%04x]\n",
118				__func__, sds_idx, i, sgc->handle[i]);
119			qla_rcv_error(ha);
120			return;
121		}
122
123		if (i == 0) {
124			mpl = mpf = mp;
125			mp->m_flags |= M_PKTHDR;
126			mp->m_pkthdr.len = sgc->pkt_length;
127			mp->m_pkthdr.rcvif = ifp;
128			rem_len = mp->m_pkthdr.len;
129		} else {
130			mp->m_flags &= ~M_PKTHDR;
131			mpl->m_next = mp;
132			mpl = mp;
133			rem_len = rem_len - mp->m_len;
134		}
135	}
136
137	mpl->m_len = rem_len;
138
139	eh = mtod(mpf, struct ether_vlan_header *);
140
141	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
142		uint32_t *data = (uint32_t *)eh;
143
144		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
145		mpf->m_flags |= M_VLANTAG;
146
147		*(data + 3) = *(data + 2);
148		*(data + 2) = *(data + 1);
149		*(data + 1) = *data;
150
151		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
152	}
153
154	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
155		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
156			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
157		mpf->m_pkthdr.csum_data = 0xFFFF;
158	} else {
159		mpf->m_pkthdr.csum_flags = 0;
160	}
161
162	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
163
164	mpf->m_pkthdr.flowid = sgc->rss_hash;
165
166#if __FreeBSD_version >= 1100000
167	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
168#else
169#if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
170        M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
171#else
172        M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
173#endif
174#endif /* #if __FreeBSD_version >= 1100000 */
175
176	if (ha->hw.enable_soft_lro) {
177
178#if (__FreeBSD_version >= 1100101)
179
180		tcp_lro_queue_mbuf(lro, mpf);
181
182#else
183		if (tcp_lro_rx(lro, mpf, 0))
184			(*ifp->if_input)(ifp, mpf);
185
186#endif /* #if (__FreeBSD_version >= 1100101) */
187
188
189	} else {
190		(*ifp->if_input)(ifp, mpf);
191	}
192
193	if (sdsp->rx_free > ha->std_replenish)
194		qla_replenish_normal_rx(ha, sdsp, r_idx);
195
196	return;
197}
198
199#define QLA_TCP_HDR_SIZE        20
200#define QLA_TCP_TS_OPTION_SIZE  12
201
202/*
203 * Name: qla_lro_intr
204 * Function: Handles normal ethernet frames received
205 */
206static int
207qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
208{
209	qla_rx_buf_t *rxb;
210	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
211	struct ifnet *ifp = ha->ifp;
212	qla_sds_t *sdsp;
213	struct ether_vlan_header *eh;
214	uint32_t i, rem_len = 0, pkt_length, iplen;
215	struct tcphdr *th;
216	struct ip *ip = NULL;
217	struct ip6_hdr *ip6 = NULL;
218	uint16_t etype;
219	uint32_t r_idx = 0;
220	qla_rx_ring_t *rx_ring;
221
222	if (ha->hw.num_rds_rings > 1)
223		r_idx = sds_idx;
224
225	ha->hw.rds[r_idx].count++;
226
227	rx_ring = &ha->rx_ring[r_idx];
228
229	ha->hw.rds[r_idx].lro_pkt_count++;
230
231	sdsp = &ha->hw.sds[sds_idx];
232
233	pkt_length = sgc->payload_length + sgc->l4_offset;
234
235	if (sgc->flags & Q8_LRO_COMP_TS) {
236		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
237	} else {
238		pkt_length += QLA_TCP_HDR_SIZE;
239	}
240	ha->hw.rds[r_idx].lro_bytes += pkt_length;
241
242	for (i = 0; i < sgc->num_handles; i++) {
243		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
244
245		QL_ASSERT(ha, (rxb != NULL),
246			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
247			sds_idx));
248
249		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
250			/* log the error */
251			device_printf(ha->pci_dev,
252				"%s invalid rxb[%d, %d, 0x%04x]\n",
253				__func__, sds_idx, i, sgc->handle[i]);
254			qla_rcv_error(ha);
255			return (0);
256		}
257
258		mp = rxb->m_head;
259		if (i == 0)
260			mpf = mp;
261
262		QL_ASSERT(ha, (mp != NULL),
263			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
264			sds_idx));
265
266		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
267
268		rxb->m_head = NULL;
269		rxb->next = sdsp->rxb_free;
270		sdsp->rxb_free = rxb;
271		sdsp->rx_free++;
272
273		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
274			/* log the error */
275			device_printf(ha->pci_dev,
276				"%s mp  == NULL [%d, %d, 0x%04x]\n",
277				__func__, sds_idx, i, sgc->handle[i]);
278			qla_rcv_error(ha);
279			return (0);
280		}
281
282		if (i == 0) {
283			mpl = mpf = mp;
284			mp->m_flags |= M_PKTHDR;
285			mp->m_pkthdr.len = pkt_length;
286			mp->m_pkthdr.rcvif = ifp;
287			rem_len = mp->m_pkthdr.len;
288		} else {
289			mp->m_flags &= ~M_PKTHDR;
290			mpl->m_next = mp;
291			mpl = mp;
292			rem_len = rem_len - mp->m_len;
293		}
294	}
295
296	mpl->m_len = rem_len;
297
298	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
299
300	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
301		th->th_flags |= TH_PUSH;
302
303	m_adj(mpf, sgc->l2_offset);
304
305	eh = mtod(mpf, struct ether_vlan_header *);
306
307	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
308		uint32_t *data = (uint32_t *)eh;
309
310		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
311		mpf->m_flags |= M_VLANTAG;
312
313		*(data + 3) = *(data + 2);
314		*(data + 2) = *(data + 1);
315		*(data + 1) = *data;
316
317		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
318
319		etype = ntohs(eh->evl_proto);
320	} else {
321		etype = ntohs(eh->evl_encap_proto);
322	}
323
324	if (etype == ETHERTYPE_IP) {
325		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);
326
327		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
328				sgc->payload_length;
329
330                ip->ip_len = htons(iplen);
331
332		ha->ipv4_lro++;
333
334		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
335
336	} else if (etype == ETHERTYPE_IPV6) {
337		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);
338
339		iplen = (th->th_off << 2) + sgc->payload_length;
340
341		ip6->ip6_plen = htons(iplen);
342
343		ha->ipv6_lro++;
344
345		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
346
347	} else {
348		m_freem(mpf);
349
350		if (sdsp->rx_free > ha->std_replenish)
351			qla_replenish_normal_rx(ha, sdsp, r_idx);
352		return 0;
353	}
354
355	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
356					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
357	mpf->m_pkthdr.csum_data = 0xFFFF;
358
359	mpf->m_pkthdr.flowid = sgc->rss_hash;
360
361	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
362
363	(*ifp->if_input)(ifp, mpf);
364
365	if (sdsp->rx_free > ha->std_replenish)
366		qla_replenish_normal_rx(ha, sdsp, r_idx);
367
368	return (0);
369}
370
371static int
372qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
373	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
374{
375	uint32_t i;
376	uint16_t num_handles;
377	q80_stat_desc_t *sdesc;
378	uint32_t opcode;
379
380	*nhandles = 0;
381	dcount--;
382
383	for (i = 0; i < dcount; i++) {
384		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
385		sdesc = (q80_stat_desc_t *)
386				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
387
388		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
389
390		if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
391			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
392				__func__, (void *)sdesc->data[0],
393				(void *)sdesc->data[1]);
394			return -1;
395		}
396
397		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
398		if (!num_handles) {
399			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
400				__func__, (void *)sdesc->data[0],
401				(void *)sdesc->data[1]);
402			return -1;
403		}
404
405		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
406			num_handles = -1;
407
408		switch (num_handles) {
409
410		case 1:
411			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
412			break;
413
414		case 2:
415			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
416			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
417			break;
418
419		case 3:
420			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
421			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
422			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
423			break;
424
425		case 4:
426			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
427			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
428			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
429			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
430			break;
431
432		case 5:
433			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
434			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
435			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
436			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
437			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
438			break;
439
440		case 6:
441			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
442			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
443			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
444			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
445			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
446			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
447			break;
448
449		case 7:
450			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
451			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
452			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
453			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
454			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
455			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
456			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
457			break;
458
459		default:
460			device_printf(ha->pci_dev,
461				"%s: invalid num handles %p %p\n",
462				__func__, (void *)sdesc->data[0],
463				(void *)sdesc->data[1]);
464
465			QL_ASSERT(ha, (0),\
466			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
467			__func__, "invalid num handles", sds_idx, num_handles,
468			(void *)sdesc->data[0],(void *)sdesc->data[1]));
469
470			qla_rcv_error(ha);
471			return 0;
472		}
473		*nhandles = *nhandles + num_handles;
474	}
475	return 0;
476}
477
478/*
479 * Name: ql_rcv_isr
480 * Function: Main Interrupt Service Routine
481 */
482uint32_t
483ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
484{
485	device_t dev;
486	qla_hw_t *hw;
487	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
488	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
489	uint32_t ret = 0;
490	qla_sgl_comp_t sgc;
491	uint16_t nhandles;
492	uint32_t sds_replenish_threshold = 0;
493	uint32_t r_idx = 0;
494	qla_sds_t *sdsp;
495
496	dev = ha->pci_dev;
497	hw = &ha->hw;
498
499	hw->sds[sds_idx].rcv_active = 1;
500	if (ha->stop_rcv) {
501		hw->sds[sds_idx].rcv_active = 0;
502		return 0;
503	}
504
505	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));
506
507	/*
508	 * receive interrupts
509	 */
510	comp_idx = hw->sds[sds_idx].sdsr_next;
511
512	while (count-- && !ha->stop_rcv) {
513
514		sdesc = (q80_stat_desc_t *)
515				&hw->sds[sds_idx].sds_ring_base[comp_idx];
516
517		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
518
519		if (!opcode)
520			break;
521
522		switch (opcode) {
523
524		case Q8_STAT_DESC_OPCODE_RCV_PKT:
525
526			desc_count = 1;
527
528			bzero(&sgc, sizeof(qla_sgl_comp_t));
529
530			sgc.rcv.pkt_length =
531				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
532			sgc.rcv.num_handles = 1;
533			sgc.rcv.handle[0] =
534				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
535			sgc.rcv.chksum_status =
536				Q8_STAT_DESC_STATUS((sdesc->data[1]));
537
538			sgc.rcv.rss_hash =
539				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
540
541			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
542				sgc.rcv.vlan_tag =
543					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
544			}
545			qla_rx_intr(ha, &sgc.rcv, sds_idx);
546			break;
547
548		case Q8_STAT_DESC_OPCODE_SGL_RCV:
549
550			desc_count =
551				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));
552
553			if (desc_count > 1) {
554				c_idx = (comp_idx + desc_count -1) &
555						(NUM_STATUS_DESCRIPTORS-1);
556				sdesc0 = (q80_stat_desc_t *)
557					&hw->sds[sds_idx].sds_ring_base[c_idx];
558
559				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
560						Q8_STAT_DESC_OPCODE_CONT) ||
561				QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
562					desc_count = 0;
563					break;
564				}
565			}
566
567			bzero(&sgc, sizeof(qla_sgl_comp_t));
568
569			sgc.rcv.pkt_length =
570				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
571					(sdesc->data[0]));
572			sgc.rcv.chksum_status =
573				Q8_STAT_DESC_STATUS((sdesc->data[1]));
574
575			sgc.rcv.rss_hash =
576				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
577
578			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
579				sgc.rcv.vlan_tag =
580					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
581			}
582
583			QL_ASSERT(ha, (desc_count <= 2) ,\
584				("%s: [sds_idx, data0, data1]="\
585				"%d, %p, %p]\n", __func__, sds_idx,\
586				(void *)sdesc->data[0],\
587				(void *)sdesc->data[1]));
588
589			sgc.rcv.num_handles = 1;
590			sgc.rcv.handle[0] =
591				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
592
593			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
594				&sgc.rcv.handle[1], &nhandles)) {
595				device_printf(dev,
596					"%s: [sds_idx, dcount, data0, data1]="
597					 "[%d, %d, 0x%llx, 0x%llx]\n",
598					__func__, sds_idx, desc_count,
599					(long long unsigned int)sdesc->data[0],
600					(long long unsigned int)sdesc->data[1]);
601				desc_count = 0;
602				break;
603			}
604
605			sgc.rcv.num_handles += nhandles;
606
607			qla_rx_intr(ha, &sgc.rcv, sds_idx);
608
609			break;
610
611		case Q8_STAT_DESC_OPCODE_SGL_LRO:
612
613			desc_count =
614				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));
615
616			if (desc_count > 1) {
617				c_idx = (comp_idx + desc_count -1) &
618						(NUM_STATUS_DESCRIPTORS-1);
619				sdesc0 = (q80_stat_desc_t *)
620					&hw->sds[sds_idx].sds_ring_base[c_idx];
621
622				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
623						Q8_STAT_DESC_OPCODE_CONT) ||
624				QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
625					desc_count = 0;
626					break;
627				}
628			}
629			bzero(&sgc, sizeof(qla_sgl_comp_t));
630
631			sgc.lro.payload_length =
632			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
633
634			sgc.lro.rss_hash =
635				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
636
637			sgc.lro.num_handles = 1;
638			sgc.lro.handle[0] =
639				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
640
641			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
642				sgc.lro.flags |= Q8_LRO_COMP_TS;
643
644			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
645				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;
646
647			sgc.lro.l2_offset =
648				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
649			sgc.lro.l4_offset =
650				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));
651
652			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
653				sgc.lro.vlan_tag =
654					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
655			}
656
657			QL_ASSERT(ha, (desc_count <= 7) ,\
658				("%s: [sds_idx, data0, data1]="\
659				 "[%d, 0x%llx, 0x%llx]\n",\
660				__func__, sds_idx,\
661				(long long unsigned int)sdesc->data[0],\
662				(long long unsigned int)sdesc->data[1]));
663
664			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
665				desc_count, &sgc.lro.handle[1], &nhandles)) {
666				device_printf(dev,
667				"%s: [sds_idx, data0, data1]="\
668				 "[%d, 0x%llx, 0x%llx]\n",\
669				__func__, sds_idx,\
670				(long long unsigned int)sdesc->data[0],\
671				(long long unsigned int)sdesc->data[1]);
672
673				desc_count = 0;
674				break;
675			}
676
677			sgc.lro.num_handles += nhandles;
678
679			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
680				device_printf(dev,
681				"%s: [sds_idx, data0, data1]="\
682				 "[%d, 0x%llx, 0x%llx]\n",\
683				__func__, sds_idx,\
684				(long long unsigned int)sdesc->data[0],\
685				(long long unsigned int)sdesc->data[1]);
686				device_printf(dev,
687				"%s: [comp_idx, c_idx, dcount, nhndls]="\
688				 "[%d, %d, %d, %d]\n",\
689				__func__, comp_idx, c_idx, desc_count,
690				sgc.lro.num_handles);
691				if (desc_count > 1) {
692				device_printf(dev,
693				"%s: [sds_idx, data0, data1]="\
694				 "[%d, 0x%llx, 0x%llx]\n",\
695				__func__, sds_idx,\
696				(long long unsigned int)sdesc0->data[0],\
697				(long long unsigned int)sdesc0->data[1]);
698				}
699			}
700
701			break;
702
703		default:
704			device_printf(dev, "%s: default 0x%llx!\n", __func__,
705					(long long unsigned int)sdesc->data[0]);
706			break;
707		}
708
709		if (desc_count == 0)
710			break;
711
712		sds_replenish_threshold += desc_count;
713
714
715		while (desc_count--) {
716			sdesc->data[0] = 0ULL;
717			sdesc->data[1] = 0ULL;
718			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
719			sdesc = (q80_stat_desc_t *)
720				&hw->sds[sds_idx].sds_ring_base[comp_idx];
721		}
722
723		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
724			sds_replenish_threshold = 0;
725			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
726				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
727					comp_idx);
728			}
729			hw->sds[sds_idx].sdsr_next = comp_idx;
730		}
731	}
732
733	if (ha->hw.enable_soft_lro) {
734		struct lro_ctrl		*lro;
735
736		lro = &ha->hw.sds[sds_idx].lro;
737
738#if (__FreeBSD_version >= 1100101)
739
740		tcp_lro_flush_all(lro);
741
742#else
743		struct lro_entry *queued;
744
745		while ((!SLIST_EMPTY(&lro->lro_active))) {
746			queued = SLIST_FIRST(&lro->lro_active);
747			SLIST_REMOVE_HEAD(&lro->lro_active, next);
748			tcp_lro_flush(lro, queued);
749		}
750
751#endif /* #if (__FreeBSD_version >= 1100101) */
752
753	}
754
755	if (ha->stop_rcv)
756		goto ql_rcv_isr_exit;
757
758	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
759		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
760		hw->sds[sds_idx].sdsr_next = comp_idx;
761	} else {
762		if (ha->hw.num_rds_rings > 1)
763			r_idx = sds_idx;
764
765		sdsp = &ha->hw.sds[sds_idx];
766
767		if (sdsp->rx_free > ha->std_replenish)
768			qla_replenish_normal_rx(ha, sdsp, r_idx);
769	}
770
771	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
772	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
773
774	if (opcode)
775		ret = -1;
776
777ql_rcv_isr_exit:
778	hw->sds[sds_idx].rcv_active = 0;
779
780	return (ret);
781}
782
783void
784ql_mbx_isr(void *arg)
785{
786	qla_host_t *ha;
787	uint32_t data;
788	uint32_t prev_link_state;
789
790	ha = arg;
791
792	if (ha == NULL) {
793		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
794		return;
795	}
796
797	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
798	if ((data & 0x3) != 0x1) {
799		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
800		return;
801	}
802
803	data = READ_REG32(ha, Q8_FW_MBOX0);
804
805	if ((data & 0xF000) != 0x8000)
806		return;
807
808	data = data & 0xFFFF;
809
810	switch (data) {
811
812	case 0x8001:  /* It's an AEN */
813
814		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
815
816		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
817		ha->hw.cable_length = data & 0xFFFF;
818
819		data = data >> 16;
820		ha->hw.link_speed = data & 0xFFF;
821
822		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
823
824		prev_link_state =  ha->hw.link_up;
825
826		data = (((data & 0xFF) == 0) ? 0 : 1);
827		atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);
828
829		device_printf(ha->pci_dev,
830			"%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
831			__func__, data, prev_link_state);
832
833		if (prev_link_state !=  ha->hw.link_up) {
834			if (ha->hw.link_up)
835				if_link_state_change(ha->ifp, LINK_STATE_UP);
836			else
837				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
838		}
839
840
841		ha->hw.module_type = ((data >> 8) & 0xFF);
842		ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
843		ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
844
845		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
846		ha->hw.loopback_mode = data & 0x03;
847
848		ha->hw.link_faults = (data >> 3) & 0xFF;
849
850		break;
851
852        case 0x8100:
853		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
854		ha->hw.imd_compl=1;
855		break;
856
857        case 0x8101:
858                ha->async_event = 1;
859                ha->hw.aen_mb0 = 0x8101;
860                ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
861                ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
862                ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
863                ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
864		device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n",
865			__func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
866			ha->hw.aen_mb3, ha->hw.aen_mb4);
867                break;
868
869        case 0x8110:
870                /* for now just dump the registers */
871                {
872                        uint32_t ombx[5];
873
874                        ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
875                        ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
876                        ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
877                        ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
878                        ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
879
880                        device_printf(ha->pci_dev, "%s: "
881                                "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
882                                __func__, data, ombx[0], ombx[1], ombx[2],
883                                ombx[3], ombx[4]);
884                }
885
886                break;
887
888        case 0x8130:
889                /* sfp insertion aen */
890                device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
891                        __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
892                break;
893
894        case 0x8131:
895                /* sfp removal aen */
896                device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
897                break;
898
899	case 0x8140:
900		{
901			uint32_t ombx[3];
902
903			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
904			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
905			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
906
907			device_printf(ha->pci_dev, "%s: "
908				"0x%08x 0x%08x 0x%08x 0x%08x \n",
909				__func__, data, ombx[0], ombx[1], ombx[2]);
910		}
911		break;
912
913	default:
914		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
915		break;
916	}
917	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
918	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
919	return;
920}
921
922
/*
 * Name: qla_replenish_normal_rx
 * Function: moves buffer wrappers from the SDS free list back onto
 *	receive ring r_idx, allocating a fresh mbuf for each, and
 *	advances the hardware producer index as it goes.
 */
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	/* remember the producer index last published to the hardware */
	rx_next = rdesc->rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* post the new buffer and advance both ring
			 * indices, wrapping at NUM_RX_DESCRIPTORS */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			/* mbuf allocation failed: return the wrapper to
			 * the free list and stop replenishing */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		/* publish progress every rds_pidx_thres buffers */
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* publish any final progress not covered by the in-loop updates */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
982
983void
984ql_isr(void *arg)
985{
986	qla_ivec_t *ivec = arg;
987	qla_host_t *ha ;
988	int idx;
989	qla_hw_t *hw;
990	struct ifnet *ifp;
991	qla_tx_fp_t *fp;
992
993	ha = ivec->ha;
994	hw = &ha->hw;
995	ifp = ha->ifp;
996
997	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
998		return;
999
1000	fp = &ha->tx_fp[idx];
1001	hw->sds[idx].intr_count++;
1002
1003	if ((fp->fp_taskqueue != NULL) &&
1004		(ifp->if_drv_flags & IFF_DRV_RUNNING))
1005		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1006
1007	return;
1008}
1009
1010