ql_isr.c revision 322975
1/*
2 * Copyright (c) 2013-2016 Qlogic Corporation
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File: ql_isr.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/dev/qlxgbe/ql_isr.c 322975 2017-08-28 19:17:28Z davidcs $");
35
36
37#include "ql_os.h"
38#include "ql_hw.h"
39#include "ql_def.h"
40#include "ql_inline.h"
41#include "ql_ver.h"
42#include "ql_glbl.h"
43#include "ql_dbg.h"
44
45static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
46		uint32_t r_idx);
47
/*
 * Name: qla_rcv_error
 * Function: Flags a fatal receive-path error. Stops further receive
 *	processing (stop_rcv is polled by ql_rcv_isr()) and requests
 *	adapter recovery.
 *
 * NOTE(review): qla_initiate_recovery is presumably consumed by an
 * error-recovery task elsewhere in the driver — confirm against ql_os.c.
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->stop_rcv = 1;
	ha->qla_initiate_recovery = 1;
}
54
55
56/*
57 * Name: qla_rx_intr
58 * Function: Handles normal ethernet frames received
59 */
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 *
 * Rebuilds one received frame from the rx buffer handles listed in *sgc,
 * returns the consumed rx buffers to the per-sds free list, strips any
 * 802.1Q VLAN encapsulation, fills in checksum/RSS metadata and hands
 * the mbuf chain to the stack (optionally through software LRO).
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;
	struct lro_ctrl		*lro;

	lro = &ha->hw.sds[sds_idx].lro;

	/* with multiple rds rings the rx ring index follows the sds index */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	/* link the mbufs behind all buffer handles into one chain */
	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		/* NOTE(review): mpf is assigned again in the (i == 0)
		 * branch further down; this early assignment looks
		 * redundant. */
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* return the rx buffer descriptor to the free list; it is
		 * re-armed with a fresh mbuf in qla_replenish_normal_rx() */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			/* head mbuf carries the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation mbuf: append to the chain */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* last mbuf in the chain holds whatever length remains */
	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* slide the 12-byte MAC addresses forward over the 4-byte
		 * 802.1Q tag, then trim the now-dead leading bytes */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		/* hardware validated both the IP and L4 checksums */
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	ifp->if_ipackets++;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	/* hash type API changed across FreeBSD versions */
#if __FreeBSD_version >= 1100000
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
#else
#if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
        M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
#else
        M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
#endif
#endif /* #if __FreeBSD_version >= 1100000 */

	if (ha->hw.enable_soft_lro) {

#if (__FreeBSD_version >= 1100101)

		tcp_lro_queue_mbuf(lro, mpf);

#else
		/* older LRO API: fall back to direct input on reject */
		if (tcp_lro_rx(lro, mpf, 0))
			(*ifp->if_input)(ifp, mpf);

#endif /* #if (__FreeBSD_version >= 1100101) */


	} else {
		(*ifp->if_input)(ifp, mpf);
	}

	/* re-arm the rx ring if enough buffers have accumulated */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
198
199#define QLA_TCP_HDR_SIZE        20
200#define QLA_TCP_TS_OPTION_SIZE  12
201
202/*
203 * Name: qla_lro_intr
204 * Function: Handles normal ethernet frames received
205 */
/*
 * Name: qla_lro_intr
 * Function: Handles hardware-coalesced (LRO) receive completions
 *
 * Rebuilds the aggregated TCP segment from the rx buffer handles in
 * *sgc, fixes up the IP/IPv6 length fields to match the coalesced
 * payload, sets checksum/RSS metadata and hands the mbuf chain to the
 * stack. Always returns 0; fatal errors are flagged via qla_rcv_error().
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* with multiple rds rings the rx ring index follows the sds index */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/* total frame length = headers up to L4 + TCP header (+ timestamp
	 * option, if the firmware flagged one) + coalesced payload */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	/* link the mbufs behind all buffer handles into one chain */
	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* return the rx buffer descriptor to the free list */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head mbuf carries the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* last mbuf in the chain holds whatever length remains */
	mpl->m_len = rem_len;

	/* locate the TCP header before any leading bytes are trimmed */
	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* drop firmware-specific bytes preceding the ethernet header */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* slide the 12-byte MAC addresses forward over the 4-byte
		 * 802.1Q tag, then trim the now-dead leading bytes */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		/* rewrite ip_len to cover the coalesced payload */
		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		/* ip6_plen excludes the fixed IPv6 header */
		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* not IPv4/IPv6: drop the frame */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* LRO completions imply hardware-verified checksums */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;
	/* NOTE(review): this overwrites the M_HASHTYPE_RSS_TCP_IPV4/IPV6
	 * set above with the opaque type — confirm this is intentional. */
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

	ifp->if_ipackets++;

	(*ifp->if_input)(ifp, mpf);

	/* re-arm the rx ring if enough buffers have accumulated */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
371
372static int
373qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
374	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
375{
376	uint32_t i;
377	uint16_t num_handles;
378	q80_stat_desc_t *sdesc;
379	uint32_t opcode;
380
381	*nhandles = 0;
382	dcount--;
383
384	for (i = 0; i < dcount; i++) {
385		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
386		sdesc = (q80_stat_desc_t *)
387				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
388
389		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
390
391		if (!opcode) {
392			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
393				__func__, (void *)sdesc->data[0],
394				(void *)sdesc->data[1]);
395			return -1;
396		}
397
398		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
399		if (!num_handles) {
400			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
401				__func__, (void *)sdesc->data[0],
402				(void *)sdesc->data[1]);
403			return -1;
404		}
405
406		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
407			num_handles = -1;
408
409		switch (num_handles) {
410
411		case 1:
412			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
413			break;
414
415		case 2:
416			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
417			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
418			break;
419
420		case 3:
421			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
422			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
423			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
424			break;
425
426		case 4:
427			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
428			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
429			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
430			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
431			break;
432
433		case 5:
434			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
435			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
436			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
437			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
438			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
439			break;
440
441		case 6:
442			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
443			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
444			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
445			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
446			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
447			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
448			break;
449
450		case 7:
451			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
452			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
453			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
454			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
455			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
456			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
457			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
458			break;
459
460		default:
461			device_printf(ha->pci_dev,
462				"%s: invalid num handles %p %p\n",
463				__func__, (void *)sdesc->data[0],
464				(void *)sdesc->data[1]);
465
466			QL_ASSERT(ha, (0),\
467			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
468			__func__, "invalid num handles", sds_idx, num_handles,
469			(void *)sdesc->data[0],(void *)sdesc->data[1]));
470
471			qla_rcv_error(ha);
472			return 0;
473		}
474		*nhandles = *nhandles + num_handles;
475	}
476	return 0;
477}
478
479/*
480 * Name: ql_rcv_isr
481 * Function: Main Interrupt Service Routine
482 */
/*
 * Name: ql_rcv_isr
 * Function: Main Interrupt Service Routine
 *
 * Drains up to 'count' status descriptors from status ring 'sds_idx',
 * dispatching each completion to qla_rx_intr()/qla_lro_intr(), then
 * flushes software LRO and updates the hardware consumer index.
 * Returns non-zero if more work remains in the ring on exit.
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->stop_rcv) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		/* opcode 0 means the firmware has not filled this slot */
		if (!opcode)
			break;

		switch (opcode) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:
			/* single-descriptor, single-buffer receive */

			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:
			/* scatter-gather receive spanning multiple
			 * descriptors/buffers */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/* verify the last descriptor of the burst
				 * has been posted as a continuation */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					/* burst incomplete: retry later */
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* collect handles from continuation descriptors */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:
			/* hardware LRO completion, possibly spanning
			 * multiple descriptors/buffers */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/* verify the last descriptor of the burst
				 * has been posted as a continuation */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					/* burst incomplete: retry later */
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			/* collect handles from continuation descriptors */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 signals an incomplete burst or error;
		 * leave the descriptors for the next invocation */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;


		/* clear the consumed descriptors and advance comp_idx */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* periodically publish the consumer index to hardware */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;

#if (__FreeBSD_version >= 1100101)

		tcp_lro_flush_all(lro);

#else
		/* older LRO API: drain the active list by hand */
		struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}

#endif /* #if (__FreeBSD_version >= 1100101) */

	}

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	/* publish any remaining consumer-index progress; otherwise use
	 * the idle moment to replenish the rx ring */
	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* if the next slot is already valid, more work is pending */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
781
782void
783ql_mbx_isr(void *arg)
784{
785	qla_host_t *ha;
786	uint32_t data;
787	uint32_t prev_link_state;
788
789	ha = arg;
790
791	if (ha == NULL) {
792		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
793		return;
794	}
795
796	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
797	if ((data & 0x3) != 0x1) {
798		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
799		return;
800	}
801
802	data = READ_REG32(ha, Q8_FW_MBOX0);
803
804	if ((data & 0xF000) != 0x8000)
805		return;
806
807	data = data & 0xFFFF;
808
809	switch (data) {
810
811	case 0x8001:  /* It's an AEN */
812
813		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
814
815		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
816		ha->hw.cable_length = data & 0xFFFF;
817
818		data = data >> 16;
819		ha->hw.link_speed = data & 0xFFF;
820
821		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
822
823		prev_link_state =  ha->hw.link_up;
824		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
825
826		if (prev_link_state !=  ha->hw.link_up) {
827			if (ha->hw.link_up)
828				if_link_state_change(ha->ifp, LINK_STATE_UP);
829			else
830				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
831		}
832
833
834		ha->hw.module_type = ((data >> 8) & 0xFF);
835		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
836		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
837
838		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
839		ha->hw.flags.loopback_mode = data & 0x03;
840
841		ha->hw.link_faults = (data >> 3) & 0xFF;
842
843		break;
844
845        case 0x8100:
846		ha->hw.imd_compl=1;
847		break;
848
849        case 0x8101:
850                ha->async_event = 1;
851                ha->hw.aen_mb0 = 0x8101;
852                ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
853                ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
854                ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
855                ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
856                break;
857
858        case 0x8110:
859                /* for now just dump the registers */
860                {
861                        uint32_t ombx[5];
862
863                        ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
864                        ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
865                        ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
866                        ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
867                        ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
868
869                        device_printf(ha->pci_dev, "%s: "
870                                "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
871                                __func__, data, ombx[0], ombx[1], ombx[2],
872                                ombx[3], ombx[4]);
873                }
874
875                break;
876
877        case 0x8130:
878                /* sfp insertion aen */
879                device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
880                        __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
881                break;
882
883        case 0x8131:
884                /* sfp removal aen */
885                device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
886                break;
887
888	case 0x8140:
889		{
890			uint32_t ombx[3];
891
892			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
893			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
894			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
895
896			device_printf(ha->pci_dev, "%s: "
897				"0x%08x 0x%08x 0x%08x 0x%08x \n",
898				__func__, data, ombx[0], ombx[1], ombx[2]);
899		}
900		break;
901
902	default:
903		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
904		break;
905	}
906	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
907	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
908	return;
909}
910
911
/*
 * Name: qla_replenish_normal_rx
 * Function: Re-arms receive descriptor ring 'r_idx' with fresh mbufs,
 *	consuming up to sdsp->rx_free entries from the per-sds free list.
 *	Periodically (every rds_pidx_thres descriptors, and once at the
 *	end) publishes the new producer index to the hardware.
 */
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	/* remember the last producer index published to hardware */
	rx_next = rdesc->rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		/* take a buffer descriptor off the free list */
		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* mbuf attached: post it to the receive ring */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			/* allocation failed: put the descriptor back and
			 * stop replenishing for now */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		/* publish the producer index every rds_pidx_thres posts */
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* publish any remaining progress */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
971
972void
973ql_isr(void *arg)
974{
975	qla_ivec_t *ivec = arg;
976	qla_host_t *ha ;
977	int idx;
978	qla_hw_t *hw;
979	struct ifnet *ifp;
980	qla_tx_fp_t *fp;
981
982	ha = ivec->ha;
983	hw = &ha->hw;
984	ifp = ha->ifp;
985
986	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
987		return;
988
989	fp = &ha->tx_fp[idx];
990	hw->sds[idx].intr_count++;
991
992	if ((fp->fp_taskqueue != NULL) &&
993		(ifp->if_drv_flags & IFF_DRV_RUNNING))
994		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
995
996	return;
997}
998
999