/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_isr.c 351677 2019-09-02 00:31:05Z emaste $");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"

static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
		uint32_t r_idx);

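/*
 * Name: qla_rcv_error
 * Function: Stops the receive path and initiates a hardware recovery;
 *           invoked when a completion carries an invalid buffer handle
 *           or buffer.
 */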
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->stop_rcv = 1;
	QL_INITIATE_RECOVERY(ha);
}

/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;
	struct lro_ctrl		*lro;

	lro = &ha->hw.sds[sds_idx].lro;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

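		/*
		 * Strip the 802.1Q header in place: slide the 12 bytes of
		 * destination/source MAC address forward over the 4-byte
		 * VLAN tag, then trim the now stale 4 bytes at the front.
		 */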
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	mpf->m_pkthdr.flowid = sgc->rss_hash;

#if __FreeBSD_version >= 1100000
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
#else
#if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
#else
	M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
#endif
#endif /* #if __FreeBSD_version >= 1100000 */

	if (ha->hw.enable_soft_lro) {

#if (__FreeBSD_version >= 1100101)

		tcp_lro_queue_mbuf(lro, mpf);

#else
		if (tcp_lro_rx(lro, mpf, 0))
			(*ifp->if_input)(ifp, mpf);

#endif /* #if (__FreeBSD_version >= 1100101) */

	} else {
		(*ifp->if_input)(ifp, mpf);
	}

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}

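/* Sizes used to reconstruct the total length of an LRO-coalesced frame. */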
#define QLA_TCP_HDR_SIZE        20
#define QLA_TCP_TS_OPTION_SIZE  12

/*
 * Name: qla_lro_intr
 * Function: Handles LRO-coalesced TCP frames received
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

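	/*
	 * The completion carries the TCP payload length and the offset of
	 * the TCP header; add the TCP header itself (plus the timestamp
	 * option when the TS flag is set) to recover the frame length.
	 */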
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	mpl->m_len = rem_len;

	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

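		/*
		 * Same in-place 802.1Q strip as in qla_rx_intr(): shift the
		 * MAC addresses over the tag and trim the leading 4 bytes.
		 */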
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

		ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);
	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);
	} else {
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return (0);
	}

	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}

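/*
 * Name: qla_rcv_cont_sds
 * Function: Walks the continuation status descriptors that follow an
 *           SGL receive/LRO completion and collects the receive buffer
 *           handles they carry into the caller's handle array.
 */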
static int
qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
{
	uint32_t i;
	uint16_t num_handles;
	q80_stat_desc_t *sdesc;
	uint32_t opcode;

	*nhandles = 0;
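	/* the caller already consumed the first descriptor of the burst */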
	dcount--;

	for (i = 0; i < dcount; i++) {
		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
		sdesc = (q80_stat_desc_t *)
				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
				__func__, (void *)sdesc->data[0],
				(void *)sdesc->data[1]);
			return (-1);
		}

		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
		if (!num_handles) {
			device_printf(ha->pci_dev, "%s: num_handles=0 %p %p\n",
				__func__, (void *)sdesc->data[0],
				(void *)sdesc->data[1]);
			return (-1);
		}

		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
			num_handles = -1;

		switch (num_handles) {
		case 1:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			break;

		case 2:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			break;

		case 3:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
			break;

		case 4:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
			break;

		case 5:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
			break;

		case 6:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
			break;

		case 7:
			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
			break;

		default:
			device_printf(ha->pci_dev,
				"%s: invalid num handles %p %p\n",
				__func__, (void *)sdesc->data[0],
				(void *)sdesc->data[1]);

			QL_ASSERT(ha, (0),\
			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
			__func__, "invalid num handles", num_handles, sds_idx,
			(void *)sdesc->data[0], (void *)sdesc->data[1]));

			qla_rcv_error(ha);
			return (0);
		}
		*nhandles = *nhandles + num_handles;
	}
	return (0);
}

/*
 * Name: ql_rcv_isr
 * Function: Receive side Interrupt Service Routine; processes up to
 *           "count" status descriptors on the given status ring.
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return (0);
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->stop_rcv) {
		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		if (!opcode)
			break;

		switch (opcode) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:

			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count - 1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2),\
				("%s: [sds_idx, data0, data1]="\
				"[%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					"[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count - 1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7),\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			desc_count = 0;
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

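		/*
		 * Zero the descriptors just processed so that a stale
		 * opcode is never mistaken for a new completion after the
		 * ring index wraps around.
		 */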
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;

#if (__FreeBSD_version >= 1100101)

		tcp_lro_flush_all(lro);

#else
		struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}

#endif /* #if (__FreeBSD_version >= 1100101) */

	}

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}

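/*
 * Name: ql_mbx_isr
 * Function: Handles mailbox interrupts; processes asynchronous event
 *           notifications (AENs) posted by the firmware.
 */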
void
ql_mbx_isr(void *arg)
{
	qla_host_t *ha;
	uint32_t data;
	uint32_t prev_link_state;

	ha = arg;

	if (ha == NULL) {
		printf("%s: arg == NULL\n", __func__);
		return;
	}

	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
	if ((data & 0x3) != 0x1) {
		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
		return;
	}

	data = READ_REG32(ha, Q8_FW_MBOX0);

	if ((data & 0xF000) != 0x8000)
		return;

	data = data & 0xFFFF;

	switch (data) {
	case 0x8001:  /* It's an AEN */

		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));

		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
		ha->hw.cable_length = data & 0xFFFF;

		data = data >> 16;
		ha->hw.link_speed = data & 0xFFF;

		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));

		prev_link_state = ha->hw.link_up;

		/*
		 * Pull the module type, duplex and autoneg fields out of
		 * the mailbox word before data is collapsed into a 0/1
		 * link-up indication below.
		 */
		ha->hw.module_type = ((data >> 8) & 0xFF);
		ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
		ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);

		data = (((data & 0xFF) == 0) ? 0 : 1);
		atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);

		device_printf(ha->pci_dev,
			"%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
			__func__, data, prev_link_state);

		if (prev_link_state != ha->hw.link_up) {
			if (ha->hw.link_up)
				if_link_state_change(ha->ifp, LINK_STATE_UP);
			else
				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}

		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
		ha->hw.loopback_mode = data & 0x03;

		ha->hw.link_faults = (data >> 3) & 0xFF;

		break;

	case 0x8100:
		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
		ha->hw.imd_compl = 1;
		break;

	case 0x8101:
		ha->async_event = 1;
		ha->hw.aen_mb0 = 0x8101;
		ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
		ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
		ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
		ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
		device_printf(ha->pci_dev,
			"%s: AEN[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
			__func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
			ha->hw.aen_mb3, ha->hw.aen_mb4);
		break;

	case 0x8110:
		/* for now just dump the registers */
		{
			uint32_t ombx[5];

			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
			ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
			ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));

			device_printf(ha->pci_dev, "%s: "
				"0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				__func__, data, ombx[0], ombx[1], ombx[2],
				ombx[3], ombx[4]);
		}
		break;

	case 0x8130:
		/* sfp insertion aen */
		device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
			__func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
		break;

	case 0x8131:
		/* sfp removal aen */
		device_printf(ha->pci_dev, "%s: sfp removed\n", __func__);
		break;

	case 0x8140:
		{
			uint32_t ombx[3];

			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));

			device_printf(ha->pci_dev, "%s: "
				"0x%08x 0x%08x 0x%08x 0x%08x\n",
				__func__, data, ombx[0], ombx[1], ombx[2]);
		}
		break;

	default:
		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
		break;
	}
	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
	return;
}

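/*
 * Name: qla_replenish_normal_rx
 * Function: Returns free receive buffers to the hardware receive
 *           descriptor ring and periodically updates the producer index.
 */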
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	rx_next = rdesc->rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}

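/*
 * Name: ql_isr
 * Function: Per-ring interrupt handler; defers the actual completion
 *           processing to the ring's fast path taskqueue.
 */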
void
ql_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	int idx;
	qla_hw_t *hw;
	struct ifnet *ifp;
	qla_tx_fp_t *fp;

	ha = ivec->ha;
	hw = &ha->hw;
	ifp = ha->ifp;

	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
		return;

	fp = &ha->tx_fp[idx];
	hw->sds[idx].intr_count++;

	if ((fp->fp_taskqueue != NULL) &&
		(ifp->if_drv_flags & IFF_DRV_RUNNING))
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

	return;
}