/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

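/*
 * qls_tx_comp
 * Process a single transmit completion: validate the descriptor index,
 * unmap and free the completed mbuf, and advance the ring's done count.
 */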
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
	qla_tx_buf_t *txb;
	uint32_t tx_idx = tx_comp->tid_lo;

	if (tx_idx >= NUM_TX_DESCRIPTORS) {
		ha->qla_initiate_recovery = 1;
		return;
	}

	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

	if (txb->m_head) {
		if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
		bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, txb->map);
		m_freem(txb->m_head);

		txb->m_head = NULL;
	}

	ha->tx_ring[txr_idx].txr_done++;

	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
		ha->tx_ring[txr_idx].txr_done = 0;
}

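/*
 * qls_replenish_rx
 * Refill the rx buffer queue (sbq) for the given ring with fresh mbufs
 * and hand ownership of the buffers back to the hardware.
 */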
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
	qla_rx_buf_t			*rxb;
	qla_rx_ring_t			*rxr;
	int				count;
	volatile q81_bq_addr_e_t	*sbq_e;

	rxr = &ha->rx_ring[r_idx];

	count = rxr->rx_free;
	sbq_e = rxr->sbq_vaddr;

	while (count--) {
		rxb = &rxr->rx_buf[rxr->sbq_next];

		if (rxb->m_head == NULL) {
			if (qls_get_mbuf(ha, rxb, NULL) != 0) {
				device_printf(ha->pci_dev,
					"%s: qls_get_mbuf [0,%d,%d] failed\n",
					__func__, rxr->sbq_next, r_idx);
				rxb->m_head = NULL;
				break;
			}
		}

		if (rxb->m_head != NULL) {
			sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
			sbq_e[rxr->sbq_next].addr_hi =
				(uint32_t)(rxb->paddr >> 32);

			rxr->sbq_next++;
			if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
				rxr->sbq_next = 0;

			rxr->sbq_free++;
			rxr->rx_free--;
		}

		/* Post the producer index to hardware in batches of 16. */
		if (rxr->sbq_free == 16) {
			rxr->sbq_in += 16;
			rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
			rxr->sbq_free = 0;

			Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
		}
	}
}

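/*
 * qls_rx_comp
 * Process a single receive completion: sanity check the completion entry,
 * strip any 802.1Q tag, set RSS and checksum offload metadata, and pass
 * the mbuf to LRO or directly to the stack.
 * Returns 0 on success, -1 if the entry is invalid.
 */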
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rxr;
	device_t	dev = ha->pci_dev;
	struct mbuf	*mp = NULL;
	struct ifnet	*ifp = ha->ifp;
	struct lro_ctrl	*lro;
	struct ether_vlan_header *eh;

	rxr = &ha->rx_ring[rxr_idx];

	lro = &rxr->lro;

	rxb = &rxr->rx_buf[rxr->rx_next];

	if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
		device_printf(dev, "%s: DS bit not set\n", __func__);
		return (-1);
	}

	if (rxb->paddr != cq_e->b_paddr) {
		device_printf(dev,
			"%s: (rxb->paddr != cq_e->b_paddr)[%p, %p]\n",
			__func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

		Q81_SET_CQ_INVALID(cq_idx);

		ha->qla_initiate_recovery = 1;

		return (-1);
	}

	rxr->rx_int++;

	if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
		mp = rxb->m_head;
		rxb->m_head = NULL;

		if (mp == NULL) {
			device_printf(dev, "%s: mp == NULL\n", __func__);
		} else {
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = cq_e->length;
			mp->m_pkthdr.rcvif = ifp;
			mp->m_len = cq_e->length;

			eh = mtod(mp, struct ether_vlan_header *);

			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
				uint32_t *data = (uint32_t *)eh;

				mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
				mp->m_flags |= M_VLANTAG;

				/*
				 * Shift the MAC addresses forward over the
				 * 802.1Q tag, then trim the tag so the stack
				 * sees an untagged frame.
				 */
				*(data + 3) = *(data + 2);
				*(data + 2) = *(data + 1);
				*(data + 1) = *data;

				m_adj(mp, ETHER_VLAN_ENCAP_LEN);
			}

			if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
				rxr->rss_int++;
				mp->m_pkthdr.flowid = cq_e->rss;
				M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
			}
			if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
				Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
				mp->m_pkthdr.csum_flags = 0;
			} else {
				mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
					CSUM_IP_VALID | CSUM_DATA_VALID |
					CSUM_PSEUDO_HDR;
				mp->m_pkthdr.csum_data = 0xFFFF;
			}
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
				/* LRO packet has been successfully queued */
			} else {
				(*ifp->if_input)(ifp, mp);
			}
		}
	} else {
		device_printf(dev, "%s: err [0x%08x]\n", __func__,
			cq_e->flags1);
	}

	rxr->rx_free++;
	rxr->rx_next++;

	if (rxr->rx_next == NUM_RX_DESCRIPTORS)
		rxr->rx_next = 0;

	if ((rxr->rx_free + rxr->sbq_free) >= 16)
		qls_replenish_rx(ha, rxr_idx);

	return (0);
}

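/*
 * qls_cq_isr
 * Drain the completion queue for the given index, dispatching transmit
 * and receive completions, then update the consumer index register.
 */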
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
	q81_cq_e_t *cq_e, *cq_b;
	uint32_t i, cq_comp_idx;
	int ret = 0, tx_comp_done = 0;
	struct lro_ctrl	*lro;

	cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
	lro = &ha->rx_ring[cq_idx].lro;

	cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

	i = ha->rx_ring[cq_idx].cq_next;

	while (i != cq_comp_idx) {
		cq_e = &cq_b[i];

		switch (cq_e->opcode) {
		case Q81_IOCB_TX_MAC:
		case Q81_IOCB_TX_TSO:
			qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
			tx_comp_done++;
			break;

		case Q81_IOCB_RX:
			ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
			break;

		case Q81_IOCB_MPI:
		case Q81_IOCB_SYS:
		default:
			device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal\n",
				__func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
				cq_e->opcode);
			qls_dump_buf32(ha, __func__, cq_e,
				(sizeof (q81_cq_e_t) >> 2));
			break;
		}

		i++;
		if (i == NUM_CQ_ENTRIES)
			i = 0;

		if (ret) {
			break;
		}

		/*
		 * Re-read the completion index once we catch up, in case
		 * more entries were posted while we were processing.
		 */
		if (i == cq_comp_idx) {
			cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
		}

		if (tx_comp_done) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			tx_comp_done = 0;
		}
	}

	tcp_lro_flush_all(lro);

	ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

	if (!ret) {
		Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
	}

	if (tx_comp_done)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	return;
}

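/*
 * qls_mbx_isr
 * Handle a mailbox/processor interrupt: latch either a mailbox command
 * completion (0x4xxx status) or an asynchronous event (0x8xxx), then
 * clear the interrupt via Q81_CTL_HCS_CMD_CLR_RTH_INTR.
 */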
static void
qls_mbx_isr(qla_host_t *ha)
{
	uint32_t data;
	int i;
	device_t dev = ha->pci_dev;

	if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
		if ((data & 0xF000) == 0x4000) {
			/* mailbox command completion */
			ha->mbox[0] = data;
			for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->mbox[i] = data;
			}
			ha->mbx_done = 1;
		} else if ((data & 0xF000) == 0x8000) {
			/* we have an AEN */
			ha->aen[0] = data;
			for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->aen[i] = data;
			}
			device_printf(dev, "%s: AEN "
				"[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
				" 0x%08x 0x%08x 0x%08x 0x%08x]\n",
				__func__,
				ha->aen[0], ha->aen[1], ha->aen[2],
				ha->aen[3], ha->aen[4], ha->aen[5],
				ha->aen[6], ha->aen[7], ha->aen[8]);

			switch ((ha->aen[0] & 0xFFFF)) {
			case 0x8011:	/* link up */
				ha->link_up = 1;
				break;

			case 0x8012:	/* link down */
				ha->link_up = 0;
				break;

			case 0x8130:	/* link hardware info */
				ha->link_hw_info = ha->aen[1];
				break;

			case 0x8131:
				ha->link_hw_info = 0;
				break;
			}
		}
	}
	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

	return;
}

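/*
 * qls_isr
 * Top-level interrupt handler: check for fatal status, service mailbox
 * interrupts on vector 0, drain this vector's completion queue, and
 * re-enable the interrupt.
 */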
void
qls_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t status;
	uint32_t cq_idx;
	device_t dev;

	ha = ivec->ha;
	cq_idx = ivec->cq_idx;
	dev = ha->pci_dev;

	status = READ_REG32(ha, Q81_CTL_STATUS);

	if (status & Q81_CTL_STATUS_FE) {
		device_printf(dev, "%s fatal error\n", __func__);
		return;
	}

	if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
		qls_mbx_isr(ha);
	}

	status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

	if (status & (0x1 << cq_idx))
		qls_cq_isr(ha, cq_idx);

	Q81_ENABLE_INTR(ha, cq_idx);

	return;
}