1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013-2014 Qlogic Corporation
5 * All rights reserved.
6 *
7 *  Redistribution and use in source and binary forms, with or without
8 *  modification, are permitted provided that the following conditions
9 *  are met:
10 *
11 *  1. Redistributions of source code must retain the above copyright
12 *     notice, this list of conditions and the following disclaimer.
13 *  2. Redistributions in binary form must reproduce the above copyright
14 *     notice, this list of conditions and the following disclaimer in the
15 *     documentation and/or other materials provided with the distribution.
16 *
17 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 *  POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * File: qls_isr.c
32 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33 */
34#include <sys/cdefs.h>
35#include "qls_os.h"
36#include "qls_hw.h"
37#include "qls_def.h"
38#include "qls_inline.h"
39#include "qls_ver.h"
40#include "qls_glbl.h"
41#include "qls_dbg.h"
42
43static void
44qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
45{
46	qla_tx_buf_t *txb;
47	uint32_t tx_idx = tx_comp->tid_lo;
48
49	if (tx_idx >= NUM_TX_DESCRIPTORS) {
50		ha->qla_initiate_recovery = 1;
51		return;
52	}
53
54	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];
55
56	if (txb->m_head) {
57		if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
58		bus_dmamap_sync(ha->tx_tag, txb->map,
59		        BUS_DMASYNC_POSTWRITE);
60		bus_dmamap_unload(ha->tx_tag, txb->map);
61		m_freem(txb->m_head);
62
63		txb->m_head = NULL;
64	}
65
66        ha->tx_ring[txr_idx].txr_done++;
67
68	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
69		ha->tx_ring[txr_idx].txr_done = 0;
70}
71
72static void
73qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
74{
75        qla_rx_buf_t			*rxb;
76	qla_rx_ring_t			*rxr;
77        int				count;
78	volatile q81_bq_addr_e_t	*sbq_e;
79
80	rxr = &ha->rx_ring[r_idx];
81
82	count = rxr->rx_free;
83	sbq_e = rxr->sbq_vaddr;
84
85        while (count--) {
86		rxb = &rxr->rx_buf[rxr->sbq_next];
87
88		if (rxb->m_head == NULL) {
89                	if (qls_get_mbuf(ha, rxb, NULL) != 0) {
90                        	device_printf(ha->pci_dev,
91					"%s: qls_get_mbuf [0,%d,%d] failed\n",
92					__func__, rxr->sbq_next, r_idx);
93				rxb->m_head = NULL;
94				break;
95			}
96		}
97
98		if (rxb->m_head != NULL) {
99			sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
100			sbq_e[rxr->sbq_next].addr_hi =
101				(uint32_t)(rxb->paddr >> 32);
102
103                        rxr->sbq_next++;
104                        if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
105                                rxr->sbq_next = 0;
106
107			rxr->sbq_free++;
108                	rxr->rx_free--;
109		}
110
111                if (rxr->sbq_free == 16) {
112			rxr->sbq_in += 16;
113			rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
114			rxr->sbq_free = 0;
115
116			Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
117                }
118        }
119}
120
121static int
122qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
123{
124	qla_rx_buf_t	*rxb;
125	qla_rx_ring_t	*rxr;
126	device_t	dev = ha->pci_dev;
127	struct mbuf     *mp = NULL;
128	if_t ifp = ha->ifp;
129#if defined(INET) || defined(INET6)
130	struct lro_ctrl	*lro;
131#endif
132	struct ether_vlan_header *eh;
133
134	rxr = &ha->rx_ring[rxr_idx];
135
136#if defined(INET) || defined(INET6)
137	lro = &rxr->lro;
138#endif
139
140	rxb = &rxr->rx_buf[rxr->rx_next];
141
142	if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
143		device_printf(dev, "%s: DS bit not set \n", __func__);
144		return -1;
145	}
146	if (rxb->paddr != cq_e->b_paddr) {
147		device_printf(dev,
148			"%s: (rxb->paddr != cq_e->b_paddr)[%p, %p] \n",
149			__func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);
150
151		Q81_SET_CQ_INVALID(cq_idx);
152
153		ha->qla_initiate_recovery = 1;
154
155		return(-1);
156	}
157
158	rxr->rx_int++;
159
160	if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
161		mp = rxb->m_head;
162		rxb->m_head = NULL;
163
164		if (mp == NULL) {
165			device_printf(dev, "%s: mp == NULL\n", __func__);
166		} else {
167			mp->m_flags |= M_PKTHDR;
168			mp->m_pkthdr.len = cq_e->length;
169			mp->m_pkthdr.rcvif = ifp;
170			mp->m_len = cq_e->length;
171
172			eh = mtod(mp, struct ether_vlan_header *);
173
174			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
175				uint32_t *data = (uint32_t *)eh;
176
177				mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
178				mp->m_flags |= M_VLANTAG;
179
180				*(data + 3) = *(data + 2);
181				*(data + 2) = *(data + 1);
182				*(data + 1) = *data;
183
184				m_adj(mp, ETHER_VLAN_ENCAP_LEN);
185			}
186
187			if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
188				rxr->rss_int++;
189				mp->m_pkthdr.flowid = cq_e->rss;
190				M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
191			}
192			if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
193				Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
194				mp->m_pkthdr.csum_flags = 0;
195			} else {
196				mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
197					CSUM_IP_VALID | CSUM_DATA_VALID |
198					CSUM_PSEUDO_HDR;
199				mp->m_pkthdr.csum_data = 0xFFFF;
200			}
201			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
202
203#if defined(INET) || defined(INET6)
204			if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
205				/* LRO packet has been successfully queued */
206			} else
207#endif
208			{
209				if_input(ifp, mp);
210			}
211		}
212	} else {
213		device_printf(dev, "%s: err [0%08x]\n", __func__, cq_e->flags1);
214	}
215
216	rxr->rx_free++;
217	rxr->rx_next++;
218
219	if (rxr->rx_next == NUM_RX_DESCRIPTORS)
220		rxr->rx_next = 0;
221
222	if ((rxr->rx_free + rxr->sbq_free) >= 16)
223                qls_replenish_rx(ha, rxr_idx);
224
225	return 0;
226}
227
/*
 * qls_cq_isr
 *	Drain completion queue cq_idx: walk entries from cq_next up to the
 *	hardware's completion index, dispatching TX and RX completions.
 *	On exit, the consumer index is written back to hardware unless an
 *	RX completion reported an inconsistency (ret != 0), and any pending
 *	TX completions kick the transmit taskqueue.
 */
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
	q81_cq_e_t *cq_e, *cq_b;
	uint32_t i, cq_comp_idx;
	int ret = 0, tx_comp_done = 0;
#if defined(INET) || defined(INET6)
	struct lro_ctrl	*lro = &ha->rx_ring[cq_idx].lro;
#endif

	cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;

	/* Snapshot the hardware-updated completion index. */
	cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

	i = ha->rx_ring[cq_idx].cq_next;

	while (i != cq_comp_idx) {
		cq_e = &cq_b[i];

		switch (cq_e->opcode) {
                case Q81_IOCB_TX_MAC:
                case Q81_IOCB_TX_TSO:
                        qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
                        tx_comp_done++;
                        break;

		case Q81_IOCB_RX:
			/* Nonzero ret aborts the walk below. */
			ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);

			break;

		case Q81_IOCB_MPI:
		case Q81_IOCB_SYS:
		default:
			/* Unexpected opcode: log and dump the raw entry. */
			device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal \n",
				__func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
				cq_e->opcode);
			qls_dump_buf32(ha, __func__, cq_e,
				(sizeof (q81_cq_e_t) >> 2));
			break;
		}

		i++;
		if (i == NUM_CQ_ENTRIES)
			i = 0;

		/*
		 * Stop on an RX error; tx_comp_done may still be set here
		 * and is handled by the post-loop enqueue below.
		 */
		if (ret) {
			break;
		}

		/*
		 * Caught up with the snapshot: re-read the index in case
		 * hardware posted more completions while we worked.
		 */
		if (i == cq_comp_idx) {
			cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
		}

		/* Kick the transmit taskqueue as TX slots are reclaimed. */
                if (tx_comp_done) {
                        taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
                        tx_comp_done = 0;
                }
	}

#if defined(INET) || defined(INET6)
	tcp_lro_flush_all(lro);
#endif

	ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

	/* Only advance the hardware consumer index on a clean walk. */
	if (!ret) {
		Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
	}
	/* Covers TX completions seen in the iteration that broke out. */
        if (tx_comp_done)
                taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	return;
}
302
303static void
304qls_mbx_isr(qla_host_t *ha)
305{
306	uint32_t data;
307	int i;
308	device_t dev = ha->pci_dev;
309
310	if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
311		if ((data & 0xF000) == 0x4000) {
312			ha->mbox[0] = data;
313			for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
314				if (qls_mbx_rd_reg(ha, i, &data))
315					break;
316				ha->mbox[i] = data;
317			}
318			ha->mbx_done = 1;
319		} else if ((data & 0xF000) == 0x8000) {
320			/* we have an AEN */
321
322			ha->aen[0] = data;
323			for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
324				if (qls_mbx_rd_reg(ha, i, &data))
325					break;
326				ha->aen[i] = data;
327			}
328			device_printf(dev,"%s: AEN "
329				"[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
330				" 0x%08x 0x%08x 0x%08x 0x%08x]\n",
331				__func__,
332				ha->aen[0], ha->aen[1], ha->aen[2],
333				ha->aen[3], ha->aen[4], ha->aen[5],
334				ha->aen[6], ha->aen[7], ha->aen[8]);
335
336			switch ((ha->aen[0] & 0xFFFF)) {
337			case 0x8011:
338				ha->link_up = 1;
339				break;
340
341			case 0x8012:
342				ha->link_up = 0;
343				break;
344
345			case 0x8130:
346				ha->link_hw_info = ha->aen[1];
347				break;
348
349			case 0x8131:
350				ha->link_hw_info = 0;
351				break;
352			}
353		}
354	}
355	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);
356
357	return;
358}
359
360void
361qls_isr(void *arg)
362{
363	qla_ivec_t *ivec = arg;
364	qla_host_t *ha;
365	uint32_t status;
366	uint32_t cq_idx;
367	device_t dev;
368
369	ha = ivec->ha;
370	cq_idx = ivec->cq_idx;
371	dev = ha->pci_dev;
372
373	status = READ_REG32(ha, Q81_CTL_STATUS);
374
375	if (status & Q81_CTL_STATUS_FE) {
376		device_printf(dev, "%s fatal error\n", __func__);
377		return;
378	}
379
380	if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
381		qls_mbx_isr(ha);
382	}
383
384	status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);
385
386	if (status & ( 0x1 << cq_idx))
387		qls_cq_isr(ha, cq_idx);
388
389	Q81_ENABLE_INTR(ha, cq_idx);
390
391	return;
392}
393