/*
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/qlxgb/qla_isr.c 250340 2013-05-07 22:58:42Z davidcs $");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);

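/*
 * Receive-path overview: qla_isr() runs at interrupt time and drains the
 * status (SDS) ring via qla_rcv_isr() under a small packet budget.  When
 * the ring cannot be drained within that budget, the remaining work is
 * deferred to the qla_rcv() task, which runs with a separate budget.
 * Completed frames are delivered through qla_rx_intr(), and consumed
 * receive buffers are recycled by the replenish routines below.
 */
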
/*
 * Name: qla_rx_intr
 * Function: Handles normal and jumbo ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
	struct lro_ctrl *lro)
{
	uint32_t idx, length, status, ring;
	qla_rx_buf_t *rxb;
	struct mbuf *mp;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;

	sdsp = &ha->hw.sds[sds_idx];

	ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
	idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
	length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
	status = (uint32_t)Q8_STAT_DESC_STATUS(data);

	if (ring == 0) {
		if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	} else {
		if ((idx >= NUM_RX_JUMBO_DESCRIPTORS) || (length > MJUM9BYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	}

	if (ring == 0)
		rxb = &ha->rx_buf[idx];
	else
		rxb = &ha->rx_jbuf[idx];

	QL_ASSERT((rxb != NULL),
		("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",
		__func__, ring, idx, sds_idx));

	mp = rxb->m_head;

	QL_ASSERT((mp != NULL),
		("%s: [r, i, rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",
		__func__, ring, idx, rxb, sds_idx));

	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

	if (ring == 0) {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;
	} else {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxjb_free;
		sdsp->rxjb_free = rxb;
		sdsp->rxj_free++;
	}

	mp->m_len = length;
	mp->m_pkthdr.len = length;
	mp->m_pkthdr.rcvif = ifp;

	eh = mtod(mp, struct ether_vlan_header *);

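	/*
	 * If the frame carries an 802.1Q header, record the tag in the
	 * mbuf packet header and strip the 4-byte encapsulation in place
	 * by sliding the destination/source MAC addresses forward over
	 * it before trimming the front of the mbuf.
	 */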
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mp->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mp, ETHER_VLAN_ENCAP_LEN);
	}

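	/*
	 * Q8_STAT_DESC_STATUS_CHKSUM_OK is taken to mean the adapter
	 * verified the IP header checksum, so only the IP checksum flags
	 * are set; no L4 checksum flags are reported here.  Frames are
	 * offered to LRO when it is active, else passed to the stack.
	 */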
	if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	} else {
		mp->m_pkthdr.csum_flags = 0;
	}

	if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
		/* LRO packet has been successfully queued */
	} else {
		(*ifp->if_input)(ifp, mp);
	}

	if (sdsp->rx_free > std_replenish)
		qla_replenish_normal_rx(ha, sdsp);

	if (sdsp->rxj_free > jumbo_replenish)
		qla_replenish_jumbo_rx(ha, sdsp);

	return;
}

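/*
 * The replenish routines below can be entered from the receive path of
 * any status ring, so each takes its descriptor-ring mutex with
 * mtx_trylock(); if another context is already refilling the ring, the
 * caller simply returns and retries on a later completion.
 */
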
static void
qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = jumbo_replenish;
	uint32_t rxj_next;

	if (!mtx_trylock(&ha->rxj_lock))
		return;

	rxj_next = ha->hw.rxj_next;

	while (count--) {
		rxb = sdsp->rxjb_free;

		if (rxb == NULL)
			break;

		sdsp->rxjb_free = rxb->next;
		sdsp->rxj_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
				ha->hw.rxj_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rxj_in++;
			if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_in = 0;
			ha->hw.rxj_next++;
			if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
				__func__, ha->hw.rxj_in, rxb->handle);

			/* put the buffer back on the free list and stop */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxjb_free;
			sdsp->rxjb_free = rxb;
			sdsp->rxj_free++;

			break;
		}
	}

	/* notify hardware only if descriptors were actually posted */
	if (rxj_next != ha->hw.rxj_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
	}
	mtx_unlock(&ha->rxj_lock);
}

static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = std_replenish;
	uint32_t rx_next;

	if (!mtx_trylock(&ha->rx_lock))
		return;

	rx_next = ha->hw.rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
				ha->hw.rx_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rx_in++;
			if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
				ha->hw.rx_in = 0;
			ha->hw.rx_next++;
			if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
				ha->hw.rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
				__func__, ha->hw.rx_in, rxb->handle);

			/* put the buffer back on the free list and stop */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
	}

	/* notify hardware only if descriptors were actually posted */
	if (rx_next != ha->hw.rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
	}
	mtx_unlock(&ha->rx_lock);
}

/*
 * Name: qla_rcv_isr
 * Function: Processes status descriptors for one SDS ring; called from
 * both the interrupt handler (qla_isr) and the receive task (qla_rcv).
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, desc_count;
	q80_stat_desc_t *sdesc;
	struct lro_ctrl *lro;
	struct lro_entry *queued;
	uint32_t ret = 0;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;
	lro = &hw->sds[sds_idx].lro;

	while (count--) {
		sdesc = (q80_stat_desc_t *)
			&hw->sds[sds_idx].sds_ring_base[comp_idx];

		if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
			Q8_STAT_DESC_OWNER_HOST) {
			QL_DPRINT2((dev, "%s: data %p sdsr_next 0x%08x\n",
				__func__, (void *)sdesc->data[0], comp_idx));
			break;
		}

		desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));

		switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:
		case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
			qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
				(long long unsigned int)sdesc->data[0]);
			break;
		}

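		/*
		 * Hand every descriptor consumed by this completion back
		 * to firmware and advance the completion index.
		 * NUM_STATUS_DESCRIPTORS is a power of two, so the index
		 * wraps with a mask rather than a modulo.
		 */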
		while (desc_count--) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}
	}

	/* flush any LRO streams assembled during this pass */
	while (!SLIST_EMPTY(&lro->lro_active)) {
		queued = SLIST_FIRST(&lro->lro_active);
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
	}
	hw->sds[sds_idx].sdsr_next = comp_idx;

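	/*
	 * If the budget ran out while ring 0 still holds host-owned
	 * descriptors, return non-zero so the caller reschedules the
	 * receive task instead of re-enabling the interrupt.
	 */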
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
		Q8_STAT_DESC_OWNER_HOST)) {
		ret = -1;
	}

	hw->sds[sds_idx].rcv_active = 0;
	return (ret);
}

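/*
 * Name: qla_isr
 * Function: Main Interrupt Service Routine
 */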
void
qla_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t sds_idx;
	uint32_t ret;

	ha = ivec->ha;
	sds_idx = ivec->irq_rid - 1;

	if (sds_idx >= ha->hw.num_sds_rings) {
		device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
			sds_idx);
		return;
	}

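	/*
	 * Ring 0 also carries transmit completions, so the tx task is
	 * kicked both before and after the receive pass.  A non-zero
	 * return from qla_rcv_isr() means the budget was exhausted and
	 * the remaining work is deferred to the receive task; otherwise
	 * the interrupt is re-enabled.
	 */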
	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	if (ret) {
		taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
			&ha->irq_vec[sds_idx].rcv_task);
	} else {
		QL_ENABLE_INTERRUPTS(ha, sds_idx);
	}
}

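/*
 * Name: qla_rcv
 * Function: Receive task; continues draining the SDS ring with the
 * rcv_pkt_thres_d budget until qla_rcv_isr() reports it empty, then
 * re-enables the interrupt.
 */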
void
qla_rcv(void *context, int pending)
{
	qla_ivec_t *ivec = context;
	qla_host_t *ha;
	device_t dev;
	qla_hw_t *hw;
	uint32_t sds_idx;
	uint32_t ret;
	struct ifnet *ifp;

	ha = ivec->ha;
	dev = ha->pci_dev;
	hw = &ha->hw;
	sds_idx = ivec->irq_rid - 1;
	ifp = ha->ifp;

	do {
		if (sds_idx == 0) {
			if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			} else if ((ifp->if_snd.ifq_head != NULL) &&
				QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
		}
		ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
	} while (ret);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	QL_ENABLE_INTERRUPTS(ha, sds_idx);
}