/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */


/* $FreeBSD: head/sys/dev/oce/oce_if.c 252869 2013-07-06 08:30:45Z delphij $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* Driver entry points prototypes */
static int oce_probe(device_t dev);
static int oce_attach(device_t dev);
static int oce_detach(device_t dev);
static int oce_shutdown(device_t dev);
static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int oce_setup_intr(POCE_SOFTC sc);
static int oce_fast_isr(void *arg);
static int oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
			    uint32_t status);
static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
			       struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
		   struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
			       struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m,
					boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
static int oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);

/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
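/* Each entry packs the PCI vendor ID into the upper 16 bits and the
 * device ID into the lower 16 bits; oce_probe() splits them back out
 * when matching the probed device against this table. */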
static uint32_t supportedDevices[] = {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}

static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock, "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;
}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}

static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			sc->promisc = TRUE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			sc->promisc = FALSE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				      "Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
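		/* Each bit set in u marks a capability whose state the
		 * request toggles relative to the current if_capenable. */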
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					  "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
						  "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}

static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);
}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status)
		return ENXIO;

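	/* Pick a WQ from the packet's flow id when the stack provides one,
	 * so every packet of a given flow stays on a single TX queue. */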
	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;
}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}



/*****************************************************************************
 *                  Driver interrupt routines functions                     *
 *****************************************************************************/

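/* Bottom-half interrupt handler: consume every posted EQ entry, clear the
 * EQ without re-arming it, run the handler of each completion queue tied
 * to this EQ, then re-arm the CQs and finally the EQ itself. */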
static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
			BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC completions, but don't arm the CQs yet */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all CQs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

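	/* With RSS, request enough MSI-X vectors to cover the RSS receive
	 * queues (nrqs minus the default RQ) and the work queues, whichever
	 * is larger; otherwise a single vector is enough. */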
	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

	ii->eq->intr++;

	return FILTER_HANDLED;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			      "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
				       M_NOWAIT,
				       taskqueue_thread_enqueue,
				       &ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
				device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			    ii->intr_res,
			    INTR_TYPE_NET,
			    oce_fast_isr, NULL, ii, &ii->tag);
	return rc;
}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
					  sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
					     sc->intrs[i].irq_rr,
					     sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);
}



/******************************************************************************
 *			   Media callbacks functions                          *
 ******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions                        *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy required only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) % \
				     OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
		    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
		    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;
		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}
		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
			}
			/* The original test (!IS_BE(sc) || !IS_SH(sc)) can
			 * never be false since no chip is both BE and SH;
			 * keep the effective behavior and always request IP
			 * checksum offload for TSO frames. */
			nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
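		/* Ring the WQ doorbell: as encoded here, the number of WQEs
		 * just posted goes in the upper 16 bits of the register and
		 * the WQ id in the lower 16 bits. */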
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}

static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;


	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
}


#if defined(INET6) || defined(INET)
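/* Make sure the Ethernet, IP and TCP headers of a TSO frame are writable
 * and contiguous in the first mbuf, pulling them up if necessary, so that
 * the header fields can be parsed when the header WQE is built. */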
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) +
			    (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;
}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
			      "TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	oce_start(ifp);
#endif
}


void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}

/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}

static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
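	/* Peek rather than dequeue, so a frame that oce_tx() cannot accept
	 * can be put back at the head of the ring and retried by the
	 * restart task once the WQ drains. */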
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				/* drbr_putback() already returns the frame
				 * to the head of the ring; enqueueing it
				 * again here would insert it twice. */
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}




/*****************************************************************************
 *			   Receive routines functions                        *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if (IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;

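	/* A completion can span several RQ fragments; unload each fragment's
	 * DMA map and chain the mbufs together to rebuild the frame. */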
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				      "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						    (CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines whether the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
				/* In UMC mode the pvid is generally stripped
				   by the hw, but in some cases it has been
				   seen to arrive with the pvid intact. So if
				   pvid == vlan, neglect the vlan. */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver) &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued ++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				      "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}
}


static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp = cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;
}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id = cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		;/* For BE3 legacy and Lancer this is a no-op */

	return 1;
}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl *lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
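	/* Publish the new buffers to the hardware; the doorbell's num_posted
	 * field only holds up to OCE_MAX_RQ_POSTS per write, so large
	 * batches are rung down in chunks. */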
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}

/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack. */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;
}




/*****************************************************************************
 *				Helper functions                             *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	if_initbaudrate(sc->ifp, IF_Gbps(10));

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc != arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc != arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
	    (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						 vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
					 NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
		      sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
				      sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
		      sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");
}

static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

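	/* The user buffer starts with the IOCTL_COOKIE signature, followed
	 * by an mbx_hdr describing the mailbox request and then the request
	 * payload itself. */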
	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem, char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem, char), ioctl_ptr, req_size))
		rc = EFAULT;

	/*
	 * The firmware fills in all the attributes for this ioctl except
	 * the driver version, so fill that in here.
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *)ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;
}

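/* Periodic adaptive interrupt coalescing: estimate each EQ's interrupt
 * rate from the tick delta and, when the rate crosses the high or low
 * water mark, raise or lower the EQ delay via a mailbox command. */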
static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta / hz;

		/* Interrupt rate based on elapsed ticks */
		if (tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one eq that needs to be modified? */
	if (num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
}

static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


/* NOTE : This should only be called holding
 *        DEVICE_LOCK.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait a maximum of 400ms for TX completions to finish */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes the Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   taskqueue_drain completes. */
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* We still need to receive MCC async events, so re-enable
	   interrupts and arm the first EQ. */
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);
}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	    ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}

	/* Update speed */
	sc->link_speed = acqe->u0.s.speed;
	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

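	/* MQ completions carry both mailbox responses and async events;
	 * the async ones dispatched here are link state changes, GRP5 PVID
	 * state updates and QnQ debug notifications. */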
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				    (struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
				   optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				    (struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}

static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE) ||
	    (sc->function_mode & FNM_VNIC_MODE) ||
	    (!is_rss_enabled(sc)) ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

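/* Scale the queue counts to the interrupt vectors actually granted: with
 * RSS, one WQ and one RSS RQ per vector, plus one extra RQ which serves
 * as the default (non-RSS) receive queue. */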
static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_nxt != IPPROTO_TCP) && \
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

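/* This appears to be a BE3 A1 workaround: when QnQ or UMC is active and an
 * IPv6 extension header is present, the VLAN tag (and, for QnQ, the outer
 * tag) is inserted into the frame in software rather than by the hardware,
 * and the WQE's complete bit is cleared via *complete. */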
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if (!M_WRITABLE(m))
		return NULL;

	/* Embed the vlan tag in the packet if it is not already part of it */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
		/* ether_vlanencap() returns NULL on allocation failure */
		if (m == NULL)
			return NULL;
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
	return m;
}

static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
	    oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}

static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_func_config(sc);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else {
		rc = oce_get_profile_config(sc);
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		if (rc)
			sc->nwqs = OCE_MAX_WQ;
	}
}