oce_if.c: stable/11, r331722 → r332288
1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39/* $FreeBSD: stable/11/sys/dev/oce/oce_if.c 331722 2018-03-29 02:50:57Z eadler $ */
39/* $FreeBSD: stable/11/sys/dev/oce/oce_if.c 332288 2018-04-08 16:54:07Z brooks $ */
40
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46/* UE Status Low CSR */
47static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81
82/* UE Status High CSR */
83static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
117
118
119/* Driver entry points prototypes */
120static int oce_probe(device_t dev);
121static int oce_attach(device_t dev);
122static int oce_detach(device_t dev);
123static int oce_shutdown(device_t dev);
124static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125static void oce_init(void *xsc);
126static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127static void oce_multiq_flush(struct ifnet *ifp);
128
129/* Driver interrupt routines prototypes */
130static void oce_intr(void *arg, int pending);
131static int oce_setup_intr(POCE_SOFTC sc);
132static int oce_fast_isr(void *arg);
133static int oce_alloc_intr(POCE_SOFTC sc, int vector,
134 void (*isr) (void *arg, int pending));
135
136/* Media callbacks prototypes */
137static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138static int oce_media_change(struct ifnet *ifp);
139
140/* Transmit routines prototypes */
141static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 uint32_t status);
145static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 struct oce_wq *wq);
147
148/* Receive routines prototypes */
149static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 struct oce_nic_rx_cqe *cqe);
154
155/* Helper function prototypes in this file */
156static int oce_attach_ifp(POCE_SOFTC sc);
157static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159static int oce_vid_config(POCE_SOFTC sc);
160static void oce_mac_addr_set(POCE_SOFTC sc);
161static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162static void oce_local_timer(void *arg);
163static void oce_if_deactivate(POCE_SOFTC sc);
164static void oce_if_activate(POCE_SOFTC sc);
165static void setup_max_queues_want(POCE_SOFTC sc);
166static void update_queues_got(POCE_SOFTC sc);
167static void process_link_state(POCE_SOFTC sc,
168 struct oce_async_cqe_link_state *acqe);
169static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170static void oce_get_config(POCE_SOFTC sc);
171static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172
173/* IP specific */
174#if defined(INET6) || defined(INET)
175static int oce_init_lro(POCE_SOFTC sc);
176static void oce_rx_flush_lro(struct oce_rq *rq);
177static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178#endif
179
180static device_method_t oce_dispatch[] = {
181 DEVMETHOD(device_probe, oce_probe),
182 DEVMETHOD(device_attach, oce_attach),
183 DEVMETHOD(device_detach, oce_detach),
184 DEVMETHOD(device_shutdown, oce_shutdown),
185
186 DEVMETHOD_END
187};
188
189static driver_t oce_driver = {
190 "oce",
191 oce_dispatch,
192 sizeof(OCE_SOFTC)
193};
194static devclass_t oce_devclass;
195
196
197DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
198MODULE_DEPEND(oce, pci, 1, 1, 1);
199MODULE_DEPEND(oce, ether, 1, 1, 1);
200MODULE_VERSION(oce, 1);
201
202
203/* global vars */
204const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
205
206/* Module capabilities and parameters */
207uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
208uint32_t oce_enable_rss = OCE_MODCAP_RSS;
209
210
211TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
212TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
213
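/*
 * Both knobs are loader tunables: TUNABLE_INT() fetches them from the
 * kernel environment once, when the module initializes, so they are
 * set from loader.conf (illustrative values only; the defaults come
 * from OCE_MAX_RSP_HANDLED and OCE_MODCAP_RSS above):
 *     hw.oce.max_rsp_handled=64
 *     hw.oce.enable_rss=0
 */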
214
215/* Supported devices table */
216static uint32_t supportedDevices[] = {
217 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
218 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
219 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
220 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
221 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
222 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
223};
224
225
226
227
228/*****************************************************************************
229 * Driver entry points *
230 *****************************************************************************/
231
232static int
233oce_probe(device_t dev)
234{
235 uint16_t vendor = 0;
236 uint16_t device = 0;
237 int i = 0;
238 char str[256] = {0};
239 POCE_SOFTC sc;
240
241 sc = device_get_softc(dev);
242 bzero(sc, sizeof(OCE_SOFTC));
243 sc->dev = dev;
244
245 vendor = pci_get_vendor(dev);
246 device = pci_get_device(dev);
247
248 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
249 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
250 if (device == (supportedDevices[i] & 0xffff)) {
251 sprintf(str, "%s:%s", "Emulex CNA NIC function",
252 component_revision);
253 device_set_desc_copy(dev, str);
254
255 switch (device) {
256 case PCI_PRODUCT_BE2:
257 sc->flags |= OCE_FLAGS_BE2;
258 break;
259 case PCI_PRODUCT_BE3:
260 sc->flags |= OCE_FLAGS_BE3;
261 break;
262 case PCI_PRODUCT_XE201:
263 case PCI_PRODUCT_XE201_VF:
264 sc->flags |= OCE_FLAGS_XE201;
265 break;
266 case PCI_PRODUCT_SH:
267 sc->flags |= OCE_FLAGS_SH;
268 break;
269 default:
270 return ENXIO;
271 }
272 return BUS_PROBE_DEFAULT;
273 }
274 }
275 }
276
277 return ENXIO;
278}
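/*
 * Sketch of the match above: each supportedDevices[] entry packs the
 * PCI vendor ID into the high 16 bits and the device ID into the low
 * 16, so the nested compares are equivalent to:
 *     ((uint32_t)vendor << 16 | device) == supportedDevices[i]
 */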
279
280
281static int
282oce_attach(device_t dev)
283{
284 POCE_SOFTC sc;
285 int rc = 0;
286
287 sc = device_get_softc(dev);
288
289 rc = oce_hw_pci_alloc(sc);
290 if (rc)
291 return rc;
292
293 sc->tx_ring_size = OCE_TX_RING_SIZE;
294 sc->rx_ring_size = OCE_RX_RING_SIZE;
295 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
296 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
297 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
298
299 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
300 LOCK_CREATE(&sc->dev_lock, "Device_lock");
301
302 /* initialise the hardware */
303 rc = oce_hw_init(sc);
304 if (rc)
305 goto pci_res_free;
306
307 oce_get_config(sc);
308
309 setup_max_queues_want(sc);
310
311 rc = oce_setup_intr(sc);
312 if (rc)
313 goto mbox_free;
314
315 rc = oce_queue_init_all(sc);
316 if (rc)
317 goto intr_free;
318
319 rc = oce_attach_ifp(sc);
320 if (rc)
321 goto queues_free;
322
323#if defined(INET6) || defined(INET)
324 rc = oce_init_lro(sc);
325 if (rc)
326 goto ifp_free;
327#endif
328
329 rc = oce_hw_start(sc);
330 if (rc)
331 goto lro_free;
332
333 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
334 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
335 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
336 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
337
338 rc = oce_stats_init(sc);
339 if (rc)
340 goto vlan_free;
341
342 oce_add_sysctls(sc);
343
344 callout_init(&sc->timer, 1);
345 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
346 if (rc)
347 goto stats_free;
348
349 return 0;
350
351stats_free:
352 callout_drain(&sc->timer);
353 oce_stats_free(sc);
354vlan_free:
355 if (sc->vlan_attach)
356 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
357 if (sc->vlan_detach)
358 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
359 oce_hw_intr_disable(sc);
360lro_free:
361#if defined(INET6) || defined(INET)
362 oce_free_lro(sc);
363ifp_free:
364#endif
365 ether_ifdetach(sc->ifp);
366 if_free(sc->ifp);
367queues_free:
368 oce_queue_release_all(sc);
369intr_free:
370 oce_intr_free(sc);
371mbox_free:
372 oce_dma_free(sc, &sc->bsmbx);
373pci_res_free:
374 oce_hw_pci_free(sc);
375 LOCK_DESTROY(&sc->dev_lock);
376 LOCK_DESTROY(&sc->bmbx_lock);
377 return rc;
378
379}
380
381
382static int
383oce_detach(device_t dev)
384{
385 POCE_SOFTC sc = device_get_softc(dev);
386
387 LOCK(&sc->dev_lock);
388 oce_if_deactivate(sc);
389 UNLOCK(&sc->dev_lock);
390
391 callout_drain(&sc->timer);
392
393 if (sc->vlan_attach != NULL)
394 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
395 if (sc->vlan_detach != NULL)
396 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
397
398 ether_ifdetach(sc->ifp);
399
400 if_free(sc->ifp);
401
402 oce_hw_shutdown(sc);
403
404 bus_generic_detach(dev);
405
406 return 0;
407}
408
409
410static int
411oce_shutdown(device_t dev)
412{
413 int rc;
414
415 rc = oce_detach(dev);
416
417 return rc;
418}
419
420
421static int
422oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
423{
424 struct ifreq *ifr = (struct ifreq *)data;
425 POCE_SOFTC sc = ifp->if_softc;
426 int rc = 0;
427 uint32_t u;
428
429 switch (command) {
430
431 case SIOCGIFMEDIA:
432 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
433 break;
434
435 case SIOCSIFMTU:
436 if (ifr->ifr_mtu > OCE_MAX_MTU)
437 rc = EINVAL;
438 else
439 ifp->if_mtu = ifr->ifr_mtu;
440 break;
441
442 case SIOCSIFFLAGS:
443 if (ifp->if_flags & IFF_UP) {
444 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
445 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
446 oce_init(sc);
447 }
448 device_printf(sc->dev, "Interface Up\n");
449 } else {
450 LOCK(&sc->dev_lock);
451
452 sc->ifp->if_drv_flags &=
453 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
454 oce_if_deactivate(sc);
455
456 UNLOCK(&sc->dev_lock);
457
458 device_printf(sc->dev, "Interface Down\n");
459 }
460
461 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
462 if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
463 sc->promisc = TRUE;
464 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
465 if (!oce_rxf_set_promiscuous(sc, 0))
466 sc->promisc = FALSE;
467 }
468
469 break;
470
471 case SIOCADDMULTI:
472 case SIOCDELMULTI:
473 rc = oce_hw_update_multicast(sc);
474 if (rc)
475 device_printf(sc->dev,
476 "Update multicast address failed\n");
477 break;
478
479 case SIOCSIFCAP:
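	/* The XOR leaves set exactly the capability bits this request asks to toggle. */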
480 u = ifr->ifr_reqcap ^ ifp->if_capenable;
481
482 if (u & IFCAP_TXCSUM) {
483 ifp->if_capenable ^= IFCAP_TXCSUM;
484 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
485
486 if (IFCAP_TSO & ifp->if_capenable &&
487 !(IFCAP_TXCSUM & ifp->if_capenable)) {
488 ifp->if_capenable &= ~IFCAP_TSO;
489 ifp->if_hwassist &= ~CSUM_TSO;
490 if_printf(ifp,
491 "TSO disabled due to -txcsum.\n");
492 }
493 }
494
495 if (u & IFCAP_RXCSUM)
496 ifp->if_capenable ^= IFCAP_RXCSUM;
497
498 if (u & IFCAP_TSO4) {
499 ifp->if_capenable ^= IFCAP_TSO4;
500
501 if (IFCAP_TSO & ifp->if_capenable) {
502 if (IFCAP_TXCSUM & ifp->if_capenable)
503 ifp->if_hwassist |= CSUM_TSO;
504 else {
505 ifp->if_capenable &= ~IFCAP_TSO;
506 ifp->if_hwassist &= ~CSUM_TSO;
507 if_printf(ifp,
508 "Enable txcsum first.\n");
509 rc = EAGAIN;
510 }
511 } else
512 ifp->if_hwassist &= ~CSUM_TSO;
513 }
514
515 if (u & IFCAP_VLAN_HWTAGGING)
516 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
517
518 if (u & IFCAP_VLAN_HWFILTER) {
519 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
520 oce_vid_config(sc);
521 }
522#if defined(INET6) || defined(INET)
523 if (u & IFCAP_LRO)
524 ifp->if_capenable ^= IFCAP_LRO;
525#endif
526
527 break;
528
529 case SIOCGPRIVATE_0:
530 rc = oce_handle_passthrough(ifp, data);
531 break;
532 default:
533 rc = ether_ioctl(ifp, command, data);
534 break;
535 }
536
537 return rc;
538}
539
540
541static void
542oce_init(void *arg)
543{
544 POCE_SOFTC sc = arg;
545
546 LOCK(&sc->dev_lock);
547
548 if (sc->ifp->if_flags & IFF_UP) {
549 oce_if_deactivate(sc);
550 oce_if_activate(sc);
551 }
552
553 UNLOCK(&sc->dev_lock);
554
555}
556
557
558static int
559oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
560{
561 POCE_SOFTC sc = ifp->if_softc;
562 struct oce_wq *wq = NULL;
563 int queue_index = 0;
564 int status = 0;
565
566 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
567 queue_index = m->m_pkthdr.flowid % sc->nwqs;
568
569 wq = sc->wq[queue_index];
570
571 LOCK(&wq->tx_lock);
572 status = oce_multiq_transmit(ifp, m, wq);
573 UNLOCK(&wq->tx_lock);
574
575 return status;
576
577}
578
579
580static void
581oce_multiq_flush(struct ifnet *ifp)
582{
583 POCE_SOFTC sc = ifp->if_softc;
584 struct mbuf *m;
585 int i = 0;
586
587 for (i = 0; i < sc->nwqs; i++) {
588 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
589 m_freem(m);
590 }
591 if_qflush(ifp);
592}
593
594
595
596/*****************************************************************************
597 * Driver interrupt routines *
598 *****************************************************************************/
599
600static void
601oce_intr(void *arg, int pending)
602{
603
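	/*
	 * Deferred interrupt work: drain all posted EQ entries, acknowledge
	 * them without re-arming, run every CQ handler hung off this EQ,
	 * re-arm those CQs, and finally re-arm the EQ itself.
	 */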
604 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
605 POCE_SOFTC sc = ii->sc;
606 struct oce_eq *eq = ii->eq;
607 struct oce_eqe *eqe;
608 struct oce_cq *cq = NULL;
609 int i, num_eqes = 0;
610
611
612 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
613 BUS_DMASYNC_POSTWRITE);
614 do {
615 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
616 if (eqe->evnt == 0)
617 break;
618 eqe->evnt = 0;
619 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
620 BUS_DMASYNC_POSTWRITE);
621 RING_GET(eq->ring, 1);
622 num_eqes++;
623
624 } while (TRUE);
625
626 if (!num_eqes)
627 goto eq_arm; /* Spurious */
628
629 /* Clear EQ entries, but don't arm */
630 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
631
632 /* Process TX, RX and MCC. But don't arm CQ */
633 for (i = 0; i < eq->cq_valid; i++) {
634 cq = eq->cq[i];
635 (*cq->cq_handler)(cq->cb_arg);
636 }
637
638 /* Arm all cqs connected to this EQ */
639 for (i = 0; i < eq->cq_valid; i++) {
640 cq = eq->cq[i];
641 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
642 }
643
644eq_arm:
645 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
646
647 return;
648}
649
650
651static int
652oce_setup_intr(POCE_SOFTC sc)
653{
654 int rc = 0, use_intx = 0;
655 int vector = 0, req_vectors = 0;
656
657 if (is_rss_enabled(sc))
658 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
659 else
660 req_vectors = 1;
661
662 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
663 sc->intr_count = req_vectors;
664 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
665 if (rc != 0) {
666 use_intx = 1;
667 pci_release_msi(sc->dev);
668 } else
669 sc->flags |= OCE_FLAGS_USING_MSIX;
670 } else
671 use_intx = 1;
672
673 if (use_intx)
674 sc->intr_count = 1;
675
676 /* Scale number of queues based on the interrupts we got */
677 update_queues_got(sc);
678
679 if (use_intx) {
680 device_printf(sc->dev, "Using legacy interrupt\n");
681 rc = oce_alloc_intr(sc, vector, oce_intr);
682 if (rc)
683 goto error;
684 } else {
685 for (; vector < sc->intr_count; vector++) {
686 rc = oce_alloc_intr(sc, vector, oce_intr);
687 if (rc)
688 goto error;
689 }
690 }
691
692 return 0;
693error:
694 oce_intr_free(sc);
695 return rc;
696}
697
698
699static int
700oce_fast_isr(void *arg)
701{
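	/*
	 * Interrupt filter: it runs in primary interrupt context, so it
	 * only quiesces the EQ (rearm FALSE, clear_int TRUE) and defers
	 * the actual completion processing to this vector's taskqueue.
	 */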
702 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
703 POCE_SOFTC sc = ii->sc;
704
705 if (ii->eq == NULL)
706 return FILTER_STRAY;
707
708 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
709
710 taskqueue_enqueue(ii->tq, &ii->task);
711
712 ii->eq->intr++;
713
714 return FILTER_HANDLED;
715}
716
717
718static int
719oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
720{
721 POCE_INTR_INFO ii = &sc->intrs[vector];
722 int rc = 0, rr;
723
724 if (vector >= OCE_MAX_EQ)
725 return (EINVAL);
726
727 /* Set the resource id for the interrupt.
728 * MSIx is vector + 1 for the resource id,
729 * INTx is 0 for the resource id.
730 */
731 if (sc->flags & OCE_FLAGS_USING_MSIX)
732 rr = vector + 1;
733 else
734 rr = 0;
735 ii->intr_res = bus_alloc_resource_any(sc->dev,
736 SYS_RES_IRQ,
737 &rr, RF_ACTIVE|RF_SHAREABLE);
738 ii->irq_rr = rr;
739 if (ii->intr_res == NULL) {
740 device_printf(sc->dev,
741 "Could not allocate interrupt\n");
742 rc = ENXIO;
743 return rc;
744 }
745
746 TASK_INIT(&ii->task, 0, isr, ii);
747 ii->vector = vector;
748 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
749 ii->tq = taskqueue_create_fast(ii->task_name,
750 M_NOWAIT,
751 taskqueue_thread_enqueue,
752 &ii->tq);
753 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
754 device_get_nameunit(sc->dev));
755
756 ii->sc = sc;
757 rc = bus_setup_intr(sc->dev,
758 ii->intr_res,
759 INTR_TYPE_NET,
760 oce_fast_isr, NULL, ii, &ii->tag);
761 return rc;
762
763}
764
765
766void
767oce_intr_free(POCE_SOFTC sc)
768{
769 int i = 0;
770
771 for (i = 0; i < sc->intr_count; i++) {
772
773 if (sc->intrs[i].tag != NULL)
774 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
775 sc->intrs[i].tag);
776 if (sc->intrs[i].tq != NULL)
777 taskqueue_free(sc->intrs[i].tq);
778
779 if (sc->intrs[i].intr_res != NULL)
780 bus_release_resource(sc->dev, SYS_RES_IRQ,
781 sc->intrs[i].irq_rr,
782 sc->intrs[i].intr_res);
783 sc->intrs[i].tag = NULL;
784 sc->intrs[i].intr_res = NULL;
785 }
786
787 if (sc->flags & OCE_FLAGS_USING_MSIX)
788 pci_release_msi(sc->dev);
789
790}
791
792
793
794/******************************************************************************
795* Media callbacks *
796******************************************************************************/
797
798static void
799oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
800{
801 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
802
803
804 req->ifm_status = IFM_AVALID;
805 req->ifm_active = IFM_ETHER;
806
807 if (sc->link_status == 1)
808 req->ifm_status |= IFM_ACTIVE;
809 else
810 return;
811
812 switch (sc->link_speed) {
813 case 1: /* 10 Mbps */
814 req->ifm_active |= IFM_10_T | IFM_FDX;
815 sc->speed = 10;
816 break;
817 case 2: /* 100 Mbps */
818 req->ifm_active |= IFM_100_TX | IFM_FDX;
819 sc->speed = 100;
820 break;
821 case 3: /* 1 Gbps */
822 req->ifm_active |= IFM_1000_T | IFM_FDX;
823 sc->speed = 1000;
824 break;
825 case 4: /* 10 Gbps */
826 req->ifm_active |= IFM_10G_SR | IFM_FDX;
827 sc->speed = 10000;
828 break;
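	/* No distinct media types are used for 20G/25G below; they are
	 * reported as 10G media while sc->speed carries the real rate. */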
829 case 5: /* 20 Gbps */
830 req->ifm_active |= IFM_10G_SR | IFM_FDX;
831 sc->speed = 20000;
832 break;
833 case 6: /* 25 Gbps */
834 req->ifm_active |= IFM_10G_SR | IFM_FDX;
835 sc->speed = 25000;
836 break;
837 case 7: /* 40 Gbps */
838 req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
839 sc->speed = 40000;
840 break;
841 default:
842 sc->speed = 0;
843 break;
844 }
845
846 return;
847}
848
849
850int
851oce_media_change(struct ifnet *ifp)
852{
853 return 0;
854}
855
856
857
858
859/*****************************************************************************
860 * Transmit routines *
861 *****************************************************************************/
862
863static int
864oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
865{
866 int rc = 0, i, retry_cnt = 0;
867 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
868 struct mbuf *m, *m_temp;
869 struct oce_wq *wq = sc->wq[wq_index];
870 struct oce_packet_desc *pd;
871 struct oce_nic_hdr_wqe *nichdr;
872 struct oce_nic_frag_wqe *nicfrag;
873 int num_wqes;
874 uint32_t reg_value;
875 boolean_t complete = TRUE;
876
877 m = *mpp;
878 if (!m)
879 return EINVAL;
880
881 if (!(m->m_flags & M_PKTHDR)) {
882 rc = ENXIO;
883 goto free_ret;
884 }
885
886 if(oce_tx_asic_stall_verify(sc, m)) {
887 m = oce_insert_vlan_tag(sc, m, &complete);
888 if(!m) {
889 device_printf(sc->dev, "Insertion unsuccessful\n");
890 return 0;
891 }
892
893 }
894
895 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
896 /* consolidate packet buffers for TSO/LSO segment offload */
897#if defined(INET6) || defined(INET)
898 m = oce_tso_setup(sc, mpp);
899#else
900 m = NULL;
901#endif
902 if (m == NULL) {
903 rc = ENXIO;
904 goto free_ret;
905 }
906 }
907
908 pd = &wq->pckts[wq->pkt_desc_head];
909retry:
910 rc = bus_dmamap_load_mbuf_sg(wq->tag,
911 pd->map,
912 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
913 if (rc == 0) {
914 num_wqes = pd->nsegs + 1;
915 if (IS_BE(sc) || IS_SH(sc)) {
916 /*Dummy required only for BE3.*/
917 if (num_wqes & 1)
918 num_wqes++;
919 }
920 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
921 bus_dmamap_unload(wq->tag, pd->map);
922 return EBUSY;
923 }
924 atomic_store_rel_int(&wq->pkt_desc_head,
925 (wq->pkt_desc_head + 1) % \
926 OCE_WQ_PACKET_ARRAY_SIZE);
927 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
928 pd->mbuf = m;
929
930 nichdr =
931 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
932 nichdr->u0.dw[0] = 0;
933 nichdr->u0.dw[1] = 0;
934 nichdr->u0.dw[2] = 0;
935 nichdr->u0.dw[3] = 0;
936
937 nichdr->u0.s.complete = complete;
938 nichdr->u0.s.event = 1;
939 nichdr->u0.s.crc = 1;
940 nichdr->u0.s.forward = 0;
941 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
942 nichdr->u0.s.udpcs =
943 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
944 nichdr->u0.s.tcpcs =
945 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
946 nichdr->u0.s.num_wqe = num_wqes;
947 nichdr->u0.s.total_length = m->m_pkthdr.len;
948
949 if (m->m_flags & M_VLANTAG) {
950 nichdr->u0.s.vlan = 1; /*Vlan present*/
951 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
952 }
953
954 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
955 if (m->m_pkthdr.tso_segsz) {
956 nichdr->u0.s.lso = 1;
957 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
958 }
959 if (!IS_BE(sc) || !IS_SH(sc))
960 nichdr->u0.s.ipcs = 1;
961 }
962
963 RING_PUT(wq->ring, 1);
964 atomic_add_int(&wq->ring->num_used, 1);
965
966 for (i = 0; i < pd->nsegs; i++) {
967 nicfrag =
968 RING_GET_PRODUCER_ITEM_VA(wq->ring,
969 struct oce_nic_frag_wqe);
970 nicfrag->u0.s.rsvd0 = 0;
971 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
972 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
973 nicfrag->u0.s.frag_len = segs[i].ds_len;
974 pd->wqe_idx = wq->ring->pidx;
975 RING_PUT(wq->ring, 1);
976 atomic_add_int(&wq->ring->num_used, 1);
977 }
978 if (num_wqes > (pd->nsegs + 1)) {
979 nicfrag =
980 RING_GET_PRODUCER_ITEM_VA(wq->ring,
981 struct oce_nic_frag_wqe);
982 nicfrag->u0.dw[0] = 0;
983 nicfrag->u0.dw[1] = 0;
984 nicfrag->u0.dw[2] = 0;
985 nicfrag->u0.dw[3] = 0;
986 pd->wqe_idx = wq->ring->pidx;
987 RING_PUT(wq->ring, 1);
988 atomic_add_int(&wq->ring->num_used, 1);
989 pd->nsegs++;
990 }
991
992 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
993 wq->tx_stats.tx_reqs++;
994 wq->tx_stats.tx_wrbs += num_wqes;
995 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
996 wq->tx_stats.tx_pkts++;
997
998 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
999 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
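		/* Doorbell layout: WQE count posted in the high 16 bits, WQ id in the low 16. */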
1000 reg_value = (num_wqes << 16) | wq->wq_id;
1001 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1002
1003 } else if (rc == EFBIG) {
1004 if (retry_cnt == 0) {
1005 m_temp = m_defrag(m, M_NOWAIT);
1006 if (m_temp == NULL)
1007 goto free_ret;
1008 m = m_temp;
1009 *mpp = m_temp;
1010 retry_cnt = retry_cnt + 1;
1011 goto retry;
1012 } else
1013 goto free_ret;
1014 } else if (rc == ENOMEM)
1015 return rc;
1016 else
1017 goto free_ret;
1018
1019 return 0;
1020
1021free_ret:
1022 m_freem(*mpp);
1023 *mpp = NULL;
1024 return rc;
1025}
1026
1027
1028static void
1029oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1030{
1031 struct oce_packet_desc *pd;
1032 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1033 struct mbuf *m;
1034
1035 pd = &wq->pckts[wq->pkt_desc_tail];
1036 atomic_store_rel_int(&wq->pkt_desc_tail,
1037 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1038 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1039 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1040 bus_dmamap_unload(wq->tag, pd->map);
1041
1042 m = pd->mbuf;
1043 m_freem(m);
1044 pd->mbuf = NULL;
1045
1046
1047 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1048 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1049 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1050 oce_tx_restart(sc, wq);
1051 }
1052 }
1053}
1054
1055
1056static void
1057oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1058{
1059
1060 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1061 return;
1062
1063#if __FreeBSD_version >= 800000
1064 if (!drbr_empty(sc->ifp, wq->br))
1065#else
1066 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1067#endif
1068 taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1069
1070}
1071
1072
1073#if defined(INET6) || defined(INET)
1074static struct mbuf *
1075oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1076{
1077 struct mbuf *m;
1078#ifdef INET
1079 struct ip *ip;
1080#endif
1081#ifdef INET6
1082 struct ip6_hdr *ip6;
1083#endif
1084 struct ether_vlan_header *eh;
1085 struct tcphdr *th;
1086 uint16_t etype;
1087 int total_len = 0, ehdrlen = 0;
1088
1089 m = *mpp;
1090
1091 if (M_WRITABLE(m) == 0) {
1092 m = m_dup(*mpp, M_NOWAIT);
1093 if (!m)
1094 return NULL;
1095 m_freem(*mpp);
1096 *mpp = m;
1097 }
1098
1099 eh = mtod(m, struct ether_vlan_header *);
1100 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1101 etype = ntohs(eh->evl_proto);
1102 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1103 } else {
1104 etype = ntohs(eh->evl_encap_proto);
1105 ehdrlen = ETHER_HDR_LEN;
1106 }
1107
1108 switch (etype) {
1109#ifdef INET
1110 case ETHERTYPE_IP:
1111 ip = (struct ip *)(m->m_data + ehdrlen);
1112 if (ip->ip_p != IPPROTO_TCP)
1113 return NULL;
1114 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1115
1116 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1117 break;
1118#endif
1119#ifdef INET6
1120 case ETHERTYPE_IPV6:
1121 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1122 if (ip6->ip6_nxt != IPPROTO_TCP)
1123 return NULL;
1124 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1125
1126 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1127 break;
1128#endif
1129 default:
1130 return NULL;
1131 }
1132
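	/*
	 * m_pullup() leaves the full Ethernet/IP/TCP header run contiguous
	 * in the first mbuf, so the TX path can parse it with plain
	 * pointer arithmetic on m_data.
	 */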
1133 m = m_pullup(m, total_len);
1134 if (!m)
1135 return NULL;
1136 *mpp = m;
1137 return m;
1138
1139}
1140#endif /* INET6 || INET */
1141
1142void
1143oce_tx_task(void *arg, int npending)
1144{
1145 struct oce_wq *wq = arg;
1146 POCE_SOFTC sc = wq->parent;
1147 struct ifnet *ifp = sc->ifp;
1148 int rc = 0;
1149
1150#if __FreeBSD_version >= 800000
1151 LOCK(&wq->tx_lock);
1152 rc = oce_multiq_transmit(ifp, NULL, wq);
1153 if (rc) {
1154 device_printf(sc->dev,
1155 "TX[%d] restart failed\n", wq->queue_index);
1156 }
1157 UNLOCK(&wq->tx_lock);
1158#else
1159 oce_start(ifp);
1160#endif
1161
1162}
1163
1164
1165void
1166oce_start(struct ifnet *ifp)
1167{
1168 POCE_SOFTC sc = ifp->if_softc;
1169 struct mbuf *m;
1170 int rc = 0;
1171 int def_q = 0; /* Default tx queue is 0 */
1172
1173 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1174 IFF_DRV_RUNNING)
1175 return;
1176
1177 if (!sc->link_status)
1178 return;
1179
1180 do {
1181 IF_DEQUEUE(&sc->ifp->if_snd, m);
1182 if (m == NULL)
1183 break;
1184
1185 LOCK(&sc->wq[def_q]->tx_lock);
1186 rc = oce_tx(sc, &m, def_q);
1187 UNLOCK(&sc->wq[def_q]->tx_lock);
1188 if (rc) {
1189 if (m != NULL) {
1190 sc->wq[def_q]->tx_stats.tx_stops ++;
1191 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1192 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1193 m = NULL;
1194 }
1195 break;
1196 }
1197 if (m != NULL)
1198 ETHER_BPF_MTAP(ifp, m);
1199
1200 } while (TRUE);
1201
1202 return;
1203}
1204
1205
1206/* Handle the Completion Queue for transmit */
1207uint16_t
1208oce_wq_handler(void *arg)
1209{
1210 struct oce_wq *wq = (struct oce_wq *)arg;
1211 POCE_SOFTC sc = wq->parent;
1212 struct oce_cq *cq = wq->cq;
1213 struct oce_nic_tx_cqe *cqe;
1214 int num_cqes = 0;
1215
1216 bus_dmamap_sync(cq->ring->dma.tag,
1217 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1218 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1219 while (cqe->u0.dw[3]) {
1220 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1221
1222 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1223 if (wq->ring->cidx >= wq->ring->num_items)
1224 wq->ring->cidx -= wq->ring->num_items;
1225
1226 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1227 wq->tx_stats.tx_compl++;
1228 cqe->u0.dw[3] = 0;
1229 RING_GET(cq->ring, 1);
1230 bus_dmamap_sync(cq->ring->dma.tag,
1231 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1232 cqe =
1233 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1234 num_cqes++;
1235 }
1236
1237 if (num_cqes)
1238 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1239
1240 return 0;
1241}
1242
1243
1244static int
1245oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1246{
1247 POCE_SOFTC sc = ifp->if_softc;
1248 int status = 0, queue_index = 0;
1249 struct mbuf *next = NULL;
1250 struct buf_ring *br = NULL;
1251
1252 br = wq->br;
1253 queue_index = wq->queue_index;
1254
1255 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1256 IFF_DRV_RUNNING) {
1257 if (m != NULL)
1258 status = drbr_enqueue(ifp, br, m);
1259 return status;
1260 }
1261
1262 if (m != NULL) {
1263 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1264 return status;
1265 }
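	/*
	 * drbr_peek() does not consume the mbuf: on a failed oce_tx() it
	 * is either already freed (just advance past it) or still valid
	 * (put the possibly-defragmented chain back); on success
	 * drbr_advance() removes it from the ring.
	 */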
1266 while ((next = drbr_peek(ifp, br)) != NULL) {
1267 if (oce_tx(sc, &next, queue_index)) {
1268 if (next == NULL) {
1269 drbr_advance(ifp, br);
1270 } else {
1271 drbr_putback(ifp, br, next);
1272 wq->tx_stats.tx_stops ++;
1273 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1274 }
1275 break;
1276 }
1277 drbr_advance(ifp, br);
1278 if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1279 if (next->m_flags & M_MCAST)
1280 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1281 ETHER_BPF_MTAP(ifp, next);
1282 }
1283
1284 return 0;
1285}
1286
1287
1288
1289
1290/*****************************************************************************
1291 * Receive routines *
1292 *****************************************************************************/
1293
1294static void
1295oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1296{
1297 uint32_t out;
1298 struct oce_packet_desc *pd;
1299 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1300 int i, len, frag_len;
1301 struct mbuf *m = NULL, *tail = NULL;
1302 uint16_t vtag;
1303
1304 len = cqe->u0.s.pkt_size;
1305 if (!len) {
1306 /*partial DMA workaround for Lancer*/
1307 oce_discard_rx_comp(rq, cqe);
1308 goto exit;
1309 }
1310
1311 /* Get vlan_tag value */
1312 if(IS_BE(sc) || IS_SH(sc))
1313 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1314 else
1315 vtag = cqe->u0.s.vlan_tag;
1316
1317
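	/*
	 * A completion can span several RQ buffers: the first fragment
	 * carries the packet header (length, checksum flags), and later
	 * fragments are chained on via m_next with M_PKTHDR cleared.
	 */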
1318 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1319
1320 if (rq->packets_out == rq->packets_in) {
1321 device_printf(sc->dev,
1322 "RQ transmit descriptor missing\n");
1323 }
1324 out = rq->packets_out + 1;
1325 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1326 out = 0;
1327 pd = &rq->pckts[rq->packets_out];
1328 rq->packets_out = out;
1329
1330 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1331 bus_dmamap_unload(rq->tag, pd->map);
1332 rq->pending--;
1333
1334 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1335 pd->mbuf->m_len = frag_len;
1336
1337 if (tail != NULL) {
1338 /* additional fragments */
1339 pd->mbuf->m_flags &= ~M_PKTHDR;
1340 tail->m_next = pd->mbuf;
1341 tail = pd->mbuf;
1342 } else {
1343 /* first fragment, fill out much of the packet header */
1344 pd->mbuf->m_pkthdr.len = len;
1345 pd->mbuf->m_pkthdr.csum_flags = 0;
1346 if (IF_CSUM_ENABLED(sc)) {
1347 if (cqe->u0.s.l4_cksum_pass) {
1348 pd->mbuf->m_pkthdr.csum_flags |=
1349 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1350 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1351 }
1352 if (cqe->u0.s.ip_cksum_pass) {
1353 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1354 pd->mbuf->m_pkthdr.csum_flags |=
1355 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1356 }
1357 }
1358 }
1359 m = tail = pd->mbuf;
1360 }
1361 pd->mbuf = NULL;
1362 len -= frag_len;
1363 }
1364
1365 if (m) {
1366 if (!oce_cqe_portid_valid(sc, cqe)) {
1367 m_freem(m);
1368 goto exit;
1369 }
1370
1371 m->m_pkthdr.rcvif = sc->ifp;
1372#if __FreeBSD_version >= 800000
1373 if (rq->queue_index)
1374 m->m_pkthdr.flowid = (rq->queue_index - 1);
1375 else
1376 m->m_pkthdr.flowid = rq->queue_index;
1377 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1378#endif
1379 /* This determines if the vlan tag is valid */
1380 if (oce_cqe_vtp_valid(sc, cqe)) {
1381 if (sc->function_mode & FNM_FLEX10_MODE) {
1382 /* FLEX10. If QnQ is not set, neglect VLAN */
1383 if (cqe->u0.s.qnq) {
1384 m->m_pkthdr.ether_vtag = vtag;
1385 m->m_flags |= M_VLANTAG;
1386 }
1387 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1388 /* In UMC mode the pvid is generally stripped by
1389 hw, but in some cases frames still arrive
1390 with it. So if pvid == vlan, ignore the vlan.
1391 */
1392 m->m_pkthdr.ether_vtag = vtag;
1393 m->m_flags |= M_VLANTAG;
1394 }
1395 }
1396
1397 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1398#if defined(INET6) || defined(INET)
1399 /* Try to queue to LRO */
1400 if (IF_LRO_ENABLED(sc) &&
1401 (cqe->u0.s.ip_cksum_pass) &&
1402 (cqe->u0.s.l4_cksum_pass) &&
1403 (!cqe->u0.s.ip_ver) &&
1404 (rq->lro.lro_cnt != 0)) {
1405
1406 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1407 rq->lro_pkts_queued ++;
1408 goto post_done;
1409 }
1410 /* If LRO posting fails then try to post to STACK */
1411 }
1412#endif
1413
1414 (*sc->ifp->if_input) (sc->ifp, m);
1415#if defined(INET6) || defined(INET)
1416post_done:
1417#endif
1418 /* Update rx stats per queue */
1419 rq->rx_stats.rx_pkts++;
1420 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1421 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1422 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1423 rq->rx_stats.rx_mcast_pkts++;
1424 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1425 rq->rx_stats.rx_ucast_pkts++;
1426 }
1427exit:
1428 return;
1429}
1430
1431
1432static void
1433oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1434{
1435 uint32_t out, i = 0;
1436 struct oce_packet_desc *pd;
1437 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1438 int num_frags = cqe->u0.s.num_fragments;
1439
1440 for (i = 0; i < num_frags; i++) {
1441 if (rq->packets_out == rq->packets_in) {
1442 device_printf(sc->dev,
1443 "RQ transmit descriptor missing\n");
1444 }
1445 out = rq->packets_out + 1;
1446 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1447 out = 0;
1448 pd = &rq->pckts[rq->packets_out];
1449 rq->packets_out = out;
1450
1451 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1452 bus_dmamap_unload(rq->tag, pd->map);
1453 rq->pending--;
1454 m_freem(pd->mbuf);
1455 }
1456
1457}
1458
1459
1460static int
1461oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1462{
1463 struct oce_nic_rx_cqe_v1 *cqe_v1;
1464 int vtp = 0;
1465
1466 if (sc->be3_native) {
1467 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1468 vtp = cqe_v1->u0.s.vlan_tag_present;
1469 } else
1470 vtp = cqe->u0.s.vlan_tag_present;
1471
1472 return vtp;
1473
1474}
1475
1476
1477static int
1478oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1479{
1480 struct oce_nic_rx_cqe_v1 *cqe_v1;
1481 int port_id = 0;
1482
1483 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1484 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1485 port_id = cqe_v1->u0.s.port;
1486 if (sc->port_id != port_id)
1487 return 0;
1488 } else
1489 ; /* For BE3 legacy and Lancer this check is a no-op */
1490
1491 return 1;
1492
1493}
1494
1495#if defined(INET6) || defined(INET)
1496static void
1497oce_rx_flush_lro(struct oce_rq *rq)
1498{
1499 struct lro_ctrl *lro = &rq->lro;
1500 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1501
1502 if (!IF_LRO_ENABLED(sc))
1503 return;
1504
1505 tcp_lro_flush_all(lro);
1506 rq->lro_pkts_queued = 0;
1507
1508 return;
1509}
1510
1511
1512static int
1513oce_init_lro(POCE_SOFTC sc)
1514{
1515 struct lro_ctrl *lro = NULL;
1516 int i = 0, rc = 0;
1517
1518 for (i = 0; i < sc->nrqs; i++) {
1519 lro = &sc->rq[i]->lro;
1520 rc = tcp_lro_init(lro);
1521 if (rc != 0) {
1522 device_printf(sc->dev, "LRO init failed\n");
1523 return rc;
1524 }
1525 lro->ifp = sc->ifp;
1526 }
1527
1528 return rc;
1529}
1530
1531
1532void
1533oce_free_lro(POCE_SOFTC sc)
1534{
1535 struct lro_ctrl *lro = NULL;
1536 int i = 0;
1537
1538 for (i = 0; i < sc->nrqs; i++) {
1539 lro = &sc->rq[i]->lro;
1540 if (lro)
1541 tcp_lro_free(lro);
1542 }
1543}
1544#endif
1545
1546int
1547oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1548{
1549 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1550 int i, in, rc;
1551 struct oce_packet_desc *pd;
1552 bus_dma_segment_t segs[6];
1553 int nsegs, added = 0;
1554 struct oce_nic_rqe *rqe;
1555 pd_rxulp_db_t rxdb_reg;
1556
1557 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1558 for (i = 0; i < count; i++) {
1559 in = rq->packets_in + 1;
1560 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1561 in = 0;
1562 if (in == rq->packets_out)
1563 break; /* no more room */
1564
1565 pd = &rq->pckts[rq->packets_in];
1566 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1567 if (pd->mbuf == NULL)
1568 break;
1569
1570 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1571 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1572 pd->map,
1573 pd->mbuf,
1574 segs, &nsegs, BUS_DMA_NOWAIT);
1575 if (rc) {
1576 m_free(pd->mbuf);
1577 break;
1578 }
1579
1580 if (nsegs != 1) {
1581 i--;
1582 continue;
1583 }
1584
1585 rq->packets_in = in;
1586 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1587
1588 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1589 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1590 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1591 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1592 RING_PUT(rq->ring, 1);
1593 added++;
1594 rq->pending++;
1595 }
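	/*
	 * The RXULP doorbell can report at most OCE_MAX_RQ_POSTS new
	 * buffers per write, so a large refill is posted in full-sized
	 * chunks and any remainder goes in one final write.
	 */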
1596 if (added != 0) {
1597 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1598 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1599 rxdb_reg.bits.qid = rq->rq_id;
1600 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1601 added -= OCE_MAX_RQ_POSTS;
1602 }
1603 if (added > 0) {
1604 rxdb_reg.bits.qid = rq->rq_id;
1605 rxdb_reg.bits.num_posted = added;
1606 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1607 }
1608 }
1609
1610 return 0;
1611}
1612
1613
1614/* Handle the Completion Queue for receive */
1615uint16_t
1616oce_rq_handler(void *arg)
1617{
1618 struct oce_rq *rq = (struct oce_rq *)arg;
1619 struct oce_cq *cq = rq->cq;
1620 POCE_SOFTC sc = rq->parent;
1621 struct oce_nic_rx_cqe *cqe;
1622 int num_cqes = 0, rq_buffers_used = 0;
1623
1624
1625 bus_dmamap_sync(cq->ring->dma.tag,
1626 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1627 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1628 while (cqe->u0.dw[2]) {
1629 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1630
1631 RING_GET(rq->ring, 1);
1632 if (cqe->u0.s.error == 0) {
1633 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1634 } else {
1635 rq->rx_stats.rxcp_err++;
1636 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1637 /* Post L3/L4 errors to stack.*/
1638 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1639 }
1640 rq->rx_stats.rx_compl++;
1641 cqe->u0.dw[2] = 0;
1642
1643#if defined(INET6) || defined(INET)
1644 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1645 oce_rx_flush_lro(rq);
1646 }
1647#endif
1648
1649 RING_GET(cq->ring, 1);
1650 bus_dmamap_sync(cq->ring->dma.tag,
1651 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1652 cqe =
1653 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1654 num_cqes++;
1655 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1656 break;
1657 }
1658
1659#if defined(INET6) || defined(INET)
1660 if (IF_LRO_ENABLED(sc))
1661 oce_rx_flush_lro(rq);
1662#endif
1663
1664 if (num_cqes) {
1665 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1666 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1667 if (rq_buffers_used > 1)
1668 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1669 }
1670
1671 return 0;
1672
1673}
1674
1675
1676
1677
1678/*****************************************************************************
1679 * Helper functions *
1680 *****************************************************************************/
1681
1682static int
1683oce_attach_ifp(POCE_SOFTC sc)
1684{
1685
1686 sc->ifp = if_alloc(IFT_ETHER);
1687 if (!sc->ifp)
1688 return ENOMEM;
1689
1690 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1691 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1692 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1693
1694 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1695 sc->ifp->if_ioctl = oce_ioctl;
1696 sc->ifp->if_start = oce_start;
1697 sc->ifp->if_init = oce_init;
1698 sc->ifp->if_mtu = ETHERMTU;
1699 sc->ifp->if_softc = sc;
1700#if __FreeBSD_version >= 800000
1701 sc->ifp->if_transmit = oce_multiq_start;
1702 sc->ifp->if_qflush = oce_multiq_flush;
1703#endif
1704
1705 if_initname(sc->ifp,
1706 device_get_name(sc->dev), device_get_unit(sc->dev));
1707
1708 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1709 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1710 IFQ_SET_READY(&sc->ifp->if_snd);
1711
1712 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1713 sc->ifp->if_hwassist |= CSUM_TSO;
1714 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1715
1716 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1717 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1718 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1719
1720#if defined(INET6) || defined(INET)
1721 sc->ifp->if_capabilities |= IFCAP_TSO;
1722 sc->ifp->if_capabilities |= IFCAP_LRO;
1723 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1724#endif
1725
1726 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1727 sc->ifp->if_baudrate = IF_Gbps(10);
1728
1729#if __FreeBSD_version >= 1000000
1730 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1731 sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
1732 sc->ifp->if_hw_tsomaxsegsize = 4096;
1733#endif
1734
1735 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1736
1737 return 0;
1738}
1739
1740
1741static void
1742oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1743{
1744 POCE_SOFTC sc = ifp->if_softc;
1745
1746 if (ifp->if_softc != arg)
1747 return;
1748 if ((vtag == 0) || (vtag > 4095))
1749 return;
1750
1751 sc->vlan_tag[vtag] = 1;
1752 sc->vlans_added++;
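	/* Reprogram the filter only up to the point it first overflows;
	 * past that, oce_vid_config() should already have left the port
	 * in vlan promiscuous mode. */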
1753 if (sc->vlans_added <= (sc->max_vlans + 1))
1754 oce_vid_config(sc);
1755}
1756
1757
1758static void
1759oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1760{
1761 POCE_SOFTC sc = ifp->if_softc;
1762
1763 if (ifp->if_softc != arg)
1764 return;
1765 if ((vtag == 0) || (vtag > 4095))
1766 return;
1767
1768 sc->vlan_tag[vtag] = 0;
1769 sc->vlans_added--;
1770 oce_vid_config(sc);
1771}
1772
1773
1774/*
1775 * A max of 64 vlans can be configured in BE. If the user configures
1776 * more, place the card in vlan promiscuous mode.
1777 */
1778static int
1779oce_vid_config(POCE_SOFTC sc)
1780{
1781 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1782 uint16_t ntags = 0, i;
1783 int status = 0;
1784
1785 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1786 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1787 for (i = 0; i < MAX_VLANS; i++) {
1788 if (sc->vlan_tag[i]) {
1789 vtags[ntags].vtag = i;
1790 ntags++;
1791 }
1792 }
1793 if (ntags)
1794 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1795 vtags, ntags, 1, 0);
1796 } else
1797 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1798 NULL, 0, 1, 1);
1799 return status;
1800}
1801
1802
1803static void
1804oce_mac_addr_set(POCE_SOFTC sc)
1805{
1806 uint32_t old_pmac_id = sc->pmac_id;
1807 int status = 0;
1808
1809
1810 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1811 sc->macaddr.size_of_struct);
1812 if (!status)
1813 return;
1814
1815 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1816 sc->if_id, &sc->pmac_id);
1817 if (!status) {
1818 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1819 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1820 sc->macaddr.size_of_struct);
1821 }
1822 if (status)
1823 device_printf(sc->dev, "Failed to update MAC address\n");
1824
1825}
1826
1827
1828static int
1829oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1830{
1831 POCE_SOFTC sc = ifp->if_softc;
1832 struct ifreq *ifr = (struct ifreq *)data;
1833 int rc = ENXIO;
1834 char cookie[32] = {0};
40
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46/* UE Status Low CSR */
47static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81
82/* UE Status High CSR */
83static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
117
118
119/* Driver entry points prototypes */
120static int oce_probe(device_t dev);
121static int oce_attach(device_t dev);
122static int oce_detach(device_t dev);
123static int oce_shutdown(device_t dev);
124static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125static void oce_init(void *xsc);
126static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127static void oce_multiq_flush(struct ifnet *ifp);
128
129/* Driver interrupt routines protypes */
130static void oce_intr(void *arg, int pending);
131static int oce_setup_intr(POCE_SOFTC sc);
132static int oce_fast_isr(void *arg);
133static int oce_alloc_intr(POCE_SOFTC sc, int vector,
134 void (*isr) (void *arg, int pending));
135
136/* Media callbacks prototypes */
137static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138static int oce_media_change(struct ifnet *ifp);
139
140/* Transmit routines prototypes */
141static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 uint32_t status);
145static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 struct oce_wq *wq);
147
148/* Receive routines prototypes */
149static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 struct oce_nic_rx_cqe *cqe);
154
155/* Helper function prototypes in this file */
156static int oce_attach_ifp(POCE_SOFTC sc);
157static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159static int oce_vid_config(POCE_SOFTC sc);
160static void oce_mac_addr_set(POCE_SOFTC sc);
161static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162static void oce_local_timer(void *arg);
163static void oce_if_deactivate(POCE_SOFTC sc);
164static void oce_if_activate(POCE_SOFTC sc);
165static void setup_max_queues_want(POCE_SOFTC sc);
166static void update_queues_got(POCE_SOFTC sc);
167static void process_link_state(POCE_SOFTC sc,
168 struct oce_async_cqe_link_state *acqe);
169static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170static void oce_get_config(POCE_SOFTC sc);
171static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172
173/* IP specific */
174#if defined(INET6) || defined(INET)
175static int oce_init_lro(POCE_SOFTC sc);
176static void oce_rx_flush_lro(struct oce_rq *rq);
177static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178#endif
179
180static device_method_t oce_dispatch[] = {
181 DEVMETHOD(device_probe, oce_probe),
182 DEVMETHOD(device_attach, oce_attach),
183 DEVMETHOD(device_detach, oce_detach),
184 DEVMETHOD(device_shutdown, oce_shutdown),
185
186 DEVMETHOD_END
187};
188
189static driver_t oce_driver = {
190 "oce",
191 oce_dispatch,
192 sizeof(OCE_SOFTC)
193};
194static devclass_t oce_devclass;
195
196
197DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
198MODULE_DEPEND(oce, pci, 1, 1, 1);
199MODULE_DEPEND(oce, ether, 1, 1, 1);
200MODULE_VERSION(oce, 1);
201
202
203/* global vars */
204const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
205
206/* Module capabilites and parameters */
207uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
208uint32_t oce_enable_rss = OCE_MODCAP_RSS;
209
210
211TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
212TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
213
214
215/* Supported devices table */
216static uint32_t supportedDevices[] = {
217 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
218 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
219 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
220 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
221 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
222 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
223};
224
225
226
227
228/*****************************************************************************
229 * Driver entry points functions *
230 *****************************************************************************/
231
232static int
233oce_probe(device_t dev)
234{
235 uint16_t vendor = 0;
236 uint16_t device = 0;
237 int i = 0;
238 char str[256] = {0};
239 POCE_SOFTC sc;
240
241 sc = device_get_softc(dev);
242 bzero(sc, sizeof(OCE_SOFTC));
243 sc->dev = dev;
244
245 vendor = pci_get_vendor(dev);
246 device = pci_get_device(dev);
247
248 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
249 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
250 if (device == (supportedDevices[i] & 0xffff)) {
251 sprintf(str, "%s:%s", "Emulex CNA NIC function",
252 component_revision);
253 device_set_desc_copy(dev, str);
254
255 switch (device) {
256 case PCI_PRODUCT_BE2:
257 sc->flags |= OCE_FLAGS_BE2;
258 break;
259 case PCI_PRODUCT_BE3:
260 sc->flags |= OCE_FLAGS_BE3;
261 break;
262 case PCI_PRODUCT_XE201:
263 case PCI_PRODUCT_XE201_VF:
264 sc->flags |= OCE_FLAGS_XE201;
265 break;
266 case PCI_PRODUCT_SH:
267 sc->flags |= OCE_FLAGS_SH;
268 break;
269 default:
270 return ENXIO;
271 }
272 return BUS_PROBE_DEFAULT;
273 }
274 }
275 }
276
277 return ENXIO;
278}
279
280
281static int
282oce_attach(device_t dev)
283{
284 POCE_SOFTC sc;
285 int rc = 0;
286
287 sc = device_get_softc(dev);
288
289 rc = oce_hw_pci_alloc(sc);
290 if (rc)
291 return rc;
292
293 sc->tx_ring_size = OCE_TX_RING_SIZE;
294 sc->rx_ring_size = OCE_RX_RING_SIZE;
295 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
296 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
297 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
298
299 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
300 LOCK_CREATE(&sc->dev_lock, "Device_lock");
301
302 /* initialise the hardware */
303 rc = oce_hw_init(sc);
304 if (rc)
305 goto pci_res_free;
306
307 oce_get_config(sc);
308
309 setup_max_queues_want(sc);
310
311 rc = oce_setup_intr(sc);
312 if (rc)
313 goto mbox_free;
314
315 rc = oce_queue_init_all(sc);
316 if (rc)
317 goto intr_free;
318
319 rc = oce_attach_ifp(sc);
320 if (rc)
321 goto queues_free;
322
323#if defined(INET6) || defined(INET)
324 rc = oce_init_lro(sc);
325 if (rc)
326 goto ifp_free;
327#endif
328
329 rc = oce_hw_start(sc);
330 if (rc)
331 goto lro_free;
332
333 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
334 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
335 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
336 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
337
338 rc = oce_stats_init(sc);
339 if (rc)
340 goto vlan_free;
341
342 oce_add_sysctls(sc);
343
344 callout_init(&sc->timer, 1);
345 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
346 if (rc)
347 goto stats_free;
348
349 return 0;
350
351stats_free:
352 callout_drain(&sc->timer);
353 oce_stats_free(sc);
354vlan_free:
355 if (sc->vlan_attach)
356 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
357 if (sc->vlan_detach)
358 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
359 oce_hw_intr_disable(sc);
360lro_free:
361#if defined(INET6) || defined(INET)
362 oce_free_lro(sc);
363ifp_free:
364#endif
365 ether_ifdetach(sc->ifp);
366 if_free(sc->ifp);
367queues_free:
368 oce_queue_release_all(sc);
369intr_free:
370 oce_intr_free(sc);
371mbox_free:
372 oce_dma_free(sc, &sc->bsmbx);
373pci_res_free:
374 oce_hw_pci_free(sc);
375 LOCK_DESTROY(&sc->dev_lock);
376 LOCK_DESTROY(&sc->bmbx_lock);
377 return rc;
378
379}
380
381
382static int
383oce_detach(device_t dev)
384{
385 POCE_SOFTC sc = device_get_softc(dev);
386
387 LOCK(&sc->dev_lock);
388 oce_if_deactivate(sc);
389 UNLOCK(&sc->dev_lock);
390
391 callout_drain(&sc->timer);
392
393 if (sc->vlan_attach != NULL)
394 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
395 if (sc->vlan_detach != NULL)
396 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
397
398 ether_ifdetach(sc->ifp);
399
400 if_free(sc->ifp);
401
402 oce_hw_shutdown(sc);
403
404 bus_generic_detach(dev);
405
406 return 0;
407}
408
409
410static int
411oce_shutdown(device_t dev)
412{
413 int rc;
414
415 rc = oce_detach(dev);
416
417 return rc;
418}
419
420
421static int
422oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
423{
424 struct ifreq *ifr = (struct ifreq *)data;
425 POCE_SOFTC sc = ifp->if_softc;
426 int rc = 0;
427 uint32_t u;
428
429 switch (command) {
430
431 case SIOCGIFMEDIA:
432 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
433 break;
434
435 case SIOCSIFMTU:
436 if (ifr->ifr_mtu > OCE_MAX_MTU)
437 rc = EINVAL;
438 else
439 ifp->if_mtu = ifr->ifr_mtu;
440 break;
441
442 case SIOCSIFFLAGS:
443 if (ifp->if_flags & IFF_UP) {
444 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
445 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
446 oce_init(sc);
447 }
448 device_printf(sc->dev, "Interface Up\n");
449 } else {
450 LOCK(&sc->dev_lock);
451
452 sc->ifp->if_drv_flags &=
453 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
454 oce_if_deactivate(sc);
455
456 UNLOCK(&sc->dev_lock);
457
458 device_printf(sc->dev, "Interface Down\n");
459 }
460
461 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
462 if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
463 sc->promisc = TRUE;
464 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
465 if (!oce_rxf_set_promiscuous(sc, 0))
466 sc->promisc = FALSE;
467 }
468
469 break;
470
471 case SIOCADDMULTI:
472 case SIOCDELMULTI:
473 rc = oce_hw_update_multicast(sc);
474 if (rc)
475 device_printf(sc->dev,
476 "Update multicast address failed\n");
477 break;
478
479 case SIOCSIFCAP:
480 u = ifr->ifr_reqcap ^ ifp->if_capenable;
481
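		/*
		 * u holds the capability bits being toggled; each block
		 * below flips one bit and keeps dependent offloads
		 * consistent (e.g. TSO requires TXCSUM).
		 */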
482 if (u & IFCAP_TXCSUM) {
483 ifp->if_capenable ^= IFCAP_TXCSUM;
484 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
485
486 if (IFCAP_TSO & ifp->if_capenable &&
487 !(IFCAP_TXCSUM & ifp->if_capenable)) {
488 ifp->if_capenable &= ~IFCAP_TSO;
489 ifp->if_hwassist &= ~CSUM_TSO;
490 if_printf(ifp,
491 "TSO disabled due to -txcsum.\n");
492 }
493 }
494
495 if (u & IFCAP_RXCSUM)
496 ifp->if_capenable ^= IFCAP_RXCSUM;
497
498 if (u & IFCAP_TSO4) {
499 ifp->if_capenable ^= IFCAP_TSO4;
500
501 if (IFCAP_TSO & ifp->if_capenable) {
502 if (IFCAP_TXCSUM & ifp->if_capenable)
503 ifp->if_hwassist |= CSUM_TSO;
504 else {
505 ifp->if_capenable &= ~IFCAP_TSO;
506 ifp->if_hwassist &= ~CSUM_TSO;
507 if_printf(ifp,
508 "Enable txcsum first.\n");
509 rc = EAGAIN;
510 }
511 } else
512 ifp->if_hwassist &= ~CSUM_TSO;
513 }
514
515 if (u & IFCAP_VLAN_HWTAGGING)
516 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
517
518 if (u & IFCAP_VLAN_HWFILTER) {
519 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
520 oce_vid_config(sc);
521 }
522#if defined(INET6) || defined(INET)
523 if (u & IFCAP_LRO)
524 ifp->if_capenable ^= IFCAP_LRO;
525#endif
526
527 break;
528
529 case SIOCGPRIVATE_0:
530 rc = oce_handle_passthrough(ifp, data);
531 break;
532 default:
533 rc = ether_ioctl(ifp, command, data);
534 break;
535 }
536
537 return rc;
538}
539
540
541static void
542oce_init(void *arg)
543{
544 POCE_SOFTC sc = arg;
545
546 LOCK(&sc->dev_lock);
547
548 if (sc->ifp->if_flags & IFF_UP) {
549 oce_if_deactivate(sc);
550 oce_if_activate(sc);
551 }
552
553 UNLOCK(&sc->dev_lock);
554
555}
556
557
558static int
559oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
560{
561 POCE_SOFTC sc = ifp->if_softc;
562 struct oce_wq *wq = NULL;
563 int queue_index = 0;
564 int status = 0;
565
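	/*
	 * If the stack supplied a flow hash, spread transmits across the
	 * work queues by taking the flowid modulo the number of WQs;
	 * otherwise everything goes to queue 0.
	 */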
566 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
567 queue_index = m->m_pkthdr.flowid % sc->nwqs;
568
569 wq = sc->wq[queue_index];
570
571 LOCK(&wq->tx_lock);
572 status = oce_multiq_transmit(ifp, m, wq);
573 UNLOCK(&wq->tx_lock);
574
575 return status;
576
577}
578
579
580static void
581oce_multiq_flush(struct ifnet *ifp)
582{
583 POCE_SOFTC sc = ifp->if_softc;
584 struct mbuf *m;
585 int i = 0;
586
587 for (i = 0; i < sc->nwqs; i++) {
588 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
589 m_freem(m);
590 }
591 if_qflush(ifp);
592}
593
594
595
596/*****************************************************************************
597 * Driver interrupt routines functions *
598 *****************************************************************************/
599
600static void
601oce_intr(void *arg, int pending)
602{
603
604 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
605 POCE_SOFTC sc = ii->sc;
606 struct oce_eq *eq = ii->eq;
607 struct oce_eqe *eqe;
608 struct oce_cq *cq = NULL;
609 int i, num_eqes = 0;
610
611
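	/*
	 * Drain the event queue: consume EQEs until a zeroed entry is
	 * found, tell the hardware how many were consumed without
	 * re-arming, run every attached CQ handler, arm those CQs, and
	 * finally re-arm the EQ itself.
	 */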
612 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
613 BUS_DMASYNC_POSTWRITE);
614 do {
615 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
616 if (eqe->evnt == 0)
617 break;
618 eqe->evnt = 0;
619 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
620 BUS_DMASYNC_POSTWRITE);
621 RING_GET(eq->ring, 1);
622 num_eqes++;
623
624 } while (TRUE);
625
626 if (!num_eqes)
627 goto eq_arm; /* Spurious */
628
629	/* Clear EQ entries, but don't arm */
630 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
631
632	/* Process TX, RX and MCC. But don't arm CQ */
633 for (i = 0; i < eq->cq_valid; i++) {
634 cq = eq->cq[i];
635 (*cq->cq_handler)(cq->cb_arg);
636 }
637
638 /* Arm all cqs connected to this EQ */
639 for (i = 0; i < eq->cq_valid; i++) {
640 cq = eq->cq[i];
641 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
642 }
643
644eq_arm:
645 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
646
647 return;
648}
649
650
651static int
652oce_setup_intr(POCE_SOFTC sc)
653{
654 int rc = 0, use_intx = 0;
655 int vector = 0, req_vectors = 0;
656
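	/*
	 * With RSS, one vector is requested per RSS receive queue (nrqs
	 * counts the default non-RSS RQ, hence nrqs - 1) or per work
	 * queue, whichever is larger; without RSS a single vector does.
	 */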
657 if (is_rss_enabled(sc))
658 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
659 else
660 req_vectors = 1;
661
662 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
663 sc->intr_count = req_vectors;
664 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
665 if (rc != 0) {
666 use_intx = 1;
667 pci_release_msi(sc->dev);
668 } else
669 sc->flags |= OCE_FLAGS_USING_MSIX;
670 } else
671 use_intx = 1;
672
673 if (use_intx)
674 sc->intr_count = 1;
675
676	/* Scale number of queues based on the interrupt vectors we got */
677 update_queues_got(sc);
678
679 if (use_intx) {
680 device_printf(sc->dev, "Using legacy interrupt\n");
681 rc = oce_alloc_intr(sc, vector, oce_intr);
682 if (rc)
683 goto error;
684 } else {
685 for (; vector < sc->intr_count; vector++) {
686 rc = oce_alloc_intr(sc, vector, oce_intr);
687 if (rc)
688 goto error;
689 }
690 }
691
692 return 0;
693error:
694 oce_intr_free(sc);
695 return rc;
696}
697
698
699static int
700oce_fast_isr(void *arg)
701{
702 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
703 POCE_SOFTC sc = ii->sc;
704
705 if (ii->eq == NULL)
706 return FILTER_STRAY;
707
708 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
709
710 taskqueue_enqueue(ii->tq, &ii->task);
711
712 ii->eq->intr++;
713
714 return FILTER_HANDLED;
715}
716
717
718static int
719oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
720{
721 POCE_INTR_INFO ii = &sc->intrs[vector];
722 int rc = 0, rr;
723
724 if (vector >= OCE_MAX_EQ)
725 return (EINVAL);
726
727 /* Set the resource id for the interrupt.
728 * MSIx is vector + 1 for the resource id,
729 * INTx is 0 for the resource id.
730 */
731 if (sc->flags & OCE_FLAGS_USING_MSIX)
732 rr = vector + 1;
733 else
734 rr = 0;
735 ii->intr_res = bus_alloc_resource_any(sc->dev,
736 SYS_RES_IRQ,
737 &rr, RF_ACTIVE|RF_SHAREABLE);
738 ii->irq_rr = rr;
739 if (ii->intr_res == NULL) {
740 device_printf(sc->dev,
741 "Could not allocate interrupt\n");
742 rc = ENXIO;
743 return rc;
744 }
745
746 TASK_INIT(&ii->task, 0, isr, ii);
747 ii->vector = vector;
748 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
749 ii->tq = taskqueue_create_fast(ii->task_name,
750 M_NOWAIT,
751 taskqueue_thread_enqueue,
752 &ii->tq);
753 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
754 device_get_nameunit(sc->dev));
755
756 ii->sc = sc;
757 rc = bus_setup_intr(sc->dev,
758 ii->intr_res,
759 INTR_TYPE_NET,
760 oce_fast_isr, NULL, ii, &ii->tag);
761 return rc;
762
763}
764
765
766void
767oce_intr_free(POCE_SOFTC sc)
768{
769 int i = 0;
770
771 for (i = 0; i < sc->intr_count; i++) {
772
773 if (sc->intrs[i].tag != NULL)
774 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
775 sc->intrs[i].tag);
776 if (sc->intrs[i].tq != NULL)
777 taskqueue_free(sc->intrs[i].tq);
778
779 if (sc->intrs[i].intr_res != NULL)
780 bus_release_resource(sc->dev, SYS_RES_IRQ,
781 sc->intrs[i].irq_rr,
782 sc->intrs[i].intr_res);
783 sc->intrs[i].tag = NULL;
784 sc->intrs[i].intr_res = NULL;
785 }
786
787 if (sc->flags & OCE_FLAGS_USING_MSIX)
788 pci_release_msi(sc->dev);
789
790}
791
792
793
794/******************************************************************************
795* Media callbacks functions *
796******************************************************************************/
797
798static void
799oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
800{
801 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
802
803
804 req->ifm_status = IFM_AVALID;
805 req->ifm_active = IFM_ETHER;
806
807 if (sc->link_status == 1)
808 req->ifm_status |= IFM_ACTIVE;
809 else
810 return;
811
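	/*
	 * link_speed is a firmware speed code. Note that the 20 Gbps and
	 * 25 Gbps cases below are reported to ifmedia as IFM_10G_SR,
	 * apparently for lack of a closer media type, while sc->speed
	 * keeps the true value in Mbps.
	 */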
812 switch (sc->link_speed) {
813 case 1: /* 10 Mbps */
814 req->ifm_active |= IFM_10_T | IFM_FDX;
815 sc->speed = 10;
816 break;
817 case 2: /* 100 Mbps */
818 req->ifm_active |= IFM_100_TX | IFM_FDX;
819 sc->speed = 100;
820 break;
821 case 3: /* 1 Gbps */
822 req->ifm_active |= IFM_1000_T | IFM_FDX;
823 sc->speed = 1000;
824 break;
825 case 4: /* 10 Gbps */
826 req->ifm_active |= IFM_10G_SR | IFM_FDX;
827 sc->speed = 10000;
828 break;
829 case 5: /* 20 Gbps */
830 req->ifm_active |= IFM_10G_SR | IFM_FDX;
831 sc->speed = 20000;
832 break;
833 case 6: /* 25 Gbps */
834 req->ifm_active |= IFM_10G_SR | IFM_FDX;
835 sc->speed = 25000;
836 break;
837 case 7: /* 40 Gbps */
838 req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
839 sc->speed = 40000;
840 break;
841 default:
842 sc->speed = 0;
843 break;
844 }
845
846 return;
847}
848
849
850int
851oce_media_change(struct ifnet *ifp)
852{
853 return 0;
854}
855
856
857
858
859/*****************************************************************************
860 * Transmit routines functions *
861 *****************************************************************************/
862
863static int
864oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
865{
866 int rc = 0, i, retry_cnt = 0;
867 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
868 struct mbuf *m, *m_temp;
869 struct oce_wq *wq = sc->wq[wq_index];
870 struct oce_packet_desc *pd;
871 struct oce_nic_hdr_wqe *nichdr;
872 struct oce_nic_frag_wqe *nicfrag;
873 int num_wqes;
874 uint32_t reg_value;
875 boolean_t complete = TRUE;
876
877 m = *mpp;
878 if (!m)
879 return EINVAL;
880
881 if (!(m->m_flags & M_PKTHDR)) {
882 rc = ENXIO;
883 goto free_ret;
884 }
885
886 if(oce_tx_asic_stall_verify(sc, m)) {
887 m = oce_insert_vlan_tag(sc, m, &complete);
888 if(!m) {
889			device_printf(sc->dev, "VLAN tag insertion unsuccessful\n");
890 return 0;
891 }
892
893 }
894
895 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
896 /* consolidate packet buffers for TSO/LSO segment offload */
897#if defined(INET6) || defined(INET)
898 m = oce_tso_setup(sc, mpp);
899#else
900 m = NULL;
901#endif
902 if (m == NULL) {
903 rc = ENXIO;
904 goto free_ret;
905 }
906 }
907
908 pd = &wq->pckts[wq->pkt_desc_head];
909retry:
910 rc = bus_dmamap_load_mbuf_sg(wq->tag,
911 pd->map,
912 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
913 if (rc == 0) {
914 num_wqes = pd->nsegs + 1;
915 if (IS_BE(sc) || IS_SH(sc)) {
916			/* Dummy required only for BE3. */
917 if (num_wqes & 1)
918 num_wqes++;
919 }
920 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
921 bus_dmamap_unload(wq->tag, pd->map);
922 return EBUSY;
923 }
924 atomic_store_rel_int(&wq->pkt_desc_head,
925 (wq->pkt_desc_head + 1) % \
926 OCE_WQ_PACKET_ARRAY_SIZE);
927 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
928 pd->mbuf = m;
929
930 nichdr =
931 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
932 nichdr->u0.dw[0] = 0;
933 nichdr->u0.dw[1] = 0;
934 nichdr->u0.dw[2] = 0;
935 nichdr->u0.dw[3] = 0;
936
937 nichdr->u0.s.complete = complete;
938 nichdr->u0.s.event = 1;
939 nichdr->u0.s.crc = 1;
940 nichdr->u0.s.forward = 0;
941 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
942 nichdr->u0.s.udpcs =
943 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
944 nichdr->u0.s.tcpcs =
945 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
946 nichdr->u0.s.num_wqe = num_wqes;
947 nichdr->u0.s.total_length = m->m_pkthdr.len;
948
949 if (m->m_flags & M_VLANTAG) {
950 nichdr->u0.s.vlan = 1; /*Vlan present*/
951 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
952 }
953
954 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
955 if (m->m_pkthdr.tso_segsz) {
956 nichdr->u0.s.lso = 1;
957 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
958 }
959			if (!IS_BE(sc) && !IS_SH(sc))
960 nichdr->u0.s.ipcs = 1;
961 }
962
963 RING_PUT(wq->ring, 1);
964 atomic_add_int(&wq->ring->num_used, 1);
965
966 for (i = 0; i < pd->nsegs; i++) {
967 nicfrag =
968 RING_GET_PRODUCER_ITEM_VA(wq->ring,
969 struct oce_nic_frag_wqe);
970 nicfrag->u0.s.rsvd0 = 0;
971 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
972 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
973 nicfrag->u0.s.frag_len = segs[i].ds_len;
974 pd->wqe_idx = wq->ring->pidx;
975 RING_PUT(wq->ring, 1);
976 atomic_add_int(&wq->ring->num_used, 1);
977 }
978 if (num_wqes > (pd->nsegs + 1)) {
979 nicfrag =
980 RING_GET_PRODUCER_ITEM_VA(wq->ring,
981 struct oce_nic_frag_wqe);
982 nicfrag->u0.dw[0] = 0;
983 nicfrag->u0.dw[1] = 0;
984 nicfrag->u0.dw[2] = 0;
985 nicfrag->u0.dw[3] = 0;
986 pd->wqe_idx = wq->ring->pidx;
987 RING_PUT(wq->ring, 1);
988 atomic_add_int(&wq->ring->num_used, 1);
989 pd->nsegs++;
990 }
991
992 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
993 wq->tx_stats.tx_reqs++;
994 wq->tx_stats.tx_wrbs += num_wqes;
995 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
996 wq->tx_stats.tx_pkts++;
997
998 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
999 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
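		/*
		 * Ring the TX doorbell: the count of posted WQEs goes in
		 * the upper 16 bits of the register and the WQ id in the
		 * lower 16.
		 */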
1000 reg_value = (num_wqes << 16) | wq->wq_id;
1001 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1002
1003 } else if (rc == EFBIG) {
1004 if (retry_cnt == 0) {
1005 m_temp = m_defrag(m, M_NOWAIT);
1006 if (m_temp == NULL)
1007 goto free_ret;
1008 m = m_temp;
1009 *mpp = m_temp;
1010 retry_cnt = retry_cnt + 1;
1011 goto retry;
1012 } else
1013 goto free_ret;
1014 } else if (rc == ENOMEM)
1015 return rc;
1016 else
1017 goto free_ret;
1018
1019 return 0;
1020
1021free_ret:
1022 m_freem(*mpp);
1023 *mpp = NULL;
1024 return rc;
1025}
1026
1027
1028static void
1029oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1030{
1031 struct oce_packet_desc *pd;
1032 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1033 struct mbuf *m;
1034
1035 pd = &wq->pckts[wq->pkt_desc_tail];
1036 atomic_store_rel_int(&wq->pkt_desc_tail,
1037 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1038 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1039 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1040 bus_dmamap_unload(wq->tag, pd->map);
1041
1042 m = pd->mbuf;
1043 m_freem(m);
1044 pd->mbuf = NULL;
1045
1046
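	/*
	 * If transmits were stalled (OACTIVE set) and the ring has now
	 * drained below half full, clear the flag and kick the transmit
	 * task so the stack resumes queueing.
	 */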
1047 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1048 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1049 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1050 oce_tx_restart(sc, wq);
1051 }
1052 }
1053}
1054
1055
1056static void
1057oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1058{
1059
1060 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1061 return;
1062
1063#if __FreeBSD_version >= 800000
1064 if (!drbr_empty(sc->ifp, wq->br))
1065#else
1066 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1067#endif
1068 taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1069
1070}
1071
1072
1073#if defined(INET6) || defined(INET)
1074static struct mbuf *
1075oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1076{
1077 struct mbuf *m;
1078#ifdef INET
1079 struct ip *ip;
1080#endif
1081#ifdef INET6
1082 struct ip6_hdr *ip6;
1083#endif
1084 struct ether_vlan_header *eh;
1085 struct tcphdr *th;
1086 uint16_t etype;
1087 int total_len = 0, ehdrlen = 0;
1088
1089 m = *mpp;
1090
1091 if (M_WRITABLE(m) == 0) {
1092 m = m_dup(*mpp, M_NOWAIT);
1093 if (!m)
1094 return NULL;
1095 m_freem(*mpp);
1096 *mpp = m;
1097 }
1098
1099 eh = mtod(m, struct ether_vlan_header *);
1100 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1101 etype = ntohs(eh->evl_proto);
1102 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1103 } else {
1104 etype = ntohs(eh->evl_encap_proto);
1105 ehdrlen = ETHER_HDR_LEN;
1106 }
1107
1108 switch (etype) {
1109#ifdef INET
1110 case ETHERTYPE_IP:
1111 ip = (struct ip *)(m->m_data + ehdrlen);
1112 if (ip->ip_p != IPPROTO_TCP)
1113 return NULL;
1114 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1115
1116 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1117 break;
1118#endif
1119#ifdef INET6
1120 case ETHERTYPE_IPV6:
1121 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1122 if (ip6->ip6_nxt != IPPROTO_TCP)
1123 return NULL;
1124 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1125
1126 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1127 break;
1128#endif
1129 default:
1130 return NULL;
1131 }
1132
1133 m = m_pullup(m, total_len);
1134 if (!m)
1135 return NULL;
1136 *mpp = m;
1137 return m;
1138
1139}
1140#endif /* INET6 || INET */
1141
1142void
1143oce_tx_task(void *arg, int npending)
1144{
1145 struct oce_wq *wq = arg;
1146 POCE_SOFTC sc = wq->parent;
1147 struct ifnet *ifp = sc->ifp;
1148 int rc = 0;
1149
1150#if __FreeBSD_version >= 800000
1151 LOCK(&wq->tx_lock);
1152 rc = oce_multiq_transmit(ifp, NULL, wq);
1153 if (rc) {
1154 device_printf(sc->dev,
1155 "TX[%d] restart failed\n", wq->queue_index);
1156 }
1157 UNLOCK(&wq->tx_lock);
1158#else
1159 oce_start(ifp);
1160#endif
1161
1162}
1163
1164
1165void
1166oce_start(struct ifnet *ifp)
1167{
1168 POCE_SOFTC sc = ifp->if_softc;
1169 struct mbuf *m;
1170 int rc = 0;
1171	int def_q = 0; /* Default tx queue is 0 */
1172
1173 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1174 IFF_DRV_RUNNING)
1175 return;
1176
1177 if (!sc->link_status)
1178 return;
1179
1180 do {
1181 IF_DEQUEUE(&sc->ifp->if_snd, m);
1182 if (m == NULL)
1183 break;
1184
1185 LOCK(&sc->wq[def_q]->tx_lock);
1186 rc = oce_tx(sc, &m, def_q);
1187 UNLOCK(&sc->wq[def_q]->tx_lock);
1188 if (rc) {
1189 if (m != NULL) {
1190 sc->wq[def_q]->tx_stats.tx_stops ++;
1191 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1192 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1193 m = NULL;
1194 }
1195 break;
1196 }
1197 if (m != NULL)
1198 ETHER_BPF_MTAP(ifp, m);
1199
1200 } while (TRUE);
1201
1202 return;
1203}
1204
1205
1206/* Handle the Completion Queue for transmit */
1207uint16_t
1208oce_wq_handler(void *arg)
1209{
1210 struct oce_wq *wq = (struct oce_wq *)arg;
1211 POCE_SOFTC sc = wq->parent;
1212 struct oce_cq *cq = wq->cq;
1213 struct oce_nic_tx_cqe *cqe;
1214 int num_cqes = 0;
1215
1216 bus_dmamap_sync(cq->ring->dma.tag,
1217 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1218 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1219 while (cqe->u0.dw[3]) {
1220 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1221
1222 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1223 if (wq->ring->cidx >= wq->ring->num_items)
1224 wq->ring->cidx -= wq->ring->num_items;
1225
1226 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1227 wq->tx_stats.tx_compl++;
1228 cqe->u0.dw[3] = 0;
1229 RING_GET(cq->ring, 1);
1230 bus_dmamap_sync(cq->ring->dma.tag,
1231 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1232 cqe =
1233 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1234 num_cqes++;
1235 }
1236
1237 if (num_cqes)
1238 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1239
1240 return 0;
1241}
1242
1243
1244static int
1245oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1246{
1247 POCE_SOFTC sc = ifp->if_softc;
1248 int status = 0, queue_index = 0;
1249 struct mbuf *next = NULL;
1250 struct buf_ring *br = NULL;
1251
1252 br = wq->br;
1253 queue_index = wq->queue_index;
1254
1255 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1256 IFF_DRV_RUNNING) {
1257 if (m != NULL)
1258 status = drbr_enqueue(ifp, br, m);
1259 return status;
1260 }
1261
1262 if (m != NULL) {
1263 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1264 return status;
1265 }
1266 while ((next = drbr_peek(ifp, br)) != NULL) {
1267 if (oce_tx(sc, &next, queue_index)) {
1268 if (next == NULL) {
1269 drbr_advance(ifp, br);
1270 } else {
1271 drbr_putback(ifp, br, next);
1272 wq->tx_stats.tx_stops ++;
1273 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1274 }
1275 break;
1276 }
1277 drbr_advance(ifp, br);
1278 if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1279 if (next->m_flags & M_MCAST)
1280 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1281 ETHER_BPF_MTAP(ifp, next);
1282 }
1283
1284 return 0;
1285}
1286
1287
1288
1289
1290/*****************************************************************************
1291 * Receive routines functions *
1292 *****************************************************************************/
1293
1294static void
1295oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1296{
1297 uint32_t out;
1298 struct oce_packet_desc *pd;
1299 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1300 int i, len, frag_len;
1301 struct mbuf *m = NULL, *tail = NULL;
1302 uint16_t vtag;
1303
1304 len = cqe->u0.s.pkt_size;
1305 if (!len) {
1306		/* Partial DMA workaround for Lancer */
1307 oce_discard_rx_comp(rq, cqe);
1308 goto exit;
1309 }
1310
1311 /* Get vlan_tag value */
1312 if(IS_BE(sc) || IS_SH(sc))
1313 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1314 else
1315 vtag = cqe->u0.s.vlan_tag;
1316
1317
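	/*
	 * A single receive completion can span several RQ fragments.
	 * Unload each fragment's DMA map and chain the mbufs together;
	 * only the first fragment carries the packet header, where the
	 * checksum-offload results are recorded.
	 */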
1318 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1319
1320 if (rq->packets_out == rq->packets_in) {
1321 device_printf(sc->dev,
1322 "RQ transmit descriptor missing\n");
1323 }
1324 out = rq->packets_out + 1;
1325 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1326 out = 0;
1327 pd = &rq->pckts[rq->packets_out];
1328 rq->packets_out = out;
1329
1330 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1331 bus_dmamap_unload(rq->tag, pd->map);
1332 rq->pending--;
1333
1334 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1335 pd->mbuf->m_len = frag_len;
1336
1337 if (tail != NULL) {
1338 /* additional fragments */
1339 pd->mbuf->m_flags &= ~M_PKTHDR;
1340 tail->m_next = pd->mbuf;
1341 tail = pd->mbuf;
1342 } else {
1343 /* first fragment, fill out much of the packet header */
1344 pd->mbuf->m_pkthdr.len = len;
1345 pd->mbuf->m_pkthdr.csum_flags = 0;
1346 if (IF_CSUM_ENABLED(sc)) {
1347 if (cqe->u0.s.l4_cksum_pass) {
1348 pd->mbuf->m_pkthdr.csum_flags |=
1349 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1350 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1351 }
1352 if (cqe->u0.s.ip_cksum_pass) {
1353 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1354 pd->mbuf->m_pkthdr.csum_flags |=
1355 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1356 }
1357 }
1358 }
1359 m = tail = pd->mbuf;
1360 }
1361 pd->mbuf = NULL;
1362 len -= frag_len;
1363 }
1364
1365 if (m) {
1366 if (!oce_cqe_portid_valid(sc, cqe)) {
1367 m_freem(m);
1368 goto exit;
1369 }
1370
1371 m->m_pkthdr.rcvif = sc->ifp;
1372#if __FreeBSD_version >= 800000
1373 if (rq->queue_index)
1374 m->m_pkthdr.flowid = (rq->queue_index - 1);
1375 else
1376 m->m_pkthdr.flowid = rq->queue_index;
1377 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1378#endif
1379		/* This determines if the vlan tag is valid */
1380 if (oce_cqe_vtp_valid(sc, cqe)) {
1381 if (sc->function_mode & FNM_FLEX10_MODE) {
1382 /* FLEX10. If QnQ is not set, neglect VLAN */
1383 if (cqe->u0.s.qnq) {
1384 m->m_pkthdr.ether_vtag = vtag;
1385 m->m_flags |= M_VLANTAG;
1386 }
1387 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1388			/* In UMC mode the pvid is generally stripped by
1389			   hw. But in some cases we have seen frames arrive
1390			   with the pvid. So if pvid == vlan, neglect vlan.
1391 */
1392 m->m_pkthdr.ether_vtag = vtag;
1393 m->m_flags |= M_VLANTAG;
1394 }
1395 }
1396
1397 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1398#if defined(INET6) || defined(INET)
1399 /* Try to queue to LRO */
1400 if (IF_LRO_ENABLED(sc) &&
1401 (cqe->u0.s.ip_cksum_pass) &&
1402 (cqe->u0.s.l4_cksum_pass) &&
1403 (!cqe->u0.s.ip_ver) &&
1404 (rq->lro.lro_cnt != 0)) {
1405
1406 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1407 rq->lro_pkts_queued ++;
1408 goto post_done;
1409 }
1410 /* If LRO posting fails then try to post to STACK */
1411 }
1412#endif
1413
1414 (*sc->ifp->if_input) (sc->ifp, m);
1415#if defined(INET6) || defined(INET)
1416post_done:
1417#endif
1418 /* Update rx stats per queue */
1419 rq->rx_stats.rx_pkts++;
1420 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1421 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1422 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1423 rq->rx_stats.rx_mcast_pkts++;
1424 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1425 rq->rx_stats.rx_ucast_pkts++;
1426 }
1427exit:
1428 return;
1429}
1430
1431
1432static void
1433oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1434{
1435 uint32_t out, i = 0;
1436 struct oce_packet_desc *pd;
1437 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1438 int num_frags = cqe->u0.s.num_fragments;
1439
1440 for (i = 0; i < num_frags; i++) {
1441 if (rq->packets_out == rq->packets_in) {
1442 device_printf(sc->dev,
1443 "RQ transmit descriptor missing\n");
1444 }
1445 out = rq->packets_out + 1;
1446 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1447 out = 0;
1448 pd = &rq->pckts[rq->packets_out];
1449 rq->packets_out = out;
1450
1451 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1452 bus_dmamap_unload(rq->tag, pd->map);
1453 rq->pending--;
1454 m_freem(pd->mbuf);
1455 }
1456
1457}
1458
1459
1460static int
1461oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1462{
1463 struct oce_nic_rx_cqe_v1 *cqe_v1;
1464 int vtp = 0;
1465
1466 if (sc->be3_native) {
1467 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1468 vtp = cqe_v1->u0.s.vlan_tag_present;
1469 } else
1470 vtp = cqe->u0.s.vlan_tag_present;
1471
1472 return vtp;
1473
1474}
1475
1476
1477static int
1478oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1479{
1480 struct oce_nic_rx_cqe_v1 *cqe_v1;
1481 int port_id = 0;
1482
1483 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1484 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1485 port_id = cqe_v1->u0.s.port;
1486 if (sc->port_id != port_id)
1487 return 0;
1488 } else
1489 ;/* For BE3 legacy and Lancer this is dummy */
1490
1491 return 1;
1492
1493}
1494
1495#if defined(INET6) || defined(INET)
1496static void
1497oce_rx_flush_lro(struct oce_rq *rq)
1498{
1499 struct lro_ctrl *lro = &rq->lro;
1500 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1501
1502 if (!IF_LRO_ENABLED(sc))
1503 return;
1504
1505 tcp_lro_flush_all(lro);
1506 rq->lro_pkts_queued = 0;
1507
1508 return;
1509}
1510
1511
1512static int
1513oce_init_lro(POCE_SOFTC sc)
1514{
1515 struct lro_ctrl *lro = NULL;
1516 int i = 0, rc = 0;
1517
1518 for (i = 0; i < sc->nrqs; i++) {
1519 lro = &sc->rq[i]->lro;
1520 rc = tcp_lro_init(lro);
1521 if (rc != 0) {
1522 device_printf(sc->dev, "LRO init failed\n");
1523 return rc;
1524 }
1525 lro->ifp = sc->ifp;
1526 }
1527
1528 return rc;
1529}
1530
1531
1532void
1533oce_free_lro(POCE_SOFTC sc)
1534{
1535 struct lro_ctrl *lro = NULL;
1536 int i = 0;
1537
1538 for (i = 0; i < sc->nrqs; i++) {
1539 lro = &sc->rq[i]->lro;
1540 if (lro)
1541 tcp_lro_free(lro);
1542 }
1543}
1544#endif
1545
1546int
1547oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1548{
1549 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1550 int i, in, rc;
1551 struct oce_packet_desc *pd;
1552 bus_dma_segment_t segs[6];
1553 int nsegs, added = 0;
1554 struct oce_nic_rqe *rqe;
1555 pd_rxulp_db_t rxdb_reg;
1556
1557 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1558 for (i = 0; i < count; i++) {
1559 in = rq->packets_in + 1;
1560 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1561 in = 0;
1562 if (in == rq->packets_out)
1563 break; /* no more room */
1564
1565 pd = &rq->pckts[rq->packets_in];
1566 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1567 if (pd->mbuf == NULL)
1568 break;
1569
1570 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1571 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1572 pd->map,
1573 pd->mbuf,
1574 segs, &nsegs, BUS_DMA_NOWAIT);
1575 if (rc) {
1576 m_free(pd->mbuf);
1577 break;
1578 }
1579
1580 if (nsegs != 1) {
1581 i--;
1582 continue;
1583 }
1584
1585 rq->packets_in = in;
1586 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1587
1588 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1589 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1590 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1591 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1592 RING_PUT(rq->ring, 1);
1593 added++;
1594 rq->pending++;
1595 }
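	/*
	 * Tell the hardware about the newly posted buffers. The
	 * doorbell's num_posted field is limited, so post in chunks of
	 * OCE_MAX_RQ_POSTS and then flush any remainder.
	 */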
1596 if (added != 0) {
1597 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1598 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1599 rxdb_reg.bits.qid = rq->rq_id;
1600 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1601 added -= OCE_MAX_RQ_POSTS;
1602 }
1603 if (added > 0) {
1604 rxdb_reg.bits.qid = rq->rq_id;
1605 rxdb_reg.bits.num_posted = added;
1606 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1607 }
1608 }
1609
1610 return 0;
1611}
1612
1613
1614/* Handle the Completion Queue for receive */
1615uint16_t
1616oce_rq_handler(void *arg)
1617{
1618 struct oce_rq *rq = (struct oce_rq *)arg;
1619 struct oce_cq *cq = rq->cq;
1620 POCE_SOFTC sc = rq->parent;
1621 struct oce_nic_rx_cqe *cqe;
1622 int num_cqes = 0, rq_buffers_used = 0;
1623
1624
1625 bus_dmamap_sync(cq->ring->dma.tag,
1626 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1627 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1628 while (cqe->u0.dw[2]) {
1629 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1630
1631 RING_GET(rq->ring, 1);
1632 if (cqe->u0.s.error == 0) {
1633 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1634 } else {
1635 rq->rx_stats.rxcp_err++;
1636 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1637 /* Post L3/L4 errors to stack.*/
1638 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1639 }
1640 rq->rx_stats.rx_compl++;
1641 cqe->u0.dw[2] = 0;
1642
1643#if defined(INET6) || defined(INET)
1644 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1645 oce_rx_flush_lro(rq);
1646 }
1647#endif
1648
1649 RING_GET(cq->ring, 1);
1650 bus_dmamap_sync(cq->ring->dma.tag,
1651 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1652 cqe =
1653 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1654 num_cqes++;
1655 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1656 break;
1657 }
1658
1659#if defined(INET6) || defined(INET)
1660 if (IF_LRO_ENABLED(sc))
1661 oce_rx_flush_lro(rq);
1662#endif
1663
1664 if (num_cqes) {
1665 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1666 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1667 if (rq_buffers_used > 1)
1668 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1669 }
1670
1671 return 0;
1672
1673}
1674
1675
1676
1677
1678/*****************************************************************************
1679 * Helper function prototypes in this file *
1680 *****************************************************************************/
1681
1682static int
1683oce_attach_ifp(POCE_SOFTC sc)
1684{
1685
1686 sc->ifp = if_alloc(IFT_ETHER);
1687 if (!sc->ifp)
1688 return ENOMEM;
1689
1690 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1691 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1692 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1693
1694 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1695 sc->ifp->if_ioctl = oce_ioctl;
1696 sc->ifp->if_start = oce_start;
1697 sc->ifp->if_init = oce_init;
1698 sc->ifp->if_mtu = ETHERMTU;
1699 sc->ifp->if_softc = sc;
1700#if __FreeBSD_version >= 800000
1701 sc->ifp->if_transmit = oce_multiq_start;
1702 sc->ifp->if_qflush = oce_multiq_flush;
1703#endif
1704
1705 if_initname(sc->ifp,
1706 device_get_name(sc->dev), device_get_unit(sc->dev));
1707
1708 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1709 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1710 IFQ_SET_READY(&sc->ifp->if_snd);
1711
1712 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1713 sc->ifp->if_hwassist |= CSUM_TSO;
1714 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1715
1716 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1717 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1718 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1719
1720#if defined(INET6) || defined(INET)
1721 sc->ifp->if_capabilities |= IFCAP_TSO;
1722 sc->ifp->if_capabilities |= IFCAP_LRO;
1723 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1724#endif
1725
1726 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1727 sc->ifp->if_baudrate = IF_Gbps(10);
1728
1729#if __FreeBSD_version >= 1000000
1730 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1731 sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
1732 sc->ifp->if_hw_tsomaxsegsize = 4096;
1733#endif
1734
1735 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1736
1737 return 0;
1738}
1739
1740
1741static void
1742oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1743{
1744 POCE_SOFTC sc = ifp->if_softc;
1745
1746 if (ifp->if_softc != arg)
1747 return;
1748 if ((vtag == 0) || (vtag > 4095))
1749 return;
1750
1751 sc->vlan_tag[vtag] = 1;
1752 sc->vlans_added++;
1753 if (sc->vlans_added <= (sc->max_vlans + 1))
1754 oce_vid_config(sc);
1755}
1756
1757
1758static void
1759oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1760{
1761 POCE_SOFTC sc = ifp->if_softc;
1762
1763 if (ifp->if_softc != arg)
1764 return;
1765 if ((vtag == 0) || (vtag > 4095))
1766 return;
1767
1768 sc->vlan_tag[vtag] = 0;
1769 sc->vlans_added--;
1770 oce_vid_config(sc);
1771}
1772
1773
1774/*
1775 * A max of 64 vlans can be configured in BE. If the user configures
1776 * more, place the card in vlan promiscuous mode.
1777 */
1778static int
1779oce_vid_config(POCE_SOFTC sc)
1780{
1781 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1782 uint16_t ntags = 0, i;
1783 int status = 0;
1784
1785 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1786 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1787 for (i = 0; i < MAX_VLANS; i++) {
1788 if (sc->vlan_tag[i]) {
1789 vtags[ntags].vtag = i;
1790 ntags++;
1791 }
1792 }
1793 if (ntags)
1794 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1795 vtags, ntags, 1, 0);
1796 } else
1797 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1798 NULL, 0, 1, 1);
1799 return status;
1800}
1801
1802
1803static void
1804oce_mac_addr_set(POCE_SOFTC sc)
1805{
1806 uint32_t old_pmac_id = sc->pmac_id;
1807 int status = 0;
1808
1809
1810 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1811 sc->macaddr.size_of_struct);
1812 if (!status)
1813 return;
1814
1815 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1816 sc->if_id, &sc->pmac_id);
1817 if (!status) {
1818 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1819 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1820 sc->macaddr.size_of_struct);
1821 }
1822 if (status)
1823		device_printf(sc->dev, "Failed to update MAC address\n");
1824
1825}
1826
1827
1828static int
1829oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1830{
1831 POCE_SOFTC sc = ifp->if_softc;
1832 struct ifreq *ifr = (struct ifreq *)data;
1833 int rc = ENXIO;
1834 char cookie[32] = {0};
1835 void *priv_data = (void *)ifr->ifr_data;
1835 void *priv_data = ifr_data_get_ptr(ifr);
1836 void *ioctl_ptr;
1837 uint32_t req_size;
1838 struct mbx_hdr req;
1839 OCE_DMA_MEM dma_mem;
1840 struct mbx_common_get_cntl_attr *fw_cmd;
1841
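	/*
	 * SIOCGPRIVATE_0 passthrough layout, as consumed below: the user
	 * buffer starts with the IOCTL_COOKIE string, immediately
	 * followed by an mbx_hdr and its payload. The request is copied
	 * into DMA memory, handed to the firmware mailbox, and the
	 * response is copied back over the same region.
	 */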
1842 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1843 return EFAULT;
1844
1845 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1846 return EINVAL;
1847
1848 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1849 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1850 return EFAULT;
1851
1852 req_size = le32toh(req.u0.req.request_length);
1853 if (req_size > 65536)
1854 return EINVAL;
1855
1856 req_size += sizeof(struct mbx_hdr);
1857 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1858 if (rc)
1859 return ENOMEM;
1860
1861 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1862 rc = EFAULT;
1863 goto dma_free;
1864 }
1865
1866 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1867 if (rc) {
1868 rc = EIO;
1869 goto dma_free;
1870 }
1871
1872 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1873 rc = EFAULT;
1874
1875 /*
1876		The firmware fills in all the attributes for this ioctl except
1877		the driver version, so fill it in here.
1878 */
1879 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1880 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1881 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1882 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1883 }
1884
1885dma_free:
1886 oce_dma_free(sc, &dma_mem);
1887 return rc;
1888
1889}
1890
1891static void
1892oce_eqd_set_periodic(POCE_SOFTC sc)
1893{
1894 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1895 struct oce_aic_obj *aic;
1896 struct oce_eq *eqo;
1897 uint64_t now = 0, delta;
1898 int eqd, i, num = 0;
1899 uint32_t ips = 0;
1900 int tps;
1901
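	/*
	 * Adaptive interrupt coalescing: compute the per-EQ interrupt
	 * rate from the delta since the last sample (ips = new
	 * interrupts / elapsed seconds). Above INTR_RATE_HWM the delay
	 * is raised by 20, below INTR_RATE_LWM it is halved, and the
	 * result is clamped to [min_eqd, max_eqd]. The hardware takes
	 * the delay as a multiplier, eqd * 65 / 100; e.g. eqd = 40
	 * yields 26.
	 */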
1902 for (i = 0 ; i < sc->neqs; i++) {
1903 eqo = sc->eq[i];
1904 aic = &sc->aic_obj[i];
1905 /* When setting the static eq delay from the user space */
1906 if (!aic->enable) {
1907 eqd = aic->et_eqd;
1908 goto modify_eqd;
1909 }
1910
1911 now = ticks;
1912
1913		/* Overflow check */
1914 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1915 goto done;
1916
1917 delta = now - aic->ticks;
1918 tps = delta/hz;
1919
1920 /* Interrupt rate based on elapsed ticks */
1921 if(tps)
1922 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1923
1924 if (ips > INTR_RATE_HWM)
1925 eqd = aic->cur_eqd + 20;
1926 else if (ips < INTR_RATE_LWM)
1927 eqd = aic->cur_eqd / 2;
1928 else
1929 goto done;
1930
1931 if (eqd < 10)
1932 eqd = 0;
1933
1934 /* Make sure that the eq delay is in the known range */
1935 eqd = min(eqd, aic->max_eqd);
1936 eqd = max(eqd, aic->min_eqd);
1937
1938modify_eqd:
1939 if (eqd != aic->cur_eqd) {
1940 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1941 set_eqd[num].eq_id = eqo->eq_id;
1942 aic->cur_eqd = eqd;
1943 num++;
1944 }
1945done:
1946 aic->intr_prev = eqo->intr;
1947 aic->ticks = now;
1948 }
1949
1950	/* Is there at least one eq that needs to be modified? */
1951 if(num)
1952 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1953}
1954
1955static void oce_detect_hw_error(POCE_SOFTC sc)
1956{
1957
1958 uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1959 uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1960 uint32_t i;
1961
1962 if (sc->hw_error)
1963 return;
1964
1965 if (IS_XE201(sc)) {
1966 sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1967 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1968 sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1969 sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1970 }
1971 } else {
1972 ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1973 ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1974 ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1975 ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1976
1977 ue_low = (ue_low & ~ue_low_mask);
1978 ue_high = (ue_high & ~ue_high_mask);
1979 }
1980
1981 /* On certain platforms BE hardware can indicate spurious UEs.
1982	 * A real UE will make the h/w stop working completely anyway,
1983	 * hence hw_error is not set on UE detection.
1984 */
1985 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1986 sc->hw_error = TRUE;
1987 device_printf(sc->dev, "Error detected in the card\n");
1988 }
1989
1990 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1991 device_printf(sc->dev,
1992 "ERR: sliport status 0x%x\n", sliport_status);
1993 device_printf(sc->dev,
1994 "ERR: sliport error1 0x%x\n", sliport_err1);
1995 device_printf(sc->dev,
1996 "ERR: sliport error2 0x%x\n", sliport_err2);
1997 }
1998
1999 if (ue_low) {
2000 for (i = 0; ue_low; ue_low >>= 1, i++) {
2001 if (ue_low & 1)
2002 device_printf(sc->dev, "UE: %s bit set\n",
2003 ue_status_low_desc[i]);
2004 }
2005 }
2006
2007 if (ue_high) {
2008 for (i = 0; ue_high; ue_high >>= 1, i++) {
2009 if (ue_high & 1)
2010 device_printf(sc->dev, "UE: %s bit set\n",
2011 ue_status_hi_desc[i]);
2012 }
2013 }
2014
2015}
2016
2017
2018static void
2019oce_local_timer(void *arg)
2020{
2021 POCE_SOFTC sc = arg;
2022 int i = 0;
2023
2024 oce_detect_hw_error(sc);
2025 oce_refresh_nic_stats(sc);
2026 oce_refresh_queue_stats(sc);
2027 oce_mac_addr_set(sc);
2028
2029	/* TX Watchdog */
2030 for (i = 0; i < sc->nwqs; i++)
2031 oce_tx_restart(sc, sc->wq[i]);
2032
2033 /* calculate and set the eq delay for optimal interrupt rate */
2034 if (IS_BE(sc) || IS_SH(sc))
2035 oce_eqd_set_periodic(sc);
2036
2037 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2038}
2039
2040
2041/* NOTE : This should only be called holding
2042 * DEVICE_LOCK.
2043 */
2044static void
2045oce_if_deactivate(POCE_SOFTC sc)
2046{
2047 int i, mtime = 0;
2048 int wait_req = 0;
2049 struct oce_rq *rq;
2050 struct oce_wq *wq;
2051 struct oce_eq *eq;
2052
2053 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2054
2055	/* Wait for a max of 400ms for TX completions to be done */
2056 while (mtime < 400) {
2057 wait_req = 0;
2058 for_all_wq_queues(sc, wq, i) {
2059 if (wq->ring->num_used) {
2060 wait_req = 1;
2061 DELAY(1);
2062 break;
2063 }
2064 }
2065 mtime += 1;
2066 if (!wait_req)
2067 break;
2068 }
2069
2070 /* Stop intrs and finish any bottom halves pending */
2071 oce_hw_intr_disable(sc);
2072
2073	/* Since taskqueue_drain takes the Giant Lock, we should not hold
2074	   any other lock. So unlock the device lock and reacquire it
2075	   after taskqueue_drain completes.
2076 */
2077 UNLOCK(&sc->dev_lock);
2078 for (i = 0; i < sc->intr_count; i++) {
2079 if (sc->intrs[i].tq != NULL) {
2080 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2081 }
2082 }
2083 LOCK(&sc->dev_lock);
2084
2085 /* Delete RX queue in card with flush param */
2086 oce_stop_rx(sc);
2087
2088	/* Invalidate any pending cq and eq entries */
2089 for_all_evnt_queues(sc, eq, i)
2090 oce_drain_eq(eq);
2091 for_all_rq_queues(sc, rq, i)
2092 oce_drain_rq_cq(rq);
2093 for_all_wq_queues(sc, wq, i)
2094 oce_drain_wq_cq(wq);
2095
2096	/* But we still need to get MCC async events.
2097	   So enable intrs and also arm the first EQ
2098 */
2099 oce_hw_intr_enable(sc);
2100 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2101
2102 DELAY(10);
2103}
2104
2105
2106static void
2107oce_if_activate(POCE_SOFTC sc)
2108{
2109 struct oce_eq *eq;
2110 struct oce_rq *rq;
2111 struct oce_wq *wq;
2112 int i, rc = 0;
2113
2114 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2115
2116 oce_hw_intr_disable(sc);
2117
2118 oce_start_rx(sc);
2119
2120 for_all_rq_queues(sc, rq, i) {
2121 rc = oce_start_rq(rq);
2122 if (rc)
2123 device_printf(sc->dev, "Unable to start RX\n");
2124 }
2125
2126 for_all_wq_queues(sc, wq, i) {
2127 rc = oce_start_wq(wq);
2128 if (rc)
2129 device_printf(sc->dev, "Unable to start TX\n");
2130 }
2131
2132
2133 for_all_evnt_queues(sc, eq, i)
2134 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2135
2136 oce_hw_intr_enable(sc);
2137
2138}
2139
2140static void
2141process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2142{
2143 /* Update Link status */
2144 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2145 ASYNC_EVENT_LINK_UP) {
2146 sc->link_status = ASYNC_EVENT_LINK_UP;
2147 if_link_state_change(sc->ifp, LINK_STATE_UP);
2148 } else {
2149 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2150 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2151 }
2152}
2153
2154
2155/* Handle the Completion Queue for the Mailbox/Async notifications */
2156uint16_t
2157oce_mq_handler(void *arg)
2158{
2159 struct oce_mq *mq = (struct oce_mq *)arg;
2160 POCE_SOFTC sc = mq->parent;
2161 struct oce_cq *cq = mq->cq;
2162 int num_cqes = 0, evt_type = 0, optype = 0;
2163 struct oce_mq_cqe *cqe;
2164 struct oce_async_cqe_link_state *acqe;
2165 struct oce_async_event_grp5_pvid_state *gcqe;
2166 struct oce_async_event_qnq *dbgcqe;
2167
2168
2169 bus_dmamap_sync(cq->ring->dma.tag,
2170 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2171 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2172
2173 while (cqe->u0.dw[3]) {
2174 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2175 if (cqe->u0.s.async_event) {
2176 evt_type = cqe->u0.s.event_type;
2177 optype = cqe->u0.s.async_type;
2178 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2179 /* Link status evt */
2180 acqe = (struct oce_async_cqe_link_state *)cqe;
2181 process_link_state(sc, acqe);
2182 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
2183 (optype == ASYNC_EVENT_PVID_STATE)) {
2184 /* GRP5 PVID */
2185 gcqe =
2186 (struct oce_async_event_grp5_pvid_state *)cqe;
2187 if (gcqe->enabled)
2188 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2189 else
2190 sc->pvid = 0;
2191
2192 }
2193 else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2194 optype == ASYNC_EVENT_DEBUG_QNQ) {
2195 dbgcqe =
2196 (struct oce_async_event_qnq *)cqe;
2197 if(dbgcqe->valid)
2198 sc->qnqid = dbgcqe->vlan_tag;
2199 sc->qnq_debug_event = TRUE;
2200 }
2201 }
2202 cqe->u0.dw[3] = 0;
2203 RING_GET(cq->ring, 1);
2204 bus_dmamap_sync(cq->ring->dma.tag,
2205 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2206 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2207 num_cqes++;
2208 }
2209
2210 if (num_cqes)
2211 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2212
2213 return 0;
2214}
2215
2216
2217static void
2218setup_max_queues_want(POCE_SOFTC sc)
2219{
2220	/* Check if it is a FLEX machine. If so, don't use RSS */
2221 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2222 (sc->function_mode & FNM_UMC_MODE) ||
2223 (sc->function_mode & FNM_VNIC_MODE) ||
2224 (!is_rss_enabled(sc)) ||
2225 IS_BE2(sc)) {
2226 sc->nrqs = 1;
2227 sc->nwqs = 1;
2228 } else {
2229 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2230 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2231 }
2232
2233 if (IS_BE2(sc) && is_rss_enabled(sc))
2234 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2235}
2236
2237
2238static void
2239update_queues_got(POCE_SOFTC sc)
2240{
2241 if (is_rss_enabled(sc)) {
2242 sc->nrqs = sc->intr_count + 1;
2243 sc->nwqs = sc->intr_count;
2244 } else {
2245 sc->nrqs = 1;
2246 sc->nwqs = 1;
2247 }
2248
2249 if (IS_BE2(sc))
2250 sc->nwqs = 1;
2251}
2252
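/*
 * Helpers for the TX ASIC-stall workaround used in oce_tx(): on BE3 A1
 * silicon (asic_revision < 2) in QnQ or UMC mode, IPv6 packets with
 * extension headers trigger the condition checked by
 * oce_tx_asic_stall_verify(), and oce_tx() then re-encapsulates the
 * VLAN tag in software via oce_insert_vlan_tag().
 */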
2253static int
2254oce_check_ipv6_ext_hdr(struct mbuf *m)
2255{
2256 struct ether_header *eh = mtod(m, struct ether_header *);
2257 caddr_t m_datatemp = m->m_data;
2258
2259 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2260 m->m_data += sizeof(struct ether_header);
2261 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2262
2263 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2264 (ip6->ip6_nxt != IPPROTO_UDP)){
2265 struct ip6_ext *ip6e = NULL;
2266 m->m_data += sizeof(struct ip6_hdr);
2267
2268 ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2269 if(ip6e->ip6e_len == 0xff) {
2270 m->m_data = m_datatemp;
2271 return TRUE;
2272 }
2273 }
2274 m->m_data = m_datatemp;
2275 }
2276 return FALSE;
2277}
2278
2279static int
2280is_be3_a1(POCE_SOFTC sc)
2281{
2282 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2283 return TRUE;
2284 }
2285 return FALSE;
2286}
2287
2288static struct mbuf *
2289oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2290{
2291 uint16_t vlan_tag = 0;
2292
2293 if(!M_WRITABLE(m))
2294 return NULL;
2295
2296 /* Embed vlan tag in the packet if it is not part of it */
2297 if(m->m_flags & M_VLANTAG) {
2298 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2299 m->m_flags &= ~M_VLANTAG;
2300 }
2301
2302 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2303 if(sc->pvid) {
2304 if(!vlan_tag)
2305 vlan_tag = sc->pvid;
2306 *complete = FALSE;
2307 }
2308
2309 if(vlan_tag) {
2310 m = ether_vlanencap(m, vlan_tag);
2311 }
2312
2313 if(sc->qnqid) {
2314 m = ether_vlanencap(m, sc->qnqid);
2315 *complete = FALSE;
2316 }
2317 return m;
2318}
2319
2320static int
2321oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2322{
2323 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2324 oce_check_ipv6_ext_hdr(m)) {
2325 return TRUE;
2326 }
2327 return FALSE;
2328}
2329
2330static void
2331oce_get_config(POCE_SOFTC sc)
2332{
2333 int rc = 0;
2334 uint32_t max_rss = 0;
2335
2336 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2337 max_rss = OCE_LEGACY_MODE_RSS;
2338 else
2339 max_rss = OCE_MAX_RSS;
2340
2341 if (!IS_BE(sc)) {
2342 rc = oce_get_profile_config(sc, max_rss);
2343 if (rc) {
2344 sc->nwqs = OCE_MAX_WQ;
2345 sc->nrssqs = max_rss;
2346 sc->nrqs = sc->nrssqs + 1;
2347 }
2348 }
2349 else { /* For BE3 don't rely on fw for determining the resources */
2350 sc->nrssqs = max_rss;
2351 sc->nrqs = sc->nrssqs + 1;
2352 sc->nwqs = OCE_MAX_WQ;
2353 sc->max_vlans = MAX_VLANFILTER_SIZE;
2354 }
2355}
1836 void *ioctl_ptr;
1837 uint32_t req_size;
1838 struct mbx_hdr req;
1839 OCE_DMA_MEM dma_mem;
1840 struct mbx_common_get_cntl_attr *fw_cmd;
1841
1842 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1843 return EFAULT;
1844
1845 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1846 return EINVAL;
1847
1848 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1849 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1850 return EFAULT;
1851
1852 req_size = le32toh(req.u0.req.request_length);
1853 if (req_size > 65536)
1854 return EINVAL;
1855
1856 req_size += sizeof(struct mbx_hdr);
1857 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1858 if (rc)
1859 return ENOMEM;
1860
1861 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1862 rc = EFAULT;
1863 goto dma_free;
1864 }
1865
1866 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1867 if (rc) {
1868 rc = EIO;
1869 goto dma_free;
1870 }
1871
1872 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1873 rc = EFAULT;
1874
1875 /*
1876 firmware is filling all the attributes for this ioctl except
1877 the driver version..so fill it
1878 */
1879 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1880 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1881 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1882 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1883 }
1884
1885dma_free:
1886 oce_dma_free(sc, &dma_mem);
1887 return rc;
1888
1889}
1890
1891static void
1892oce_eqd_set_periodic(POCE_SOFTC sc)
1893{
1894 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1895 struct oce_aic_obj *aic;
1896 struct oce_eq *eqo;
1897 uint64_t now = 0, delta;
1898 int eqd, i, num = 0;
1899 uint32_t ips = 0;
1900 int tps;
1901
1902 for (i = 0 ; i < sc->neqs; i++) {
1903 eqo = sc->eq[i];
1904 aic = &sc->aic_obj[i];
1905 /* When setting the static eq delay from the user space */
1906 if (!aic->enable) {
1907 eqd = aic->et_eqd;
1908 goto modify_eqd;
1909 }
1910
1911 now = ticks;
1912
1913 /* Over flow check */
1914 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1915 goto done;
1916
1917 delta = now - aic->ticks;
1918 tps = delta/hz;
1919
1920 /* Interrupt rate based on elapsed ticks */
1921 if(tps)
1922 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1923
1924 if (ips > INTR_RATE_HWM)
1925 eqd = aic->cur_eqd + 20;
1926 else if (ips < INTR_RATE_LWM)
1927 eqd = aic->cur_eqd / 2;
1928 else
1929 goto done;
1930
1931 if (eqd < 10)
1932 eqd = 0;
1933
1934 /* Make sure that the eq delay is in the known range */
1935 eqd = min(eqd, aic->max_eqd);
1936 eqd = max(eqd, aic->min_eqd);
1937
1938modify_eqd:
1939 if (eqd != aic->cur_eqd) {
1940 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1941 set_eqd[num].eq_id = eqo->eq_id;
1942 aic->cur_eqd = eqd;
1943 num++;
1944 }
1945done:
1946 aic->intr_prev = eqo->intr;
1947 aic->ticks = now;
1948 }
1949
1950 /* Is there atleast one eq that needs to be modified? */
1951 if(num)
1952 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1953}
1954
1955static void oce_detect_hw_error(POCE_SOFTC sc)
1956{
1957
1958 uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1959 uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1960 uint32_t i;
1961
1962 if (sc->hw_error)
1963 return;
1964
1965 if (IS_XE201(sc)) {
1966 sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1967 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1968 sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1969 sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1970 }
1971 } else {
1972 ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1973 ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1974 ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1975 ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1976
1977 ue_low = (ue_low & ~ue_low_mask);
1978 ue_high = (ue_high & ~ue_high_mask);
1979 }
1980
1981 /* On certain platforms BE hardware can indicate spurious UEs.
1982 * Allow the h/w to stop working completely in case of a real UE.
1983 * Hence not setting the hw_error for UE detection.
1984 */
1985 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1986 sc->hw_error = TRUE;
1987 device_printf(sc->dev, "Error detected in the card\n");
1988 }
1989
1990 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1991 device_printf(sc->dev,
1992 "ERR: sliport status 0x%x\n", sliport_status);
1993 device_printf(sc->dev,
1994 "ERR: sliport error1 0x%x\n", sliport_err1);
1995 device_printf(sc->dev,
1996 "ERR: sliport error2 0x%x\n", sliport_err2);
1997 }
1998
1999 if (ue_low) {
2000 for (i = 0; ue_low; ue_low >>= 1, i++) {
2001 if (ue_low & 1)
2002 device_printf(sc->dev, "UE: %s bit set\n",
2003 ue_status_low_desc[i]);
2004 }
2005 }
2006
2007 if (ue_high) {
2008 for (i = 0; ue_high; ue_high >>= 1, i++) {
2009 if (ue_high & 1)
2010 device_printf(sc->dev, "UE: %s bit set\n",
2011 ue_status_hi_desc[i]);
2012 }
2013 }
2014
2015}
2016
2017
2018static void
2019oce_local_timer(void *arg)
2020{
2021 POCE_SOFTC sc = arg;
2022 int i = 0;
2023
2024 oce_detect_hw_error(sc);
2025 oce_refresh_nic_stats(sc);
2026 oce_refresh_queue_stats(sc);
2027 oce_mac_addr_set(sc);
2028
2029 /* TX Watch Dog*/
2030 for (i = 0; i < sc->nwqs; i++)
2031 oce_tx_restart(sc, sc->wq[i]);
2032
2033 /* calculate and set the eq delay for optimal interrupt rate */
2034 if (IS_BE(sc) || IS_SH(sc))
2035 oce_eqd_set_periodic(sc);
2036
2037 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2038}
2039
2040
2041/* NOTE : This should only be called holding
2042 * DEVICE_LOCK.
2043 */
2044static void
2045oce_if_deactivate(POCE_SOFTC sc)
2046{
2047 int i, mtime = 0;
2048 int wait_req = 0;
2049 struct oce_rq *rq;
2050 struct oce_wq *wq;
2051 struct oce_eq *eq;
2052
2053 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2054
2055 /*Wait for max of 400ms for TX completions to be done */
2056 while (mtime < 400) {
2057 wait_req = 0;
2058 for_all_wq_queues(sc, wq, i) {
2059 if (wq->ring->num_used) {
2060 wait_req = 1;
2061 DELAY(1);
2062 break;
2063 }
2064 }
2065 mtime += 1;
2066 if (!wait_req)
2067 break;
2068 }
2069
2070 /* Stop intrs and finish any bottom halves pending */
2071 oce_hw_intr_disable(sc);
2072
2073 /* Since taskqueue_drain takes a Gaint Lock, We should not acquire
2074 any other lock. So unlock device lock and require after
2075 completing taskqueue_drain.
2076 */
2077 UNLOCK(&sc->dev_lock);
2078 for (i = 0; i < sc->intr_count; i++) {
2079 if (sc->intrs[i].tq != NULL) {
2080 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2081 }
2082 }
2083 LOCK(&sc->dev_lock);
2084
2085 /* Delete RX queue in card with flush param */
2086 oce_stop_rx(sc);
2087
2088 /* Invalidate any pending cq and eq entries*/
2089 for_all_evnt_queues(sc, eq, i)
2090 oce_drain_eq(eq);
2091 for_all_rq_queues(sc, rq, i)
2092 oce_drain_rq_cq(rq);
2093 for_all_wq_queues(sc, wq, i)
2094 oce_drain_wq_cq(wq);
2095
2096 /* But still we need to get MCC aync events.
2097 So enable intrs and also arm first EQ
2098 */
2099 oce_hw_intr_enable(sc);
2100 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2101
2102 DELAY(10);
2103}
2104
2105
static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}

	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);
}

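/* Mirror a link-state async CQE into the softc and notify the network
 * stack. The ASYNC_EVENT_LOGICAL bit is masked off, so a logical
 * link-up is treated the same as a physical one here.
 */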
static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	    ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}
}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;

	bus_dmamap_sync(cq->ring->dma.tag,
	    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

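	/* A non-zero dw[3] marks a valid CQE; it is zeroed once the entry
	 * is consumed so the slot reads as empty on the next pass.
	 */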
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				    (struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;
			} else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
				   optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe = (struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
		    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


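/* Decide how many RX/TX queues to ask for. Multi-channel modes
 * (FLEX10/UMC/VNIC), disabled RSS and BE2 silicon all force a single
 * queue pair; otherwise one WQ per CPU (capped by the RSS queue limit)
 * plus one extra RQ for non-RSS traffic. BE2 keeps a single WQ but may
 * still get the full set of RSS RQs when RSS is enabled.
 */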
static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE) ||
	    (sc->function_mode & FNM_VNIC_MODE) ||
	    (!is_rss_enabled(sc)) ||
	    IS_BE2(sc)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	} else {
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
	}

	if (IS_BE2(sc) && is_rss_enabled(sc))
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
}


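/* The interrupt vector count actually granted may be lower than what
 * was requested above, so the final RQ/WQ counts are re-derived from
 * sc->intr_count, again with one extra RQ for non-RSS traffic.
 */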
static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}

	if (IS_BE2(sc))
		sc->nwqs = 1;
}
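/* Returns TRUE for an IPv6 frame whose next header is neither TCP nor
 * UDP and whose first extension header has a length byte of 0xff; used
 * by oce_tx_asic_stall_verify() below to spot frames that can stall
 * the TX path on early BE3 silicon. m_data is restored before
 * returning, so the mbuf is left untouched.
 */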
static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
		    (ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;

			m->m_data += sizeof(struct ip6_hdr);
			ip6e = mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}
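/* BE3 revision A1 (and earlier) reports an ASIC revision below 2. */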
static int
is_be3_a1(POCE_SOFTC sc)
{
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}
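/* Software VLAN tagging work-around: move the tag from mbuf metadata
 * into the frame body with ether_vlanencap(). The port VLAN id (pvid)
 * substitutes for a missing tag, and a QnQ outer tag is stacked on top
 * when configured; in both cases *complete is cleared to flag the
 * special handling to the transmit path.
 */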
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if (!M_WRITABLE(m))
		return NULL;

	/* Embed the vlan tag in the packet if it is not already part of it */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* If UMC, ignore vlan tag insertion and instead insert the pvid */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
		if (m == NULL)
			return NULL;
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		if (m == NULL)
			return NULL;
		*complete = FALSE;
	}
	return m;
}
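/* TRUE when all three stall conditions line up: BE3 A1 silicon, QnQ or
 * UMC mode, and an IPv6 extension header in the frame.
 */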
static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
	    oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}
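/* Discover queue resources. Non-BE adapters query the firmware via
 * oce_get_profile_config() and fall back to driver defaults if that
 * fails; BE3 always uses the fixed defaults rather than the
 * firmware-reported profile.
 */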
static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_profile_config(sc, max_rss);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	} else { /* For BE3, don't rely on the FW to determine the resources */
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		sc->nwqs = OCE_MAX_WQ;
		sc->max_vlans = MAX_VLANFILTER_SIZE;
	}
}