1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39/* $FreeBSD: head/sys/dev/oce/oce_if.c 257007 2013-10-23 18:58:38Z delphij $ */
40
41#include "opt_inet6.h"
42#include "opt_inet.h"
43
44#include "oce_if.h"
45
46/* UE Status Low CSR */
47static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80};
81
82/* UE Status High CSR */
83static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
117
118
119/* Driver entry points prototypes */
120static int oce_probe(device_t dev);
121static int oce_attach(device_t dev);
122static int oce_detach(device_t dev);
123static int oce_shutdown(device_t dev);
124static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
125static void oce_init(void *xsc);
126static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
127static void oce_multiq_flush(struct ifnet *ifp);
128
129/* Driver interrupt routines prototypes */
130static void oce_intr(void *arg, int pending);
131static int oce_setup_intr(POCE_SOFTC sc);
132static int oce_fast_isr(void *arg);
133static int oce_alloc_intr(POCE_SOFTC sc, int vector,
134 void (*isr) (void *arg, int pending));
135
136/* Media callbacks prototypes */
137static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
138static int oce_media_change(struct ifnet *ifp);
139
140/* Transmit routines prototypes */
141static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
142static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
143static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
144 uint32_t status);
145static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
146 struct oce_wq *wq);
147
148/* Receive routines prototypes */
149static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
150static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
151static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
152static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
153 struct oce_nic_rx_cqe *cqe);
154
155/* Helper function prototypes in this file */
156static int oce_attach_ifp(POCE_SOFTC sc);
157static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
158static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
159static int oce_vid_config(POCE_SOFTC sc);
160static void oce_mac_addr_set(POCE_SOFTC sc);
161static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
162static void oce_local_timer(void *arg);
163static void oce_if_deactivate(POCE_SOFTC sc);
164static void oce_if_activate(POCE_SOFTC sc);
165static void setup_max_queues_want(POCE_SOFTC sc);
166static void update_queues_got(POCE_SOFTC sc);
167static void process_link_state(POCE_SOFTC sc,
168 struct oce_async_cqe_link_state *acqe);
169static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
170static void oce_get_config(POCE_SOFTC sc);
171static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
172
173/* IP specific */
174#if defined(INET6) || defined(INET)
175static int oce_init_lro(POCE_SOFTC sc);
176static void oce_rx_flush_lro(struct oce_rq *rq);
177static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
178#endif
179
180static device_method_t oce_dispatch[] = {
181 DEVMETHOD(device_probe, oce_probe),
182 DEVMETHOD(device_attach, oce_attach),
183 DEVMETHOD(device_detach, oce_detach),
184 DEVMETHOD(device_shutdown, oce_shutdown),
185
186 DEVMETHOD_END
187};
188
189static driver_t oce_driver = {
190 "oce",
191 oce_dispatch,
192 sizeof(OCE_SOFTC)
193};
194static devclass_t oce_devclass;
195
196
197DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
198MODULE_DEPEND(oce, pci, 1, 1, 1);
199MODULE_DEPEND(oce, ether, 1, 1, 1);
200MODULE_VERSION(oce, 1);
201
202
203/* global vars */
204const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
205
206/* Module capabilities and parameters */
207uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
208uint32_t oce_enable_rss = OCE_MODCAP_RSS;
209
210
211TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
212TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
213
214
215/* Supported devices table */
216static uint32_t supportedDevices[] = {
217 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
218 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
219 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
220 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
221 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
222 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
223};
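
/*
 * Sketch of the table encoding above (the numeric IDs here are
 * hypothetical): each entry packs the 16-bit PCI vendor ID into the
 * upper half of a uint32_t and the device ID into the lower half, e.g.
 *
 *   (0x10df << 16) | 0x0720 == 0x10df0720
 *
 * oce_probe() splits an entry back out with (entry >> 16) & 0xffff for
 * the vendor and entry & 0xffff for the device.
 */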
224
225
226
227
228/*****************************************************************************
229 * Driver entry points functions *
230 *****************************************************************************/
231
232static int
233oce_probe(device_t dev)
234{
235 uint16_t vendor = 0;
236 uint16_t device = 0;
237 int i = 0;
238 char str[256] = {0};
239 POCE_SOFTC sc;
240
241 sc = device_get_softc(dev);
242 bzero(sc, sizeof(OCE_SOFTC));
243 sc->dev = dev;
244
245 vendor = pci_get_vendor(dev);
246 device = pci_get_device(dev);
247
248 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
249 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
250 if (device == (supportedDevices[i] & 0xffff)) {
251 sprintf(str, "%s:%s", "Emulex CNA NIC function",
252 component_revision);
253 device_set_desc_copy(dev, str);
254
255 switch (device) {
256 case PCI_PRODUCT_BE2:
257 sc->flags |= OCE_FLAGS_BE2;
258 break;
259 case PCI_PRODUCT_BE3:
260 sc->flags |= OCE_FLAGS_BE3;
261 break;
262 case PCI_PRODUCT_XE201:
263 case PCI_PRODUCT_XE201_VF:
264 sc->flags |= OCE_FLAGS_XE201;
265 break;
266 case PCI_PRODUCT_SH:
267 sc->flags |= OCE_FLAGS_SH;
268 break;
269 default:
270 return ENXIO;
271 }
272 return BUS_PROBE_DEFAULT;
273 }
274 }
275 }
276
277 return ENXIO;
278}
279
280
281static int
282oce_attach(device_t dev)
283{
284 POCE_SOFTC sc;
285 int rc = 0;
286
287 sc = device_get_softc(dev);
288
289 rc = oce_hw_pci_alloc(sc);
290 if (rc)
291 return rc;
292
293 sc->tx_ring_size = OCE_TX_RING_SIZE;
294 sc->rx_ring_size = OCE_RX_RING_SIZE;
295 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
296 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
297 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
298
299 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
300 LOCK_CREATE(&sc->dev_lock, "Device_lock");
301
302 /* initialise the hardware */
303 rc = oce_hw_init(sc);
304 if (rc)
305 goto pci_res_free;
306
307 oce_get_config(sc);
308
309 setup_max_queues_want(sc);
310
311 rc = oce_setup_intr(sc);
312 if (rc)
313 goto mbox_free;
314
315 rc = oce_queue_init_all(sc);
316 if (rc)
317 goto intr_free;
318
319 rc = oce_attach_ifp(sc);
320 if (rc)
321 goto queues_free;
322
323#if defined(INET6) || defined(INET)
324 rc = oce_init_lro(sc);
325 if (rc)
326 goto ifp_free;
327#endif
328
329 rc = oce_hw_start(sc);
330 if (rc)
331 goto lro_free;
332
333 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
334 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
335 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
336 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
337
338 rc = oce_stats_init(sc);
339 if (rc)
340 goto vlan_free;
341
342 oce_add_sysctls(sc);
343
344 callout_init(&sc->timer, CALLOUT_MPSAFE);
345 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
346 if (rc)
347 goto stats_free;
348
349 return 0;
350
351stats_free:
352 callout_drain(&sc->timer);
353 oce_stats_free(sc);
354vlan_free:
355 if (sc->vlan_attach)
356 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
357 if (sc->vlan_detach)
358 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
359 oce_hw_intr_disable(sc);
360lro_free:
361#if defined(INET6) || defined(INET)
362 oce_free_lro(sc);
363ifp_free:
364#endif
365 ether_ifdetach(sc->ifp);
366 if_free(sc->ifp);
367queues_free:
368 oce_queue_release_all(sc);
369intr_free:
370 oce_intr_free(sc);
371mbox_free:
372 oce_dma_free(sc, &sc->bsmbx);
373pci_res_free:
374 oce_hw_pci_free(sc);
375 LOCK_DESTROY(&sc->dev_lock);
376 LOCK_DESTROY(&sc->bmbx_lock);
377 return rc;
378
379}
380
381
382static int
383oce_detach(device_t dev)
384{
385 POCE_SOFTC sc = device_get_softc(dev);
386
387 LOCK(&sc->dev_lock);
388 oce_if_deactivate(sc);
389 UNLOCK(&sc->dev_lock);
390
391 callout_drain(&sc->timer);
392
393 if (sc->vlan_attach != NULL)
394 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
395 if (sc->vlan_detach != NULL)
396 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
397
398 ether_ifdetach(sc->ifp);
399
400 if_free(sc->ifp);
401
402 oce_hw_shutdown(sc);
403
404 bus_generic_detach(dev);
405
406 return 0;
407}
408
409
410static int
411oce_shutdown(device_t dev)
412{
413 int rc;
414
415 rc = oce_detach(dev);
416
417 return rc;
418}
419
420
421static int
422oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
423{
424 struct ifreq *ifr = (struct ifreq *)data;
425 POCE_SOFTC sc = ifp->if_softc;
426 int rc = 0;
427 uint32_t u;
428
429 switch (command) {
430
431 case SIOCGIFMEDIA:
432 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
433 break;
434
435 case SIOCSIFMTU:
436 if (ifr->ifr_mtu > OCE_MAX_MTU)
437 rc = EINVAL;
438 else
439 ifp->if_mtu = ifr->ifr_mtu;
440 break;
441
442 case SIOCSIFFLAGS:
443 if (ifp->if_flags & IFF_UP) {
444 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
445 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
446 oce_init(sc);
447 }
448 device_printf(sc->dev, "Interface Up\n");
449 } else {
450 LOCK(&sc->dev_lock);
451
452 sc->ifp->if_drv_flags &=
453 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
454 oce_if_deactivate(sc);
455
456 UNLOCK(&sc->dev_lock);
457
458 device_printf(sc->dev, "Interface Down\n");
459 }
460
461 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
462 if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
463 sc->promisc = TRUE;
464 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
465 if (!oce_rxf_set_promiscuous(sc, 0))
466 sc->promisc = FALSE;
467 }
468
469 break;
470
471 case SIOCADDMULTI:
472 case SIOCDELMULTI:
473 rc = oce_hw_update_multicast(sc);
474 if (rc)
475 device_printf(sc->dev,
476 "Update multicast address failed\n");
477 break;
478
479 case SIOCSIFCAP:
480 u = ifr->ifr_reqcap ^ ifp->if_capenable;
481
482 if (u & IFCAP_TXCSUM) {
483 ifp->if_capenable ^= IFCAP_TXCSUM;
484 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
485
486 if (IFCAP_TSO & ifp->if_capenable &&
487 !(IFCAP_TXCSUM & ifp->if_capenable)) {
488 ifp->if_capenable &= ~IFCAP_TSO;
489 ifp->if_hwassist &= ~CSUM_TSO;
490 if_printf(ifp,
491 "TSO disabled due to -txcsum.\n");
492 }
493 }
494
495 if (u & IFCAP_RXCSUM)
496 ifp->if_capenable ^= IFCAP_RXCSUM;
497
498 if (u & IFCAP_TSO4) {
499 ifp->if_capenable ^= IFCAP_TSO4;
500
501 if (IFCAP_TSO & ifp->if_capenable) {
502 if (IFCAP_TXCSUM & ifp->if_capenable)
503 ifp->if_hwassist |= CSUM_TSO;
504 else {
505 ifp->if_capenable &= ~IFCAP_TSO;
506 ifp->if_hwassist &= ~CSUM_TSO;
507 if_printf(ifp,
508 "Enable txcsum first.\n");
509 rc = EAGAIN;
510 }
511 } else
512 ifp->if_hwassist &= ~CSUM_TSO;
513 }
514
515 if (u & IFCAP_VLAN_HWTAGGING)
516 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
517
518 if (u & IFCAP_VLAN_HWFILTER) {
519 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
520 oce_vid_config(sc);
521 }
522#if defined(INET6) || defined(INET)
523 if (u & IFCAP_LRO)
524 ifp->if_capenable ^= IFCAP_LRO;
525#endif
526
527 break;
528
529 case SIOCGPRIVATE_0:
530 rc = oce_handle_passthrough(ifp, data);
531 break;
532 default:
533 rc = ether_ioctl(ifp, command, data);
534 break;
535 }
536
537 return rc;
538}
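
/*
 * Worked example of the SIOCSIFCAP handling above (illustrative): if the
 * user runs "ifconfig oce0 -txcsum" while TXCSUM and TSO are enabled,
 * ifr_reqcap differs from if_capenable in the IFCAP_TXCSUM bit, so
 * u & IFCAP_TXCSUM is non-zero; the handler clears TXCSUM and, since TSO
 * cannot work without TX checksumming, also clears IFCAP_TSO/CSUM_TSO
 * and prints "TSO disabled due to -txcsum.".
 */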
539
540
541static void
542oce_init(void *arg)
543{
544 POCE_SOFTC sc = arg;
545
546 LOCK(&sc->dev_lock);
547
548 if (sc->ifp->if_flags & IFF_UP) {
549 oce_if_deactivate(sc);
550 oce_if_activate(sc);
551 }
552
553 UNLOCK(&sc->dev_lock);
554
555}
556
557
558static int
559oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
560{
561 POCE_SOFTC sc = ifp->if_softc;
562 struct oce_wq *wq = NULL;
563 int queue_index = 0;
564 int status = 0;
565
566 if (!sc->link_status)
567 return ENXIO;
568
569 if ((m->m_flags & M_FLOWID) != 0)
570 queue_index = m->m_pkthdr.flowid % sc->nwqs;
571
572 wq = sc->wq[queue_index];
573
574 LOCK(&wq->tx_lock);
575 status = oce_multiq_transmit(ifp, m, wq);
576 UNLOCK(&wq->tx_lock);
577
578 return status;
579
580}
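
/*
 * Illustrative example of the queue selection above: with sc->nwqs == 4,
 * an mbuf carrying flowid 43 is steered to wq[43 % 4] == wq[3]; mbufs
 * without M_FLOWID always fall back to wq[0].
 */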
581
582
583static void
584oce_multiq_flush(struct ifnet *ifp)
585{
586 POCE_SOFTC sc = ifp->if_softc;
587 struct mbuf *m;
588 int i = 0;
589
590 for (i = 0; i < sc->nwqs; i++) {
591 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
592 m_freem(m);
593 }
594 if_qflush(ifp);
595}
596
597
598
599/*****************************************************************************
600 * Driver interrupt routines functions *
601 *****************************************************************************/
602
603static void
604oce_intr(void *arg, int pending)
605{
606
607 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
608 POCE_SOFTC sc = ii->sc;
609 struct oce_eq *eq = ii->eq;
610 struct oce_eqe *eqe;
611 struct oce_cq *cq = NULL;
612 int i, num_eqes = 0;
613
614
615 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
616 BUS_DMASYNC_POSTWRITE);
617 do {
618 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
619 if (eqe->evnt == 0)
620 break;
621 eqe->evnt = 0;
622 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
623 BUS_DMASYNC_POSTWRITE);
624 RING_GET(eq->ring, 1);
625 num_eqes++;
626
627 } while (TRUE);
628
629 if (!num_eqes)
630 goto eq_arm; /* Spurious */
631
632	/* Clear EQ entries, but don't arm */
633 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
634
635	/* Process TX, RX and MCC completions, but don't arm the CQs */
636 for (i = 0; i < eq->cq_valid; i++) {
637 cq = eq->cq[i];
638 (*cq->cq_handler)(cq->cb_arg);
639 }
640
641 /* Arm all cqs connected to this EQ */
642 for (i = 0; i < eq->cq_valid; i++) {
643 cq = eq->cq[i];
644 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
645 }
646
647eq_arm:
648 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
649
650 return;
651}
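
/*
 * Recap of the arming protocol above (informational): EQ entries are
 * consumed and acked without re-arming, the CQ handlers for TX/RX/MCC
 * then drain their rings, each CQ is re-armed, and only then is the EQ
 * re-armed; presumably arming the EQ last avoids taking a fresh
 * interrupt while completions are still being processed.
 */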
652
653
654static int
655oce_setup_intr(POCE_SOFTC sc)
656{
657 int rc = 0, use_intx = 0;
658 int vector = 0, req_vectors = 0;
659
660 if (is_rss_enabled(sc))
661 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
662 else
663 req_vectors = 1;
664
665 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
666 sc->intr_count = req_vectors;
667 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
668 if (rc != 0) {
669 use_intx = 1;
670 pci_release_msi(sc->dev);
671 } else
672 sc->flags |= OCE_FLAGS_USING_MSIX;
673 } else
674 use_intx = 1;
675
676 if (use_intx)
677 sc->intr_count = 1;
678
679 /* Scale number of queues based on intr we got */
680 update_queues_got(sc);
681
682 if (use_intx) {
683 device_printf(sc->dev, "Using legacy interrupt\n");
684 rc = oce_alloc_intr(sc, vector, oce_intr);
685 if (rc)
686 goto error;
687 } else {
688 for (; vector < sc->intr_count; vector++) {
689 rc = oce_alloc_intr(sc, vector, oce_intr);
690 if (rc)
691 goto error;
692 }
693 }
694
695 return 0;
696error:
697 oce_intr_free(sc);
698 return rc;
699}
700
701
702static int
703oce_fast_isr(void *arg)
704{
705 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
706 POCE_SOFTC sc = ii->sc;
707
708 if (ii->eq == NULL)
709 return FILTER_STRAY;
710
711 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
712
713 taskqueue_enqueue_fast(ii->tq, &ii->task);
714
715 ii->eq->intr++;
716
717 return FILTER_HANDLED;
718}
719
720
721static int
722oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
723{
724 POCE_INTR_INFO ii = &sc->intrs[vector];
725 int rc = 0, rr;
726
727 if (vector >= OCE_MAX_EQ)
728 return (EINVAL);
729
730 /* Set the resource id for the interrupt.
731 * MSIx is vector + 1 for the resource id,
732 * INTx is 0 for the resource id.
733 */
734 if (sc->flags & OCE_FLAGS_USING_MSIX)
735 rr = vector + 1;
736 else
737 rr = 0;
738 ii->intr_res = bus_alloc_resource_any(sc->dev,
739 SYS_RES_IRQ,
740 &rr, RF_ACTIVE|RF_SHAREABLE);
741 ii->irq_rr = rr;
742 if (ii->intr_res == NULL) {
743 device_printf(sc->dev,
744 "Could not allocate interrupt\n");
745 rc = ENXIO;
746 return rc;
747 }
748
749 TASK_INIT(&ii->task, 0, isr, ii);
750 ii->vector = vector;
751 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
752 ii->tq = taskqueue_create_fast(ii->task_name,
753 M_NOWAIT,
754 taskqueue_thread_enqueue,
755 &ii->tq);
756 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
757 device_get_nameunit(sc->dev));
758
759 ii->sc = sc;
760 rc = bus_setup_intr(sc->dev,
761 ii->intr_res,
762 INTR_TYPE_NET,
763 oce_fast_isr, NULL, ii, &ii->tag);
764 return rc;
765
766}
767
768
769void
770oce_intr_free(POCE_SOFTC sc)
771{
772 int i = 0;
773
774 for (i = 0; i < sc->intr_count; i++) {
775
776 if (sc->intrs[i].tag != NULL)
777 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
778 sc->intrs[i].tag);
779 if (sc->intrs[i].tq != NULL)
780 taskqueue_free(sc->intrs[i].tq);
781
782 if (sc->intrs[i].intr_res != NULL)
783 bus_release_resource(sc->dev, SYS_RES_IRQ,
784 sc->intrs[i].irq_rr,
785 sc->intrs[i].intr_res);
786 sc->intrs[i].tag = NULL;
787 sc->intrs[i].intr_res = NULL;
788 }
789
790 if (sc->flags & OCE_FLAGS_USING_MSIX)
791 pci_release_msi(sc->dev);
792
793}
794
795
796
797/******************************************************************************
798* Media callbacks functions *
799******************************************************************************/
800
801static void
802oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
803{
804 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
805
806
807 req->ifm_status = IFM_AVALID;
808 req->ifm_active = IFM_ETHER;
809
810 if (sc->link_status == 1)
811 req->ifm_status |= IFM_ACTIVE;
812 else
813 return;
814
815 switch (sc->link_speed) {
816 case 1: /* 10 Mbps */
817 req->ifm_active |= IFM_10_T | IFM_FDX;
818 sc->speed = 10;
819 break;
820 case 2: /* 100 Mbps */
821 req->ifm_active |= IFM_100_TX | IFM_FDX;
822 sc->speed = 100;
823 break;
824 case 3: /* 1 Gbps */
825 req->ifm_active |= IFM_1000_T | IFM_FDX;
826 sc->speed = 1000;
827 break;
828 case 4: /* 10 Gbps */
829 req->ifm_active |= IFM_10G_SR | IFM_FDX;
830 sc->speed = 10000;
831 break;
832 }
833
834 return;
835}
836
837
838int
839oce_media_change(struct ifnet *ifp)
840{
841 return 0;
842}
843
844
845
846
847/*****************************************************************************
848 * Transmit routines functions *
849 *****************************************************************************/
850
851static int
852oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
853{
854 int rc = 0, i, retry_cnt = 0;
855 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
856 struct mbuf *m, *m_temp;
857 struct oce_wq *wq = sc->wq[wq_index];
858 struct oce_packet_desc *pd;
859 struct oce_nic_hdr_wqe *nichdr;
860 struct oce_nic_frag_wqe *nicfrag;
861 int num_wqes;
862 uint32_t reg_value;
863 boolean_t complete = TRUE;
864
865 m = *mpp;
866 if (!m)
867 return EINVAL;
868
869 if (!(m->m_flags & M_PKTHDR)) {
870 rc = ENXIO;
871 goto free_ret;
872 }
873
874 if(oce_tx_asic_stall_verify(sc, m)) {
875 m = oce_insert_vlan_tag(sc, m, &complete);
876 if(!m) {
877 device_printf(sc->dev, "Insertion unsuccessful\n");
878 return 0;
879 }
880
881 }
882
883 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
884 /* consolidate packet buffers for TSO/LSO segment offload */
885#if defined(INET6) || defined(INET)
886 m = oce_tso_setup(sc, mpp);
887#else
888 m = NULL;
889#endif
890 if (m == NULL) {
891 rc = ENXIO;
892 goto free_ret;
893 }
894 }
895
896 pd = &wq->pckts[wq->pkt_desc_head];
897retry:
898 rc = bus_dmamap_load_mbuf_sg(wq->tag,
899 pd->map,
900 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
901 if (rc == 0) {
902 num_wqes = pd->nsegs + 1;
903 if (IS_BE(sc) || IS_SH(sc)) {
904 /*Dummy required only for BE3.*/
905 if (num_wqes & 1)
906 num_wqes++;
907 }
908 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
909 bus_dmamap_unload(wq->tag, pd->map);
910 return EBUSY;
911 }
912 atomic_store_rel_int(&wq->pkt_desc_head,
913 (wq->pkt_desc_head + 1) % \
914 OCE_WQ_PACKET_ARRAY_SIZE);
915 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
916 pd->mbuf = m;
917
918 nichdr =
919 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
920 nichdr->u0.dw[0] = 0;
921 nichdr->u0.dw[1] = 0;
922 nichdr->u0.dw[2] = 0;
923 nichdr->u0.dw[3] = 0;
924
925 nichdr->u0.s.complete = complete;
926 nichdr->u0.s.event = 1;
927 nichdr->u0.s.crc = 1;
928 nichdr->u0.s.forward = 0;
929 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
930 nichdr->u0.s.udpcs =
931 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
932 nichdr->u0.s.tcpcs =
933 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
934 nichdr->u0.s.num_wqe = num_wqes;
935 nichdr->u0.s.total_length = m->m_pkthdr.len;
936
937 if (m->m_flags & M_VLANTAG) {
938 nichdr->u0.s.vlan = 1; /*Vlan present*/
939 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
940 }
941
942 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
943 if (m->m_pkthdr.tso_segsz) {
944 nichdr->u0.s.lso = 1;
945 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
946 }
947			if (!IS_BE(sc) && !IS_SH(sc))
948 nichdr->u0.s.ipcs = 1;
949 }
950
951 RING_PUT(wq->ring, 1);
952 atomic_add_int(&wq->ring->num_used, 1);
953
954 for (i = 0; i < pd->nsegs; i++) {
955 nicfrag =
956 RING_GET_PRODUCER_ITEM_VA(wq->ring,
957 struct oce_nic_frag_wqe);
958 nicfrag->u0.s.rsvd0 = 0;
959 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
960 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
961 nicfrag->u0.s.frag_len = segs[i].ds_len;
962 pd->wqe_idx = wq->ring->pidx;
963 RING_PUT(wq->ring, 1);
964 atomic_add_int(&wq->ring->num_used, 1);
965 }
966 if (num_wqes > (pd->nsegs + 1)) {
967 nicfrag =
968 RING_GET_PRODUCER_ITEM_VA(wq->ring,
969 struct oce_nic_frag_wqe);
970 nicfrag->u0.dw[0] = 0;
971 nicfrag->u0.dw[1] = 0;
972 nicfrag->u0.dw[2] = 0;
973 nicfrag->u0.dw[3] = 0;
974 pd->wqe_idx = wq->ring->pidx;
975 RING_PUT(wq->ring, 1);
976 atomic_add_int(&wq->ring->num_used, 1);
977 pd->nsegs++;
978 }
979
980 sc->ifp->if_opackets++;
981 wq->tx_stats.tx_reqs++;
982 wq->tx_stats.tx_wrbs += num_wqes;
983 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
984 wq->tx_stats.tx_pkts++;
985
986 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
987 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
988 reg_value = (num_wqes << 16) | wq->wq_id;
989 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
990
991 } else if (rc == EFBIG) {
992 if (retry_cnt == 0) {
993 m_temp = m_defrag(m, M_NOWAIT);
994 if (m_temp == NULL)
995 goto free_ret;
996 m = m_temp;
997 *mpp = m_temp;
998 retry_cnt = retry_cnt + 1;
999 goto retry;
1000 } else
1001 goto free_ret;
1002 } else if (rc == ENOMEM)
1003 return rc;
1004 else
1005 goto free_ret;
1006
1007 return 0;
1008
1009free_ret:
1010 m_freem(*mpp);
1011 *mpp = NULL;
1012 return rc;
1013}
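
/*
 * Worked example of the TX doorbell write above (illustrative): posting
 * a packet that consumed 3 WQEs to work queue id 5 yields
 * reg_value = (3 << 16) | 5 == 0x00030005, i.e. the WQE count in the
 * upper half-word and the queue id in the lower half-word.
 */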
1014
1015
1016static void
1017oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
1018{
1019 struct oce_packet_desc *pd;
1020 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1021 struct mbuf *m;
1022
1023 pd = &wq->pckts[wq->pkt_desc_tail];
1024 atomic_store_rel_int(&wq->pkt_desc_tail,
1025 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1026 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1027 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1028 bus_dmamap_unload(wq->tag, pd->map);
1029
1030 m = pd->mbuf;
1031 m_freem(m);
1032 pd->mbuf = NULL;
1033
1034
1035 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1036 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1037 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
1038 oce_tx_restart(sc, wq);
1039 }
1040 }
1041}
1042
1043
1044static void
1045oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1046{
1047
1048 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1049 return;
1050
1051#if __FreeBSD_version >= 800000
1052 if (!drbr_empty(sc->ifp, wq->br))
1053#else
1054 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1055#endif
1056 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1057
1058}
1059
1060
1061#if defined(INET6) || defined(INET)
1062static struct mbuf *
1063oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
1064{
1065 struct mbuf *m;
1066#ifdef INET
1067 struct ip *ip;
1068#endif
1069#ifdef INET6
1070 struct ip6_hdr *ip6;
1071#endif
1072 struct ether_vlan_header *eh;
1073 struct tcphdr *th;
1074 uint16_t etype;
1075 int total_len = 0, ehdrlen = 0;
1076
1077 m = *mpp;
1078
1079 if (M_WRITABLE(m) == 0) {
1080 m = m_dup(*mpp, M_NOWAIT);
1081 if (!m)
1082 return NULL;
1083 m_freem(*mpp);
1084 *mpp = m;
1085 }
1086
1087 eh = mtod(m, struct ether_vlan_header *);
1088 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1089 etype = ntohs(eh->evl_proto);
1090 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1091 } else {
1092 etype = ntohs(eh->evl_encap_proto);
1093 ehdrlen = ETHER_HDR_LEN;
1094 }
1095
1096 switch (etype) {
1097#ifdef INET
1098 case ETHERTYPE_IP:
1099 ip = (struct ip *)(m->m_data + ehdrlen);
1100 if (ip->ip_p != IPPROTO_TCP)
1101 return NULL;
1102 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1103
1104 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1105 break;
1106#endif
1107#ifdef INET6
1108 case ETHERTYPE_IPV6:
1109 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1110 if (ip6->ip6_nxt != IPPROTO_TCP)
1111 return NULL;
1112 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1113
1114 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1115 break;
1116#endif
1117 default:
1118 return NULL;
1119 }
1120
1121 m = m_pullup(m, total_len);
1122 if (!m)
1123 return NULL;
1124 *mpp = m;
1125 return m;
1126
1127}
1128#endif /* INET6 || INET */
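
/*
 * Illustrative arithmetic for the total_len computation above: for an
 * untagged IPv4/TCP frame with no IP or TCP options, ehdrlen = 14,
 * (ip_hl << 2) = 20 and (th_off << 2) = 20, so m_pullup() is asked for
 * 54 bytes of contiguous headers before TSO is attempted.
 */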
1129
1130void
1131oce_tx_task(void *arg, int npending)
1132{
1133 struct oce_wq *wq = arg;
1134 POCE_SOFTC sc = wq->parent;
1135 struct ifnet *ifp = sc->ifp;
1136 int rc = 0;
1137
1138#if __FreeBSD_version >= 800000
1139 LOCK(&wq->tx_lock);
1140 rc = oce_multiq_transmit(ifp, NULL, wq);
1141 if (rc) {
1142 device_printf(sc->dev,
1143 "TX[%d] restart failed\n", wq->queue_index);
1144 }
1145 UNLOCK(&wq->tx_lock);
1146#else
1147 oce_start(ifp);
1148#endif
1149
1150}
1151
1152
1153void
1154oce_start(struct ifnet *ifp)
1155{
1156 POCE_SOFTC sc = ifp->if_softc;
1157 struct mbuf *m;
1158 int rc = 0;
1159	int def_q = 0; /* Default TX queue is 0 */
1160
1161 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1162 IFF_DRV_RUNNING)
1163 return;
1164
1165 if (!sc->link_status)
1166 return;
1167
1168 do {
1169 IF_DEQUEUE(&sc->ifp->if_snd, m);
1170 if (m == NULL)
1171 break;
1172
1173 LOCK(&sc->wq[def_q]->tx_lock);
1174 rc = oce_tx(sc, &m, def_q);
1175 UNLOCK(&sc->wq[def_q]->tx_lock);
1176 if (rc) {
1177 if (m != NULL) {
1178 sc->wq[def_q]->tx_stats.tx_stops ++;
1179 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1180 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1181 m = NULL;
1182 }
1183 break;
1184 }
1185 if (m != NULL)
1186 ETHER_BPF_MTAP(ifp, m);
1187
1188 } while (TRUE);
1189
1190 return;
1191}
1192
1193
1194/* Handle the Completion Queue for transmit */
1195uint16_t
1196oce_wq_handler(void *arg)
1197{
1198 struct oce_wq *wq = (struct oce_wq *)arg;
1199 POCE_SOFTC sc = wq->parent;
1200 struct oce_cq *cq = wq->cq;
1201 struct oce_nic_tx_cqe *cqe;
1202 int num_cqes = 0;
1203
1204 bus_dmamap_sync(cq->ring->dma.tag,
1205 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1206 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1207 while (cqe->u0.dw[3]) {
1208 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
1209
1210 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1211 if (wq->ring->cidx >= wq->ring->num_items)
1212 wq->ring->cidx -= wq->ring->num_items;
1213
1214 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1215 wq->tx_stats.tx_compl++;
1216 cqe->u0.dw[3] = 0;
1217 RING_GET(cq->ring, 1);
1218 bus_dmamap_sync(cq->ring->dma.tag,
1219 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1220 cqe =
1221 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1222 num_cqes++;
1223 }
1224
1225 if (num_cqes)
1226 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1227
1228 return 0;
1229}
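
/*
 * Illustrative wrap-around in the handler above: with num_items == 512
 * (an assumed ring size) and a completion for wqe_index 511, cidx
 * becomes 512 and is folded back to 0 by the subtraction.
 */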
1230
1231
1232#if __FreeBSD_version >= 1000000
1233static __inline void
1234drbr_stats_update(struct ifnet *ifp, int len, int mflags)
1235{
1236#ifndef NO_SLOW_STATS
1237 ifp->if_obytes += len;
1238 if (mflags & M_MCAST)
1239 ifp->if_omcasts++;
1240#endif
1241}
1242#endif
1243
1244static int
1245oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1246{
1247 POCE_SOFTC sc = ifp->if_softc;
1248 int status = 0, queue_index = 0;
1249 struct mbuf *next = NULL;
1250 struct buf_ring *br = NULL;
1251
1252 br = wq->br;
1253 queue_index = wq->queue_index;
1254
1255 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1256 IFF_DRV_RUNNING) {
1257 if (m != NULL)
1258 status = drbr_enqueue(ifp, br, m);
1259 return status;
1260 }
1261
1262 if (m != NULL) {
1263 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1264 return status;
1265 }
1266 while ((next = drbr_peek(ifp, br)) != NULL) {
1267 if (oce_tx(sc, &next, queue_index)) {
1268 if (next == NULL) {
1269 drbr_advance(ifp, br);
1270 } else {
1271 drbr_putback(ifp, br, next);
1272 wq->tx_stats.tx_stops ++;
1273 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1274 status = drbr_enqueue(ifp, br, next);
1275 }
1276 break;
1277 }
1278 drbr_advance(ifp, br);
1279 ifp->if_obytes += next->m_pkthdr.len;
1280 if (next->m_flags & M_MCAST)
1281 ifp->if_omcasts++;
1282 ETHER_BPF_MTAP(ifp, next);
1283 }
1284
1285 return status;
1286}
1287
1288
1289
1290
1291/*****************************************************************************
1292 * Receive routines functions *
1293 *****************************************************************************/
1294
1295static void
1296oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1297{
1298 uint32_t out;
1299 struct oce_packet_desc *pd;
1300 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1301 int i, len, frag_len;
1302 struct mbuf *m = NULL, *tail = NULL;
1303 uint16_t vtag;
1304
1305 len = cqe->u0.s.pkt_size;
1306 if (!len) {
1307		/* partial DMA workaround for Lancer */
1308 oce_discard_rx_comp(rq, cqe);
1309 goto exit;
1310 }
1311
1312 /* Get vlan_tag value */
1313 if(IS_BE(sc) || IS_SH(sc))
1314 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1315 else
1316 vtag = cqe->u0.s.vlan_tag;
1317
1318
1319 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1320
1321 if (rq->packets_out == rq->packets_in) {
1322 device_printf(sc->dev,
1323 "RQ transmit descriptor missing\n");
1324 }
1325 out = rq->packets_out + 1;
1326 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1327 out = 0;
1328 pd = &rq->pckts[rq->packets_out];
1329 rq->packets_out = out;
1330
1331 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1332 bus_dmamap_unload(rq->tag, pd->map);
1333 rq->pending--;
1334
1335 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1336 pd->mbuf->m_len = frag_len;
1337
1338 if (tail != NULL) {
1339 /* additional fragments */
1340 pd->mbuf->m_flags &= ~M_PKTHDR;
1341 tail->m_next = pd->mbuf;
1342 tail = pd->mbuf;
1343 } else {
1344 /* first fragment, fill out much of the packet header */
1345 pd->mbuf->m_pkthdr.len = len;
1346 pd->mbuf->m_pkthdr.csum_flags = 0;
1347 if (IF_CSUM_ENABLED(sc)) {
1348 if (cqe->u0.s.l4_cksum_pass) {
1349 pd->mbuf->m_pkthdr.csum_flags |=
1350 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1351 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1352 }
1353 if (cqe->u0.s.ip_cksum_pass) {
1354 if (!cqe->u0.s.ip_ver) { /* IPV4 */
1355 pd->mbuf->m_pkthdr.csum_flags |=
1356 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1357 }
1358 }
1359 }
1360 m = tail = pd->mbuf;
1361 }
1362 pd->mbuf = NULL;
1363 len -= frag_len;
1364 }
1365
1366 if (m) {
1367 if (!oce_cqe_portid_valid(sc, cqe)) {
1368 m_freem(m);
1369 goto exit;
1370 }
1371
1372 m->m_pkthdr.rcvif = sc->ifp;
1373#if __FreeBSD_version >= 800000
1374 if (rq->queue_index)
1375 m->m_pkthdr.flowid = (rq->queue_index - 1);
1376 else
1377 m->m_pkthdr.flowid = rq->queue_index;
1378 m->m_flags |= M_FLOWID;
1379#endif
1380		/* This determines if the vlan tag is valid */
1381 if (oce_cqe_vtp_valid(sc, cqe)) {
1382 if (sc->function_mode & FNM_FLEX10_MODE) {
1383 /* FLEX10. If QnQ is not set, neglect VLAN */
1384 if (cqe->u0.s.qnq) {
1385 m->m_pkthdr.ether_vtag = vtag;
1386 m->m_flags |= M_VLANTAG;
1387 }
1388 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1389				/* In UMC mode the pvid is generally stripped by
1390				   hw, but in some cases we have seen frames arrive
1391				   with the pvid intact. So if pvid == vlan, ignore the vlan.
1392				*/
1393 m->m_pkthdr.ether_vtag = vtag;
1394 m->m_flags |= M_VLANTAG;
1395 }
1396 }
1397
1398 sc->ifp->if_ipackets++;
1399#if defined(INET6) || defined(INET)
1400 /* Try to queue to LRO */
1401 if (IF_LRO_ENABLED(sc) &&
1402 (cqe->u0.s.ip_cksum_pass) &&
1403 (cqe->u0.s.l4_cksum_pass) &&
1404 (!cqe->u0.s.ip_ver) &&
1405 (rq->lro.lro_cnt != 0)) {
1406
1407 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1408 rq->lro_pkts_queued ++;
1409 goto post_done;
1410 }
1411 /* If LRO posting fails then try to post to STACK */
1412 }
1413#endif
1414
1415 (*sc->ifp->if_input) (sc->ifp, m);
1416#if defined(INET6) || defined(INET)
1417post_done:
1418#endif
1419 /* Update rx stats per queue */
1420 rq->rx_stats.rx_pkts++;
1421 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1422 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1423 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1424 rq->rx_stats.rx_mcast_pkts++;
1425 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1426 rq->rx_stats.rx_ucast_pkts++;
1427 }
1428exit:
1429 return;
1430}
1431
1432
1433static void
1434oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1435{
1436 uint32_t out, i = 0;
1437 struct oce_packet_desc *pd;
1438 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1439 int num_frags = cqe->u0.s.num_fragments;
1440
1441 for (i = 0; i < num_frags; i++) {
1442 if (rq->packets_out == rq->packets_in) {
1443 device_printf(sc->dev,
1444 "RQ transmit descriptor missing\n");
1445 }
1446 out = rq->packets_out + 1;
1447 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1448 out = 0;
1449 pd = &rq->pckts[rq->packets_out];
1450 rq->packets_out = out;
1451
1452 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1453 bus_dmamap_unload(rq->tag, pd->map);
1454 rq->pending--;
1455 m_freem(pd->mbuf);
1456 }
1457
1458}
1459
1460
1461static int
1462oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1463{
1464 struct oce_nic_rx_cqe_v1 *cqe_v1;
1465 int vtp = 0;
1466
1467 if (sc->be3_native) {
1468 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1469 vtp = cqe_v1->u0.s.vlan_tag_present;
1470 } else
1471 vtp = cqe->u0.s.vlan_tag_present;
1472
1473 return vtp;
1474
1475}
1476
1477
1478static int
1479oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1480{
1481 struct oce_nic_rx_cqe_v1 *cqe_v1;
1482 int port_id = 0;
1483
1484 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1485 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1486 port_id = cqe_v1->u0.s.port;
1487 if (sc->port_id != port_id)
1488 return 0;
1489 } else
1490		;/* For BE3 legacy and Lancer this check is a no-op */
1491
1492 return 1;
1493
1494}
1495
1496#if defined(INET6) || defined(INET)
1497static void
1498oce_rx_flush_lro(struct oce_rq *rq)
1499{
1500 struct lro_ctrl *lro = &rq->lro;
1501 struct lro_entry *queued;
1502 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1503
1504 if (!IF_LRO_ENABLED(sc))
1505 return;
1506
1507 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1508 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1509 tcp_lro_flush(lro, queued);
1510 }
1511 rq->lro_pkts_queued = 0;
1512
1513 return;
1514}
1515
1516
1517static int
1518oce_init_lro(POCE_SOFTC sc)
1519{
1520 struct lro_ctrl *lro = NULL;
1521 int i = 0, rc = 0;
1522
1523 for (i = 0; i < sc->nrqs; i++) {
1524 lro = &sc->rq[i]->lro;
1525 rc = tcp_lro_init(lro);
1526 if (rc != 0) {
1527 device_printf(sc->dev, "LRO init failed\n");
1528 return rc;
1529 }
1530 lro->ifp = sc->ifp;
1531 }
1532
1533 return rc;
1534}
1535
1536
1537void
1538oce_free_lro(POCE_SOFTC sc)
1539{
1540 struct lro_ctrl *lro = NULL;
1541 int i = 0;
1542
1543 for (i = 0; i < sc->nrqs; i++) {
1544 lro = &sc->rq[i]->lro;
1545 if (lro)
1546 tcp_lro_free(lro);
1547 }
1548}
1549#endif
1550
1551int
1552oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1553{
1554 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1555 int i, in, rc;
1556 struct oce_packet_desc *pd;
1557 bus_dma_segment_t segs[6];
1558 int nsegs, added = 0;
1559 struct oce_nic_rqe *rqe;
1560 pd_rxulp_db_t rxdb_reg;
1561
1562 bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1563 for (i = 0; i < count; i++) {
1564 in = rq->packets_in + 1;
1565 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1566 in = 0;
1567 if (in == rq->packets_out)
1568 break; /* no more room */
1569
1570 pd = &rq->pckts[rq->packets_in];
1571 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1572 if (pd->mbuf == NULL)
1573 break;
1574
1575 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1576 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1577 pd->map,
1578 pd->mbuf,
1579 segs, &nsegs, BUS_DMA_NOWAIT);
1580 if (rc) {
1581 m_free(pd->mbuf);
1582 break;
1583 }
1584
1585 if (nsegs != 1) {
1586 i--;
1587 continue;
1588 }
1589
1590 rq->packets_in = in;
1591 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1592
1593 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1594 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1595 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1596 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1597 RING_PUT(rq->ring, 1);
1598 added++;
1599 rq->pending++;
1600 }
1601 if (added != 0) {
1602 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1603 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1604 rxdb_reg.bits.qid = rq->rq_id;
1605 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1606 added -= OCE_MAX_RQ_POSTS;
1607 }
1608 if (added > 0) {
1609 rxdb_reg.bits.qid = rq->rq_id;
1610 rxdb_reg.bits.num_posted = added;
1611 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1612 }
1613 }
1614
1615 return 0;
1616}
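
/*
 * Illustrative walk-through of the doorbell batching above, assuming
 * OCE_MAX_RQ_POSTS is 255: if 300 buffers were added, the loop runs once
 * (300 / 255 == 1) posting 255, leaving added = 45, and the trailing
 * write posts the remaining 45, so num_posted never has to encode more
 * than OCE_MAX_RQ_POSTS buffers in a single doorbell.
 */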
1617
1618
1619/* Handle the Completion Queue for receive */
1620uint16_t
1621oce_rq_handler(void *arg)
1622{
1623 struct oce_rq *rq = (struct oce_rq *)arg;
1624 struct oce_cq *cq = rq->cq;
1625 POCE_SOFTC sc = rq->parent;
1626 struct oce_nic_rx_cqe *cqe;
1627 int num_cqes = 0, rq_buffers_used = 0;
1628
1629
1630 bus_dmamap_sync(cq->ring->dma.tag,
1631 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1632 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1633 while (cqe->u0.dw[2]) {
1634 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1635
1636 RING_GET(rq->ring, 1);
1637 if (cqe->u0.s.error == 0) {
1638 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1639 } else {
1640 rq->rx_stats.rxcp_err++;
1641 sc->ifp->if_ierrors++;
1642 /* Post L3/L4 errors to stack.*/
1643 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1644 }
1645 rq->rx_stats.rx_compl++;
1646 cqe->u0.dw[2] = 0;
1647
1648#if defined(INET6) || defined(INET)
1649 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1650 oce_rx_flush_lro(rq);
1651 }
1652#endif
1653
1654 RING_GET(cq->ring, 1);
1655 bus_dmamap_sync(cq->ring->dma.tag,
1656 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1657 cqe =
1658 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1659 num_cqes++;
1660 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1661 break;
1662 }
1663
1664#if defined(INET6) || defined(INET)
1665 if (IF_LRO_ENABLED(sc))
1666 oce_rx_flush_lro(rq);
1667#endif
1668
1669 if (num_cqes) {
1670 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1671 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1672 if (rq_buffers_used > 1)
1673 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1674 }
1675
1676 return 0;
1677
1678}
1679
1680
1681
1682
1683/*****************************************************************************
1684 *                  Helper functions defined in this file                   *
1685 *****************************************************************************/
1686
1687static int
1688oce_attach_ifp(POCE_SOFTC sc)
1689{
1690
1691 sc->ifp = if_alloc(IFT_ETHER);
1692 if (!sc->ifp)
1693 return ENOMEM;
1694
1695 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1696 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1697 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1698
1699 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1700 sc->ifp->if_ioctl = oce_ioctl;
1701 sc->ifp->if_start = oce_start;
1702 sc->ifp->if_init = oce_init;
1703 sc->ifp->if_mtu = ETHERMTU;
1704 sc->ifp->if_softc = sc;
1705#if __FreeBSD_version >= 800000
1706 sc->ifp->if_transmit = oce_multiq_start;
1707 sc->ifp->if_qflush = oce_multiq_flush;
1708#endif
1709
1710 if_initname(sc->ifp,
1711 device_get_name(sc->dev), device_get_unit(sc->dev));
1712
1713 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1714 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1715 IFQ_SET_READY(&sc->ifp->if_snd);
1716
1717 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1718 sc->ifp->if_hwassist |= CSUM_TSO;
1719 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1720
1721 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1722 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1723 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1724
1725#if defined(INET6) || defined(INET)
1726 sc->ifp->if_capabilities |= IFCAP_TSO;
1727 sc->ifp->if_capabilities |= IFCAP_LRO;
1728 sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1729#endif
1730
1731 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1732 if_initbaudrate(sc->ifp, IF_Gbps(10));
1733
1734#if __FreeBSD_version >= 1000000
1735 sc->ifp->if_hw_tsomax = OCE_MAX_TSO_SIZE;
1736#endif
1737
1738 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1739
1740 return 0;
1741}
1742
1743
1744static void
1745oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1746{
1747 POCE_SOFTC sc = ifp->if_softc;
1748
1749 if (ifp->if_softc != arg)
1750 return;
1751 if ((vtag == 0) || (vtag > 4095))
1752 return;
1753
1754 sc->vlan_tag[vtag] = 1;
1755 sc->vlans_added++;
1756 if (sc->vlans_added <= (sc->max_vlans + 1))
1757 oce_vid_config(sc);
1758}
1759
1760
1761static void
1762oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1763{
1764 POCE_SOFTC sc = ifp->if_softc;
1765
1766 if (ifp->if_softc != arg)
1767 return;
1768 if ((vtag == 0) || (vtag > 4095))
1769 return;
1770
1771 sc->vlan_tag[vtag] = 0;
1772 sc->vlans_added--;
1773 oce_vid_config(sc);
1774}
1775
1776
1777/*
1778 * A max of 64 vlans can be configured in BE. If the user configures
1779 * more, place the card in vlan promiscuous mode.
1780 */
1781static int
1782oce_vid_config(POCE_SOFTC sc)
1783{
1784 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1785 uint16_t ntags = 0, i;
1786 int status = 0;
1787
1788 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1789 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1790 for (i = 0; i < MAX_VLANS; i++) {
1791 if (sc->vlan_tag[i]) {
1792 vtags[ntags].vtag = i;
1793 ntags++;
1794 }
1795 }
1796 if (ntags)
1797 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1798 vtags, ntags, 1, 0);
1799 } else
1800 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1801 NULL, 0, 1, 1);
1802 return status;
1803}
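
/*
 * Example of the fallback above (illustrative): with 70 VLANs configured
 * and a 64-entry hardware filter, the first branch is skipped and
 * oce_config_vlan() is issued with an empty tag list and its trailing
 * argument set (VLAN promiscuous, in this reading of the two call
 * sites), so the port stops filtering and tagged traffic is not dropped
 * in hardware.
 */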
1804
1805
1806static void
1807oce_mac_addr_set(POCE_SOFTC sc)
1808{
1809 uint32_t old_pmac_id = sc->pmac_id;
1810 int status = 0;
1811
1812
1813 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1814 sc->macaddr.size_of_struct);
1815 if (!status)
1816 return;
1817
1818 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1819 sc->if_id, &sc->pmac_id);
1820 if (!status) {
1821 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1822 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1823 sc->macaddr.size_of_struct);
1824 }
1825 if (status)
1826		device_printf(sc->dev, "Failed to update MAC address\n");
1827
1828}
1829
1830
1831static int
1832oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1833{
1834 POCE_SOFTC sc = ifp->if_softc;
1835 struct ifreq *ifr = (struct ifreq *)data;
1836 int rc = ENXIO;
1837 char cookie[32] = {0};
1838 void *priv_data = (void *)ifr->ifr_data;
1839 void *ioctl_ptr;
1840 uint32_t req_size;
1841 struct mbx_hdr req;
1842 OCE_DMA_MEM dma_mem;
1843 struct mbx_common_get_cntl_attr *fw_cmd;
1844
1845 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1846 return EFAULT;
1847
1848 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1849 return EINVAL;
1850
1851 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1852 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1853 return EFAULT;
1854
1855 req_size = le32toh(req.u0.req.request_length);
1856 if (req_size > 65536)
1857 return EINVAL;
1858
1859 req_size += sizeof(struct mbx_hdr);
1860 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1861 if (rc)
1862 return ENOMEM;
1863
1864 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1865 rc = EFAULT;
1866 goto dma_free;
1867 }
1868
1869 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1870 if (rc) {
1871 rc = EIO;
1872 goto dma_free;
1873 }
1874
1875 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1876 rc = EFAULT;
1877
1878	/*
1879	   The firmware fills in all the attributes for this ioctl except
1880	   the driver version, so fill that in here.
1881	*/
1882 if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1883 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1884 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1885 COMPONENT_REVISION, strlen(COMPONENT_REVISION));
1886 }
1887
1888dma_free:
1889 oce_dma_free(sc, &dma_mem);
1890 return rc;
1891
1892}
1893
1894static void
1895oce_eqd_set_periodic(POCE_SOFTC sc)
1896{
1897 struct oce_set_eqd set_eqd[OCE_MAX_EQ];
1898 struct oce_aic_obj *aic;
1899 struct oce_eq *eqo;
1900 uint64_t now = 0, delta;
1901 int eqd, i, num = 0;
1902 uint32_t ips = 0;
1903 int tps;
1904
1905 for (i = 0 ; i < sc->neqs; i++) {
1906 eqo = sc->eq[i];
1907 aic = &sc->aic_obj[i];
1908		/* Static EQ delay was set from user space; use it as-is */
1909 if (!aic->enable) {
1910 eqd = aic->et_eqd;
1911 goto modify_eqd;
1912 }
1913
1914 now = ticks;
1915
1916		/* Overflow check */
1917 if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
1918 goto done;
1919
1920 delta = now - aic->ticks;
1921 tps = delta/hz;
1922
1923 /* Interrupt rate based on elapsed ticks */
1924 if(tps)
1925 ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
1926
1927 if (ips > INTR_RATE_HWM)
1928 eqd = aic->cur_eqd + 20;
1929 else if (ips < INTR_RATE_LWM)
1930 eqd = aic->cur_eqd / 2;
1931 else
1932 goto done;
1933
1934 if (eqd < 10)
1935 eqd = 0;
1936
1937 /* Make sure that the eq delay is in the known range */
1938 eqd = min(eqd, aic->max_eqd);
1939 eqd = max(eqd, aic->min_eqd);
1940
1941modify_eqd:
1942 if (eqd != aic->cur_eqd) {
1943 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1944 set_eqd[num].eq_id = eqo->eq_id;
1945 aic->cur_eqd = eqd;
1946 num++;
1947 }
1948done:
1949 aic->intr_prev = eqo->intr;
1950 aic->ticks = now;
1951 }
1952
1953	/* Is there at least one EQ that needs to be modified? */
1954 if(num)
1955 oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
1956
1957}
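
/*
 * Worked example of the adaptive delay above (illustrative): an EQ whose
 * interrupt rate exceeds INTR_RATE_HWM grows cur_eqd by 20; an eqd of 96
 * is then programmed as delay_multiplier = (96 * 65) / 100 == 62. A
 * quiet EQ halves its delay, and any value below 10 snaps to 0 before
 * being clamped into [min_eqd, max_eqd].
 */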
1958
1959static void oce_detect_hw_error(POCE_SOFTC sc)
1960{
1961
1962 uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
1963 uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1964 uint32_t i;
1965
1966 if (sc->hw_error)
1967 return;
1968
1969 if (IS_XE201(sc)) {
1970 sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
1971 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1972 sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
1973 sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
1974 }
1975 } else {
1976 ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
1977 ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
1978 ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
1979 ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);
1980
1981 ue_low = (ue_low & ~ue_low_mask);
1982 ue_high = (ue_high & ~ue_high_mask);
1983 }
1984
1985	/* On certain platforms BE hardware can indicate spurious UEs.
1986	 * In case of a real UE the hardware will stop working on its own,
1987	 * so hw_error is deliberately not set on UE detection alone.
1988	 */
1989 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1990 sc->hw_error = TRUE;
1991 device_printf(sc->dev, "Error detected in the card\n");
1992 }
1993
1994 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1995 device_printf(sc->dev,
1996 "ERR: sliport status 0x%x\n", sliport_status);
1997 device_printf(sc->dev,
1998 "ERR: sliport error1 0x%x\n", sliport_err1);
1999 device_printf(sc->dev,
2000 "ERR: sliport error2 0x%x\n", sliport_err2);
2001 }
2002
2003 if (ue_low) {
2004 for (i = 0; ue_low; ue_low >>= 1, i++) {
2005 if (ue_low & 1)
2006 device_printf(sc->dev, "UE: %s bit set\n",
2007 ue_status_low_desc[i]);
2008 }
2009 }
2010
2011 if (ue_high) {
2012 for (i = 0; ue_high; ue_high >>= 1, i++) {
2013 if (ue_high & 1)
2014 device_printf(sc->dev, "UE: %s bit set\n",
2015 ue_status_hi_desc[i]);
2016 }
2017 }
2018
2019}
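
/*
 * Illustrative decode of the UE loops above: after masking, a ue_low
 * value of 0x9 has bits 0 and 3 set, so the driver would print
 * "UE: CEV bit set" and "UE: ERX bit set" per the ue_status_low_desc[]
 * table at the top of this file.
 */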
2020
2021
2022static void
2023oce_local_timer(void *arg)
2024{
2025 POCE_SOFTC sc = arg;
2026 int i = 0;
2027
2028 oce_detect_hw_error(sc);
2029 oce_refresh_nic_stats(sc);
2030 oce_refresh_queue_stats(sc);
2031 oce_mac_addr_set(sc);
2032
2033	/* TX watchdog */
2034 for (i = 0; i < sc->nwqs; i++)
2035 oce_tx_restart(sc, sc->wq[i]);
2036
2037 /* calculate and set the eq delay for optimal interrupt rate */
2038 if (IS_BE(sc) || IS_SH(sc))
2039 oce_eqd_set_periodic(sc);
2040
2041 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2042}
2043
2044
2045/* NOTE : This should only be called holding
2046 * DEVICE_LOCK.
2047 */
2048static void
2049oce_if_deactivate(POCE_SOFTC sc)
2050{
2051 int i, mtime = 0;
2052 int wait_req = 0;
2053 struct oce_rq *rq;
2054 struct oce_wq *wq;
2055 struct oce_eq *eq;
2056
2057 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2058
2059	/* Wait a max of 400ms for TX completions to be done */
2060 while (mtime < 400) {
2061 wait_req = 0;
2062 for_all_wq_queues(sc, wq, i) {
2063 if (wq->ring->num_used) {
2064 wait_req = 1;
2065 DELAY(1);
2066 break;
2067 }
2068 }
2069 mtime += 1;
2070 if (!wait_req)
2071 break;
2072 }
2073
2074 /* Stop intrs and finish any bottom halves pending */
2075 oce_hw_intr_disable(sc);
2076
2077	/* Since taskqueue_drain takes the Giant lock, we should not hold
2078	   any other lock. So drop the device lock and reacquire it after
2079	   taskqueue_drain completes.
2080	*/
2081 UNLOCK(&sc->dev_lock);
2082 for (i = 0; i < sc->intr_count; i++) {
2083 if (sc->intrs[i].tq != NULL) {
2084 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2085 }
2086 }
2087 LOCK(&sc->dev_lock);
2088
2089 /* Delete RX queue in card with flush param */
2090 oce_stop_rx(sc);
2091
2092 /* Invalidate any pending cq and eq entries*/
2093 for_all_evnt_queues(sc, eq, i)
2094 oce_drain_eq(eq);
2095 for_all_rq_queues(sc, rq, i)
2096 oce_drain_rq_cq(rq);
2097 for_all_wq_queues(sc, wq, i)
2098 oce_drain_wq_cq(wq);
2099
2100	/* We still need to receive MCC async events,
2101	   so re-enable interrupts and arm the first EQ.
2102	*/
2103 oce_hw_intr_enable(sc);
2104 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2105
2106 DELAY(10);
2107}
2108
2109
2110static void
2111oce_if_activate(POCE_SOFTC sc)
2112{
2113 struct oce_eq *eq;
2114 struct oce_rq *rq;
2115 struct oce_wq *wq;
2116 int i, rc = 0;
2117
2118 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2119
2120 oce_hw_intr_disable(sc);
2121
2122 oce_start_rx(sc);
2123
2124 for_all_rq_queues(sc, rq, i) {
2125 rc = oce_start_rq(rq);
2126 if (rc)
2127 device_printf(sc->dev, "Unable to start RX\n");
2128 }
2129
2130 for_all_wq_queues(sc, wq, i) {
2131 rc = oce_start_wq(wq);
2132 if (rc)
2133 device_printf(sc->dev, "Unable to start TX\n");
2134 }
2135
2136
2137 for_all_evnt_queues(sc, eq, i)
2138 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2139
2140 oce_hw_intr_enable(sc);
2141
2142}
2143
2144static void
2145process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2146{
2147 /* Update Link status */
2148 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2149 ASYNC_EVENT_LINK_UP) {
2150 sc->link_status = ASYNC_EVENT_LINK_UP;
2151 if_link_state_change(sc->ifp, LINK_STATE_UP);
2152 } else {
2153 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2154 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2155 }
2156
2157 /* Update speed */
2158 sc->link_speed = acqe->u0.s.speed;
2159 sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
2160
2161}
2162
2163
2164/* Handle the Completion Queue for the Mailbox/Async notifications */
2165uint16_t
2166oce_mq_handler(void *arg)
2167{
2168 struct oce_mq *mq = (struct oce_mq *)arg;
2169 POCE_SOFTC sc = mq->parent;
2170 struct oce_cq *cq = mq->cq;
2171 int num_cqes = 0, evt_type = 0, optype = 0;
2172 struct oce_mq_cqe *cqe;
2173 struct oce_async_cqe_link_state *acqe;
2174 struct oce_async_event_grp5_pvid_state *gcqe;
2175 struct oce_async_event_qnq *dbgcqe;
2176
2177
2178 bus_dmamap_sync(cq->ring->dma.tag,
2179 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2180 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2181
2182 while (cqe->u0.dw[3]) {
2183 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
2184 if (cqe->u0.s.async_event) {
2185 evt_type = cqe->u0.s.event_type;
2186 optype = cqe->u0.s.async_type;
2187 if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
2188 /* Link status evt */
2189 acqe = (struct oce_async_cqe_link_state *)cqe;
2190 process_link_state(sc, acqe);
2191 } else if ((evt_type == ASYNC_EVENT_GRP5) &&
2192 (optype == ASYNC_EVENT_PVID_STATE)) {
2193 /* GRP5 PVID */
2194 gcqe =
2195 (struct oce_async_event_grp5_pvid_state *)cqe;
2196 if (gcqe->enabled)
2197 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2198 else
2199 sc->pvid = 0;
2200
2201 }
2202 else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
2203 optype == ASYNC_EVENT_DEBUG_QNQ) {
2204 dbgcqe =
2205 (struct oce_async_event_qnq *)cqe;
2206 if(dbgcqe->valid)
2207 sc->qnqid = dbgcqe->vlan_tag;
2208 sc->qnq_debug_event = TRUE;
2209 }
2210 }
2211 cqe->u0.dw[3] = 0;
2212 RING_GET(cq->ring, 1);
2213 bus_dmamap_sync(cq->ring->dma.tag,
2214 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2215 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2216 num_cqes++;
2217 }
2218
2219 if (num_cqes)
2220 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2221
2222 return 0;
2223}
2224
2225
2226static void
2227setup_max_queues_want(POCE_SOFTC sc)
2228{
2229	/* Check if this is a FLEX machine; if so, don't use RSS */
2230 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2231 (sc->function_mode & FNM_UMC_MODE) ||
2232 (sc->function_mode & FNM_VNIC_MODE) ||
2233 (!is_rss_enabled(sc)) ||
2234 (sc->flags & OCE_FLAGS_BE2)) {
2235 sc->nrqs = 1;
2236 sc->nwqs = 1;
2237 } else {
2238 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2239 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2240 }
2241}
2242
2243
2244static void
2245update_queues_got(POCE_SOFTC sc)
2246{
2247 if (is_rss_enabled(sc)) {
2248 sc->nrqs = sc->intr_count + 1;
2249 sc->nwqs = sc->intr_count;
2250 } else {
2251 sc->nrqs = 1;
2252 sc->nwqs = 1;
2253 }
2254}
2255
2256static int
2257oce_check_ipv6_ext_hdr(struct mbuf *m)
2258{
2259 struct ether_header *eh = mtod(m, struct ether_header *);
2260 caddr_t m_datatemp = m->m_data;
2261
2262 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2263 m->m_data += sizeof(struct ether_header);
2264 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2265
2266 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2267 (ip6->ip6_nxt != IPPROTO_UDP)){
2268 struct ip6_ext *ip6e = NULL;
2269 m->m_data += sizeof(struct ip6_hdr);
2270
2271 ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2272 if(ip6e->ip6e_len == 0xff) {
2273 m->m_data = m_datatemp;
2274 return TRUE;
2275 }
2276 }
2277 m->m_data = m_datatemp;
2278 }
2279 return FALSE;
2280}
2281
2282static int
2283is_be3_a1(POCE_SOFTC sc)
2284{
2285 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2286 return TRUE;
2287 }
2288 return FALSE;
2289}
2290
2291static struct mbuf *
2292oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2293{
2294 uint16_t vlan_tag = 0;
2295
2296 if(!M_WRITABLE(m))
2297 return NULL;
2298
2299 /* Embed vlan tag in the packet if it is not part of it */
2300 if(m->m_flags & M_VLANTAG) {
2301 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2302 m->m_flags &= ~M_VLANTAG;
2303 }
2304
2305 /* if UMC, ignore vlan tag insertion and instead insert pvid */
2306 if(sc->pvid) {
2307 if(!vlan_tag)
2308 vlan_tag = sc->pvid;
2309 *complete = FALSE;
2310 }
2311
2312 if(vlan_tag) {
2313 m = ether_vlanencap(m, vlan_tag);
2314 }
2315
2316 if(sc->qnqid) {
2317 m = ether_vlanencap(m, sc->qnqid);
2318 *complete = FALSE;
2319 }
2320 return m;
2321}
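
/*
 * Example of the encapsulation order above (illustrative): on a UMC port
 * with pvid 100 and qnqid 4000, an untagged mbuf is first wrapped with
 * tag 100 and then with the outer tag 4000, yielding a QinQ frame;
 * *complete is set FALSE in both cases, which oce_tx() propagates into
 * the WQE header's complete bit.
 */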
2322
2323static int
2324oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2325{
2326 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2327 oce_check_ipv6_ext_hdr(m)) {
2328 return TRUE;
2329 }
2330 return FALSE;
2331}
2332
2333static void
2334oce_get_config(POCE_SOFTC sc)
2335{
2336 int rc = 0;
2337 uint32_t max_rss = 0;
2338
2339 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2340 max_rss = OCE_LEGACY_MODE_RSS;
2341 else
2342 max_rss = OCE_MAX_RSS;
2343
2344 if (!IS_BE(sc)) {
2345 rc = oce_get_func_config(sc);
2346 if (rc) {
2347 sc->nwqs = OCE_MAX_WQ;
2348 sc->nrssqs = max_rss;
2349 sc->nrqs = sc->nrssqs + 1;
2350 }
2351 }
2352 else {
2353 rc = oce_get_profile_config(sc);
2354 sc->nrssqs = max_rss;
2355 sc->nrqs = sc->nrssqs + 1;
2356 if (rc)
2357 sc->nwqs = OCE_MAX_WQ;
2358 }
2359}