oce_if.c revision 330897
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD: stable/11/sys/dev/oce/oce_if.c 330897 2018-03-14 03:19:51Z eadler $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
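/*
 * Both knobs are loader tunables; for example, in /boot/loader.conf
 * (values below are illustrative only):
 *   hw.oce.max_rsp_handled="64"
 *   hw.oce.enable_rss="0"
 */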


/* Supported devices table */
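/* BE2/BE3 ASICs, Lancer (XE201, including its VF) and Skyhawk (SH) parts */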
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}


static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, 1);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	return 0;

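/*
 * Unwind in reverse order of setup; each label releases everything that
 * was allocated after the corresponding failure point.
 */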
stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}


static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

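		/* The mask below presumably selects unicast promiscuous
		 * (bit 0) and VLAN promiscuous (bit 1) filtering in
		 * oce_rxf_set_promiscuous(); see oce_hw.c. */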
		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
				sc->promisc = TRUE;
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, 0))
				sc->promisc = FALSE;
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}


/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


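	/*
	 * Drain every pending EQE and clear it in host memory, then notify
	 * the EQ of the consumed entries without rearming it.  The attached
	 * CQs are processed next and rearmed afterwards; the EQ itself is
	 * rearmed last so no event is lost between processing and rearm.
	 */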
	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC. But don't arm the CQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

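	/* With RSS, request one MSI-X vector per RSS RX queue (nrqs counts
	 * the default RQ too, hence nrqs - 1) or per WQ, whichever is
	 * larger; otherwise a single vector suffices. */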
	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on the interrupts we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue(ii->tq, &ii->task);

	ii->eq->intr++;

	return FILTER_HANDLED;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions 			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	case 5: /* 20 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 20000;
		break;
	case 6: /* 25 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 25000;
		break;
	case 7: /* 40 Gbps */
		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
		sc->speed = 40000;
		break;
	default:
		sc->speed = 0;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}



/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if(oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if(!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}

	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy required only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) % \
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}

		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc) || !IS_SH(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
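		/* Ring the TX doorbell: the number of WQEs posted goes in
		 * the upper 16 bits, the WQ id in the lower 16. */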
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;


	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

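	/* TSO needs the full Ethernet/IP/TCP header chain contiguous and
	 * writable: duplicate read-only chains, then m_pullup() through the
	 * end of the TCP header. */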
	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	oce_start(ifp);
#endif

}


void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			}
			break;
		}
		drbr_advance(ifp, br);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
		if (next->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		ETHER_BPF_MTAP(ifp, next);
	}

	return 0;
}




/*****************************************************************************
 *			    Receive  routines functions 		     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if(IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif
		/* This determines whether the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
				/* In UMC mode the pvid is generally stripped
				   by the hw, but in some cases we have seen
				   it arrive with the pvid intact. So if
				   pvid == vlan, neglect the vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		;/* For BE3 legacy and Lancer this is dummy */

	return 1;

}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	tcp_lro_flush_all(lro);
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
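	/* Publish the new buffers to hardware through the RX ULP doorbell,
	 * in batches of at most OCE_MAX_RQ_POSTS entries per write. */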
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
			/* Post L3/L4 errors to stack. */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}




/*****************************************************************************
 *			       Helper functions				     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	sc->ifp->if_baudrate = IF_Gbps(10);

#if __FreeBSD_version >= 1000000
	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
	sc->ifp->if_hw_tsomaxsegsize = 4096;
#endif

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	if (sc->vlans_added <= (sc->max_vlans + 1))
		oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
					 	NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

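	/* Pass-through request layout: the user buffer begins with
	 * IOCTL_COOKIE, followed by a mailbox header and its payload.  The
	 * request is bounced through DMA memory, executed by firmware, and
	 * the same buffer is copied back out. */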
	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc = EFAULT;

	/*
	 * Firmware fills in all the attributes for this ioctl except
	 * the driver version, so fill it in here.
	 */
	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

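	/* Adaptive interrupt coalescing: estimate each EQ's interrupt rate
	 * over the elapsed ticks, raise the EQ delay above INTR_RATE_HWM,
	 * halve it below INTR_RATE_LWM, and clamp to [min_eqd, max_eqd]. */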
	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta/hz;

		/* Interrupt rate based on elapsed ticks */
		if(tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one EQ that needs to be modified? */
	if(num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
}

static void oce_detect_hw_error(POCE_SOFTC sc)
{

	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	uint32_t i;

	if (sc->hw_error)
		return;

	if (IS_XE201(sc)) {
		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
		}
	} else {
		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);

		ue_low = (ue_low & ~ue_low_mask);
		ue_high = (ue_high & ~ue_high_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_low) {
		for (i = 0; ue_low; ue_low >>= 1, i++) {
			if (ue_low & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_low_desc[i]);
		}
	}

	if (ue_high) {
		for (i = 0; ue_high; ue_high >>= 1, i++) {
			if (ue_high & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_hi_desc[i]);
		}
	}

}


static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_detect_hw_error(sc);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}

/* NOTE : This should only be called holding
 *        DEVICE_LOCK.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait a max of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes the Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   taskqueue_drain completes.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to receive MCC async events.
	   So enable intrs and also arm the first EQ.
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}
}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
			else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				(struct oce_async_event_qnq *)cqe;
				if(dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!is_rss_enabled(sc))		  ||
	    IS_BE2(sc)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	} else {
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
	}

	if (IS_BE2(sc) && is_rss_enabled(sc))
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}

	if (IS_BE2(sc))
		sc->nwqs = 1;
}

static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if((ip6->ip6_nxt != IPPROTO_TCP) && \
				(ip6->ip6_nxt != IPPROTO_UDP)){
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if(ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

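/*
 * BE3 A1 workaround for a TX stall seen on QnQ/UMC setups (see
 * oce_tx_asic_stall_verify() below): software-encapsulate the VLAN tag
 * (and the QnQ outer tag) into the frame and tell the hardware to skip
 * its own completion-time tag insertion.
 */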
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if(!M_WRITABLE(m))
		return NULL;

	/* Embed the vlan tag in the packet if it is not already part of it */
	if(m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if(sc->pvid) {
		if(!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

	if(vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if(sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
	return m;
}

static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
			oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}

static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_profile_config(sc, max_rss);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else { /* For BE3 don't rely on fw for determining the resources */
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		sc->nwqs = OCE_MAX_WQ;
		sc->max_vlans = MAX_VLANFILTER_SIZE;
	}
}
