/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


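/* Loader tunables: these can be set at boot time from loader.conf,
 * e.g. hw.oce.enable_rss=0 to force a single-queue (non-RSS) setup. */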
TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}


static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}


static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

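		/* Flags for oce_rxf_set_promiscuous(): bit 0 requests
		 * promiscuous mode; bit 1 is assumed here to also request
		 * VLAN promiscuous mode. */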
		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
				sc->promisc = TRUE;
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, 0))
				sc->promisc = FALSE;
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status)
		return ENXIO;

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


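	/* Harvest all pending EQEs: each entry is cleared as it is consumed,
	 * and the whole batch is acknowledged below in one doorbell write
	 * without re-arming, so no events are lost while the attached CQs
	 * are being processed. */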
	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC. But don't arm CQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

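	/* Request one MSI-X vector per RSS receive queue (nrqs includes the
	 * default non-RSS RQ, hence nrqs - 1) or per work queue, whichever
	 * is larger; without RSS a single vector services everything. */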
	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

	ii->eq->intr++;

	return FILTER_HANDLED;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions 			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	case 7: /* 40 Gbps */
		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
		sc->speed = 40000;
		break;
	}

	return;
}


static int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if (oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if (!m) {
			device_printf(sc->dev, "VLAN tag insertion failed\n");
			return 0;
		}

	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy required only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) %
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}

		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			/* Note: the original (!IS_BE(sc) || !IS_SH(sc)) test
			 * was a tautology (a device is never both), so the IP
			 * checksum bit has always been requested for LSO;
			 * made unconditional here to say so explicitly. */
			nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
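		/* Ring the TX doorbell: number of WQEs posted in the upper
		 * 16 bits, WQ id in the lower 16. */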
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG)	{
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;


	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

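	/* Pull the complete Ethernet/IP/TCP header chain into the first
	 * mbuf so the header WQE can be built from contiguous data. */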
	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	oce_start(ifp);
#endif

}


void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


#if __FreeBSD_version >= 1000000
static __inline void
drbr_stats_update(struct ifnet *ifp, int len, int mflags)
{
#ifndef NO_SLOW_STATS
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;
#endif
}
#endif

static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}




/*****************************************************************************
 *			    Receive  routines functions 		     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if (IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


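	/* A single completion can span several posted receive buffers; walk
	 * the fragments, unmap each buffer and chain them into one mbuf,
	 * with the first fragment carrying the packet header fields. */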
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			 m_freem(m);
			 goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines if the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode the PVID is generally stripped
				   by hw, but in some cases it has been seen
				   to arrive with the PVID intact. So if
				   pvid == vlan, ignore the vlan tag.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		; /* For BE3 legacy and Lancer this check is a no-op */

	return 1;

}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
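	/* Publish the new buffers to the hardware in chunks of at most
	 * OCE_MAX_RQ_POSTS per doorbell write (the num_posted field is
	 * narrow), followed by one final write for the remainder. */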
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack. */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}




/*****************************************************************************
 *			   Helper functions in this file 		     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	if_initbaudrate(sc->ifp, IF_Gbps(10));

#if __FreeBSD_version >= 1000000
	sc->ifp->if_hw_tsomax = OCE_MAX_TSO_SIZE;
#endif

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	if (sc->vlans_added <= (sc->max_vlans + 1))
		oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
					 	NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

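	/* The SIOCGPRIVATE_0 payload is laid out as an IOCTL_COOKIE string
	 * immediately followed by a mailbox header (struct mbx_hdr) and the
	 * request body; the whole request is bounced through DMA memory to
	 * the firmware and copied back out on completion. */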
	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc =  EFAULT;

	/*
	 * The firmware fills in all the attributes for this ioctl except
	 * the driver version, so fill it in here.
	 */
	if (req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

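	/* Adaptive interrupt coalescing: once a second (driven by the local
	 * timer) estimate each EQ's interrupt rate; raise the EQ delay when
	 * the rate is above INTR_RATE_HWM and halve it when below
	 * INTR_RATE_LWM. The (eqd * 65) / 100 below converts the delay in
	 * microseconds to the hardware's delay-multiplier units. */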
	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta/hz;

		/* Interrupt rate based on elapsed ticks */
		if (tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one eq that needs to be modified? */
	if (num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
}

static void
oce_detect_hw_error(POCE_SOFTC sc)
{

	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	uint32_t i;

	if (sc->hw_error)
		return;

	if (IS_XE201(sc)) {
		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
		}
	} else {
		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);

		ue_low = (ue_low & ~ue_low_mask);
		ue_high = (ue_high & ~ue_high_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE,
	 * hence hw_error is not set for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_low) {
		for (i = 0; ue_low; ue_low >>= 1, i++) {
			if (ue_low & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_low_desc[i]);
		}
	}

	if (ue_high) {
		for (i = 0; ue_high; ue_high >>= 1, i++) {
			if (ue_high & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_hi_desc[i]);
		}
	}

}


static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_detect_hw_error(sc);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


/* NOTE: This should only be called while holding
 *       the device lock.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait a maximum of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes a Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   completing taskqueue_drain.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to get MCC async events.
	   So enable intrs and also arm the first EQ
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}
}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
			else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				(struct oce_async_event_qnq *)cqe;
				if (dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!is_rss_enabled(sc))		  ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	} else {
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
	}
}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}

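/*
 * BE3 A1 workaround: with QnQ or UMC enabled, transmitting IPv6 packets that
 * carry extension headers can stall the ASIC. oce_tx_asic_stall_verify()
 * detects such packets so that oce_tx() can fall back to software VLAN tag
 * insertion (oce_insert_vlan_tag()) for them.
 */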
static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_nxt != IPPROTO_TCP) &&
				(ip6->ip6_nxt != IPPROTO_UDP)) {
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if (ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if ((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

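/*
 * Software VLAN encapsulation used by the stall workaround above: the frame's
 * tag (or the port PVID in UMC mode, plus the QnQ outer tag when set) is
 * written into the packet with ether_vlanencap(), and *complete is cleared so
 * that oce_tx() programs the header WQE's 'complete' bit accordingly.
 */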
static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if (!M_WRITABLE(m))
		return NULL;

	/* Embed vlan tag in the packet if it is not part of it */
	if (m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if (sc->pvid) {
		if (!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

	if (vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if (sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
	return m;
}

static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if (is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
			oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}

static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_profile_config(sc, max_rss);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else { /* For BE3 don't rely on fw for determining the resources */
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		sc->nwqs = OCE_MAX_WQ;
		sc->max_vlans = MAX_VLANFILTER_SIZE;
	}
}
2357