/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD: stable/10/sys/dev/oce/oce_if.c 274043 2014-11-03 12:38:29Z hselasky $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),

	DEVMETHOD_END
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};
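
/*
 * Each entry above packs the PCI vendor ID into the upper 16 bits and the
 * device ID into the lower 16 bits; oce_probe() below unpacks them with
 * (entry >> 16) & 0xffff and entry & 0xffff respectively when matching a
 * candidate device.
 */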




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				case PCI_PRODUCT_SH:
					sc->flags |= OCE_FLAGS_SH;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}


static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	oce_get_config(sc);

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}
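
/*
 * Note that the error labels in oce_attach() unwind in exactly the reverse
 * order of the corresponding setup calls, so a failure at any stage releases
 * only the resources that were already allocated.
 */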


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}


static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
				sc->promisc = TRUE;
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			if (!oce_rxf_set_promiscuous(sc, 0))
				sc->promisc = FALSE;
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}
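
/*
 * SIOCGPRIVATE_0 above is the private ioctl through which userland
 * management utilities talk to the firmware; oce_handle_passthrough()
 * validates a cookie and then forwards the embedded mailbox request
 * (see that function further below).
 */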


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if (!sc->link_status)
		return ENXIO;

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	LOCK(&wq->tx_lock);
	status = oce_multiq_transmit(ifp, m, wq);
	UNLOCK(&wq->tx_lock);

	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;


	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC completions, but don't arm the CQs yet */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	return;
}
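
/*
 * oce_intr() intentionally arms in two phases: the consumed EQ entries are
 * first acknowledged without re-arming (rearm FALSE), the attached CQs are
 * drained, and only then are the CQs and finally the EQ re-armed, so that
 * no event is lost while completions are being processed.
 */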


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

	if (is_rss_enabled(sc))
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale number of queues based on intr we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

	ii->eq->intr++;

	return FILTER_HANDLED;
}
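
/*
 * oce_fast_isr() runs in interrupt filter context, so it does the minimum
 * possible work: it quiesces the EQ interrupt and hands the actual
 * completion processing to the per-vector fast taskqueue that
 * oce_alloc_intr() sets up below.
 */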


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions 			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	case 5: /* 20 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 20000;
		break;
	case 6: /* 25 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 25000;
		break;
	case 7: /* 40 Gbps */
		req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
		sc->speed = 40000;
		break;
	default:
		sc->speed = 0;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;
	boolean_t complete = TRUE;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if(oce_tx_asic_stall_verify(sc, m)) {
		m = oce_insert_vlan_tag(sc, m, &complete);
		if(!m) {
			device_printf(sc->dev, "Insertion unsuccessful\n");
			return 0;
		}

	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

	pd = &wq->pckts[wq->pkt_desc_head];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc) || IS_SH(sc)) {
			/* Dummy required only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}
		atomic_store_rel_int(&wq->pkt_desc_head,
				     (wq->pkt_desc_head + 1) % \
				      OCE_WQ_PACKET_ARRAY_SIZE);
		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;

		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = complete;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
			(m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
			(m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}

		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc) || !IS_SH(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		atomic_add_int(&wq->ring->num_used, 1);

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			atomic_add_int(&wq->ring->num_used, 1);
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);

	} else if (rc == EFBIG)	{
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}
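
/*
 * The TX doorbell written at the end of oce_tx() packs the number of WQEs
 * just posted into the upper 16 bits and the WQ id into the lower 16 bits:
 *
 *	reg_value = (num_wqes << 16) | wq->wq_id;
 */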


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;


	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */
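
/*
 * oce_tso_setup() above guarantees (via m_pullup()) that the Ethernet, IP
 * and TCP headers of a TSO frame are contiguous in the first mbuf, which
 * is what the header WQE construction in oce_tx() relies on.
 */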

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	oce_start(ifp);
#endif

}


void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	if (!sc->link_status)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m != NULL) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
	}
	while ((next = drbr_peek(ifp, br)) != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next == NULL) {
				drbr_advance(ifp, br);
			} else {
				drbr_putback(ifp, br, next);
				wq->tx_stats.tx_stops ++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		drbr_advance(ifp, br);
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
	}

	return status;
}




/*****************************************************************************
 *			    Receive  routines functions 		     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if(IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			 m_freem(m);
			 goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines whether the vlan tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode the pvid is generally stripped
				   by the hw, but in some cases we have seen
				   frames arrive with the pvid intact. So if
				   pvid == vlan, neglect the vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued ++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		;/* For BE3 legacy and Lancer this is dummy */

	return 1;

}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;

	bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}
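
/*
 * Buffers are posted to the RQ in batches: the loop at the end of
 * oce_alloc_rx_bufs() rings one full doorbell per OCE_MAX_RQ_POSTS buffers
 * and a final partial doorbell for the remainder, instead of one doorbell
 * write per buffer.
 */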


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack. */
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}




/*****************************************************************************
 *		   Helper functions implemented in this file		     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	if_initbaudrate(sc->ifp, IF_Gbps(10));

#if __FreeBSD_version >= 1000000
	sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS;
	sc->ifp->if_hw_tsomaxsegsize = 4096;
#endif

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	if (sc->vlans_added <= (sc->max_vlans + 1))
		oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
					 	NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;
	struct mbx_common_get_cntl_attr *fw_cmd;

	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc =  EFAULT;

	/*
	   The firmware fills in all the attributes for this ioctl except
	   the driver version, so fill it in here.
	 */
	if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
		fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
		strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
			COMPONENT_REVISION, strlen(COMPONENT_REVISION));
	}

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}

static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;
	int tps;

	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Overflow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta/hz;

		/* Interrupt rate based on elapsed ticks */
		if(tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there at least one EQ that needs to be modified? */
	if(num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
}
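
/*
 * oce_eqd_set_periodic() caches the current delay in aic->cur_eqd, so the
 * EQ-delay mailbox command is only prepared for EQs whose delay actually
 * changed, and all changes are batched into a single
 * oce_mbox_eqd_modify_periodic() call.
 */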

static void oce_detect_hw_error(POCE_SOFTC sc)
{

	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	uint32_t i;

	if (sc->hw_error)
		return;

	if (IS_XE201(sc)) {
		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
		}
	} else {
		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);

		ue_low = (ue_low & ~ue_low_mask);
		ue_high = (ue_high & ~ue_high_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence we do not set hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_low) {
		for (i = 0; ue_low; ue_low >>= 1, i++) {
			if (ue_low & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_low_desc[i]);
		}
	}

	if (ue_high) {
		for (i = 0; ue_high; ue_high >>= 1, i++) {
			if (ue_high & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_hi_desc[i]);
		}
	}

}


static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_detect_hw_error(sc);
	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX watchdog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	/* calculate and set the eq delay for optimal interrupt rate */
	if (IS_BE(sc) || IS_SH(sc))
		oce_eqd_set_periodic(sc);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


/* NOTE : This should only be called holding
 *        DEVICE_LOCK.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait for a max of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes the Giant lock, we should not acquire
	   any other lock. So unlock the device lock and reacquire it after
	   taskqueue_drain completes.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to get MCC async events.
	   So enable intrs and also arm the first EQ.
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}
}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
			else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				dbgcqe =
				(struct oce_async_event_qnq *)cqe;
				if(dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!is_rss_enabled(sc))		  ||
	    IS_BE2(sc)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
	} else {
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
		sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
	}

	if (IS_BE2(sc) && is_rss_enabled(sc))
		sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (is_rss_enabled(sc)) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}

	if (IS_BE2(sc))
		sc->nwqs = 1;
}

static int
oce_check_ipv6_ext_hdr(struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	caddr_t m_datatemp = m->m_data;

	if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
		m->m_data += sizeof(struct ether_header);
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if((ip6->ip6_nxt != IPPROTO_TCP) && \
				(ip6->ip6_nxt != IPPROTO_UDP)){
			struct ip6_ext *ip6e = NULL;
			m->m_data += sizeof(struct ip6_hdr);

			ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
			if(ip6e->ip6e_len == 0xff) {
				m->m_data = m_datatemp;
				return TRUE;
			}
		}
		m->m_data = m_datatemp;
	}
	return FALSE;
}

static int
is_be3_a1(POCE_SOFTC sc)
{
	if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
		return TRUE;
	}
	return FALSE;
}

static struct mbuf *
oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
{
	uint16_t vlan_tag = 0;

	if(!M_WRITABLE(m))
		return NULL;

	/* Embed vlan tag in the packet if it is not part of it */
	if(m->m_flags & M_VLANTAG) {
		vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	}

	/* if UMC, ignore vlan tag insertion and instead insert pvid */
	if(sc->pvid) {
		if(!vlan_tag)
			vlan_tag = sc->pvid;
		*complete = FALSE;
	}

	if(vlan_tag) {
		m = ether_vlanencap(m, vlan_tag);
	}

	if(sc->qnqid) {
		m = ether_vlanencap(m, sc->qnqid);
		*complete = FALSE;
	}
	return m;
}

static int
oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
{
	if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
			oce_check_ipv6_ext_hdr(m)) {
		return TRUE;
	}
	return FALSE;
}
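
/*
 * Putting the three helpers above together: on BE3-A1 silicon in QnQ/UMC
 * mode, frames whose IPv6 extension-header chain contains a 0xff-length
 * option appear to be able to stall the ASIC (hence the function name), so
 * oce_tx() detects them with oce_tx_asic_stall_verify() and re-encapsulates
 * them through oce_insert_vlan_tag() as a workaround.
 */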

static void
oce_get_config(POCE_SOFTC sc)
{
	int rc = 0;
	uint32_t max_rss = 0;

	if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
		max_rss = OCE_LEGACY_MODE_RSS;
	else
		max_rss = OCE_MAX_RSS;

	if (!IS_BE(sc)) {
		rc = oce_get_profile_config(sc, max_rss);
		if (rc) {
			sc->nwqs = OCE_MAX_WQ;
			sc->nrssqs = max_rss;
			sc->nrqs = sc->nrssqs + 1;
		}
	}
	else { /* For BE3 don't rely on fw for determining the resources */
		sc->nrssqs = max_rss;
		sc->nrqs = sc->nrssqs + 1;
		sc->nwqs = OCE_MAX_WQ;
		sc->max_vlans = MAX_VLANFILTER_SIZE;
	}
}