/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD: head/sys/dev/oce/oce_if.c 243857 2012-12-04 09:32:43Z glebius $ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
			  void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
					uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
				 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
						struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
		 struct oce_async_cqe_link_state *acqe);


/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
	DEVMETHOD(device_probe, oce_probe),
	DEVMETHOD(device_attach, oce_attach),
	DEVMETHOD(device_detach, oce_detach),
	DEVMETHOD(device_shutdown, oce_shutdown),
	{0, 0}
};

static driver_t oce_driver = {
	"oce",
	oce_dispatch,
	sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
	(PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
	(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
};




/*****************************************************************************
 *			Driver entry points functions                        *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
	uint16_t vendor = 0;
	uint16_t device = 0;
	int i = 0;
	char str[256] = {0};
	POCE_SOFTC sc;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(OCE_SOFTC));
	sc->dev = dev;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
		if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
			if (device == (supportedDevices[i] & 0xffff)) {
				sprintf(str, "%s:%s", "Emulex CNA NIC function",
					component_revision);
				device_set_desc_copy(dev, str);

				switch (device) {
				case PCI_PRODUCT_BE2:
					sc->flags |= OCE_FLAGS_BE2;
					break;
				case PCI_PRODUCT_BE3:
					sc->flags |= OCE_FLAGS_BE3;
					break;
				case PCI_PRODUCT_XE201:
				case PCI_PRODUCT_XE201_VF:
					sc->flags |= OCE_FLAGS_XE201;
					break;
				default:
					return ENXIO;
				}
				return BUS_PROBE_DEFAULT;
			}
		}
	}

	return ENXIO;
}


static int
oce_attach(device_t dev)
{
	POCE_SOFTC sc;
	int rc = 0;

	sc = device_get_softc(dev);

	rc = oce_hw_pci_alloc(sc);
	if (rc)
		return rc;

	sc->rss_enable	 = oce_enable_rss;
	sc->tx_ring_size = OCE_TX_RING_SIZE;
	sc->rx_ring_size = OCE_RX_RING_SIZE;
	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
	sc->promisc	 = OCE_DEFAULT_PROMISCUOUS;

	LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
	LOCK_CREATE(&sc->dev_lock,  "Device_lock");

	/* initialise the hardware */
	rc = oce_hw_init(sc);
	if (rc)
		goto pci_res_free;

	setup_max_queues_want(sc);

	rc = oce_setup_intr(sc);
	if (rc)
		goto mbox_free;

	rc = oce_queue_init_all(sc);
	if (rc)
		goto intr_free;

	rc = oce_attach_ifp(sc);
	if (rc)
		goto queues_free;

#if defined(INET6) || defined(INET)
	rc = oce_init_lro(sc);
	if (rc)
		goto ifp_free;
#endif

	rc = oce_hw_start(sc);
	if (rc)
		goto lro_free;

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
				oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
				oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

	rc = oce_stats_init(sc);
	if (rc)
		goto vlan_free;

	oce_add_sysctls(sc);

	callout_init(&sc->timer, CALLOUT_MPSAFE);
	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
	if (rc)
		goto stats_free;
#ifdef DEV_NETMAP
#endif /* DEV_NETMAP */

	return 0;

stats_free:
	callout_drain(&sc->timer);
	oce_stats_free(sc);
vlan_free:
	if (sc->vlan_attach)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
	oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
	oce_free_lro(sc);
ifp_free:
#endif
	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
queues_free:
	oce_queue_release_all(sc);
intr_free:
	oce_intr_free(sc);
mbox_free:
	oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
	oce_hw_pci_free(sc);
	LOCK_DESTROY(&sc->dev_lock);
	LOCK_DESTROY(&sc->bmbx_lock);
	return rc;

}


static int
oce_detach(device_t dev)
{
	POCE_SOFTC sc = device_get_softc(dev);

	LOCK(&sc->dev_lock);
	oce_if_deactivate(sc);
	UNLOCK(&sc->dev_lock);

	callout_drain(&sc->timer);

	if (sc->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	if (sc->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	ether_ifdetach(sc->ifp);

	if_free(sc->ifp);

	oce_hw_shutdown(sc);

	bus_generic_detach(dev);

	return 0;
}


static int
oce_shutdown(device_t dev)
{
	int rc;

	rc = oce_detach(dev);

	return rc;
}


static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	POCE_SOFTC sc = ifp->if_softc;
	int rc = 0;
	uint32_t u;

	switch (command) {

	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > OCE_MAX_MTU)
			rc = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
				oce_init(sc);
			}
			device_printf(sc->dev, "Interface Up\n");
		} else {
			LOCK(&sc->dev_lock);

			sc->ifp->if_drv_flags &=
			    ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
			oce_if_deactivate(sc);

			UNLOCK(&sc->dev_lock);

			device_printf(sc->dev, "Interface Down\n");
		}

		if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
			sc->promisc = TRUE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
			sc->promisc = FALSE;
			oce_rxf_set_promiscuous(sc, sc->promisc);
		}

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = oce_hw_update_multicast(sc);
		if (rc)
			device_printf(sc->dev,
				"Update multicast address failed\n");
		break;

	case SIOCSIFCAP:
		u = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (u & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
					 "TSO disabled due to -txcsum.\n");
			}
		}

		if (u & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (u & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "Enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (u & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (u & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			oce_vid_config(sc);
		}
#if defined(INET6) || defined(INET)
		if (u & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
#endif

		break;

	case SIOCGPRIVATE_0:
		rc = oce_handle_passthrough(ifp, data);
		break;
	default:
		rc = ether_ioctl(ifp, command, data);
		break;
	}

	return rc;
}


static void
oce_init(void *arg)
{
	POCE_SOFTC sc = arg;

	LOCK(&sc->dev_lock);

	if (sc->ifp->if_flags & IFF_UP) {
		oce_if_deactivate(sc);
		oce_if_activate(sc);
	}

	UNLOCK(&sc->dev_lock);

}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct oce_wq *wq = NULL;
	int queue_index = 0;
	int status = 0;

	if ((m->m_flags & M_FLOWID) != 0)
		queue_index = m->m_pkthdr.flowid % sc->nwqs;

	wq = sc->wq[queue_index];

	if (TRY_LOCK(&wq->tx_lock)) {
		status = oce_multiq_transmit(ifp, m, wq);
		UNLOCK(&wq->tx_lock);
	} else {
		status = drbr_enqueue(ifp, wq->br, m);
	}
	return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf     *m;
	int i = 0;

	for (i = 0; i < sc->nwqs; i++) {
		while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
			m_freem(m);
	}
	if_qflush(ifp);
}



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

static void
oce_intr(void *arg, int pending)
{

	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;
	struct oce_eq *eq = ii->eq;
	struct oce_eqe *eqe;
	struct oce_cq *cq = NULL;
	int i, num_eqes = 0;

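	/*
	 * Interrupt bottom half for one EQ: drain every posted event
	 * queue entry first, acknowledge them on the EQ doorbell without
	 * re-arming, then run each CQ handler attached to this EQ
	 * (TX, RX and MCC).  The CQs, and finally the EQ itself, are
	 * only re-armed after the handlers have run, so events arriving
	 * meanwhile are not lost.
	 */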
	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		RING_GET(eq->ring, 1);
		num_eqes++;

	} while (TRUE);

	if (!num_eqes)
		goto eq_arm; /* Spurious */

	/* Clear EQ entries, but don't arm */
	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

	/* Process TX, RX and MCC, but don't arm the CQs yet */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		(*cq->cq_handler)(cq->cb_arg);
	}

	/* Arm all cqs connected to this EQ */
	for (i = 0; i < eq->cq_valid; i++) {
		cq = eq->cq[i];
		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
	}

eq_arm:
	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
	return;
}


static int
oce_setup_intr(POCE_SOFTC sc)
{
	int rc = 0, use_intx = 0;
	int vector = 0, req_vectors = 0;

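	/*
	 * With RSS, request the larger of the RSS RX queue count
	 * (nrqs - 1, excluding the default RQ) and the TX queue count,
	 * one MSI-X vector per queue pair.  If MSI-X allocation fails,
	 * fall back to a single legacy INTx vector and let
	 * update_queues_got() scale the queue counts down to whatever
	 * was actually granted.
	 */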
	if (sc->rss_enable)
		req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
	else
		req_vectors = 1;

	if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
		sc->intr_count = req_vectors;
		rc = pci_alloc_msix(sc->dev, &sc->intr_count);
		if (rc != 0) {
			use_intx = 1;
			pci_release_msi(sc->dev);
		} else
			sc->flags |= OCE_FLAGS_USING_MSIX;
	} else
		use_intx = 1;

	if (use_intx)
		sc->intr_count = 1;

	/* Scale the number of queues based on the interrupts we got */
	update_queues_got(sc);

	if (use_intx) {
		device_printf(sc->dev, "Using legacy interrupt\n");
		rc = oce_alloc_intr(sc, vector, oce_intr);
		if (rc)
			goto error;
	} else {
		for (; vector < sc->intr_count; vector++) {
			rc = oce_alloc_intr(sc, vector, oce_intr);
			if (rc)
				goto error;
		}
	}

	return 0;
error:
	oce_intr_free(sc);
	return rc;
}


static int
oce_fast_isr(void *arg)
{
	POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
	POCE_SOFTC sc = ii->sc;

	if (ii->eq == NULL)
		return FILTER_STRAY;

	oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

	taskqueue_enqueue_fast(ii->tq, &ii->task);

	return FILTER_HANDLED;
}


static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
	POCE_INTR_INFO ii = &sc->intrs[vector];
	int rc = 0, rr;

	if (vector >= OCE_MAX_EQ)
		return (EINVAL);

	/* Set the resource id for the interrupt.
	 * MSIx is vector + 1 for the resource id,
	 * INTx is 0 for the resource id.
	 */
	if (sc->flags & OCE_FLAGS_USING_MSIX)
		rr = vector + 1;
	else
		rr = 0;
	ii->intr_res = bus_alloc_resource_any(sc->dev,
					      SYS_RES_IRQ,
					      &rr, RF_ACTIVE|RF_SHAREABLE);
	ii->irq_rr = rr;
	if (ii->intr_res == NULL) {
		device_printf(sc->dev,
			  "Could not allocate interrupt\n");
		rc = ENXIO;
		return rc;
	}

	TASK_INIT(&ii->task, 0, isr, ii);
	ii->vector = vector;
	sprintf(ii->task_name, "oce_task[%d]", ii->vector);
	ii->tq = taskqueue_create_fast(ii->task_name,
			M_NOWAIT,
			taskqueue_thread_enqueue,
			&ii->tq);
	taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
			device_get_nameunit(sc->dev));

	ii->sc = sc;
	rc = bus_setup_intr(sc->dev,
			ii->intr_res,
			INTR_TYPE_NET,
			oce_fast_isr, NULL, ii, &ii->tag);
	return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
	int i = 0;

	for (i = 0; i < sc->intr_count; i++) {

		if (sc->intrs[i].tag != NULL)
			bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
						sc->intrs[i].tag);
		if (sc->intrs[i].tq != NULL)
			taskqueue_free(sc->intrs[i].tq);

		if (sc->intrs[i].intr_res != NULL)
			bus_release_resource(sc->dev, SYS_RES_IRQ,
						sc->intrs[i].irq_rr,
						sc->intrs[i].intr_res);
		sc->intrs[i].tag = NULL;
		sc->intrs[i].intr_res = NULL;
	}

	if (sc->flags & OCE_FLAGS_USING_MSIX)
		pci_release_msi(sc->dev);

}



/******************************************************************************
*			  Media callbacks functions 			      *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
	POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


	req->ifm_status = IFM_AVALID;
	req->ifm_active = IFM_ETHER;

	if (sc->link_status == 1)
		req->ifm_status |= IFM_ACTIVE;
	else
		return;

	switch (sc->link_speed) {
	case 1: /* 10 Mbps */
		req->ifm_active |= IFM_10_T | IFM_FDX;
		sc->speed = 10;
		break;
	case 2: /* 100 Mbps */
		req->ifm_active |= IFM_100_TX | IFM_FDX;
		sc->speed = 100;
		break;
	case 3: /* 1 Gbps */
		req->ifm_active |= IFM_1000_T | IFM_FDX;
		sc->speed = 1000;
		break;
	case 4: /* 10 Gbps */
		req->ifm_active |= IFM_10G_SR | IFM_FDX;
		sc->speed = 10000;
		break;
	}

	return;
}


int
oce_media_change(struct ifnet *ifp)
{
	return 0;
}




/*****************************************************************************
 *			  Transmit routines functions			     *
 *****************************************************************************/

static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
	int rc = 0, i, retry_cnt = 0;
	bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
	struct mbuf *m, *m_temp;
	struct oce_wq *wq = sc->wq[wq_index];
	struct oce_packet_desc *pd;
	uint32_t out;
	struct oce_nic_hdr_wqe *nichdr;
	struct oce_nic_frag_wqe *nicfrag;
	int num_wqes;
	uint32_t reg_value;

	m = *mpp;
	if (!m)
		return EINVAL;

	if (!(m->m_flags & M_PKTHDR)) {
		rc = ENXIO;
		goto free_ret;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
		m = oce_tso_setup(sc, mpp);
#else
		m = NULL;
#endif
		if (m == NULL) {
			rc = ENXIO;
			goto free_ret;
		}
	}

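	/*
	 * The packet descriptor array is used as a ring; if advancing
	 * packets_out would catch up with packets_in, every slot is
	 * already in flight, so return EBUSY and let the caller
	 * re-queue the mbuf.
	 */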
	out = wq->packets_out + 1;
	if (out == OCE_WQ_PACKET_ARRAY_SIZE)
		out = 0;
	if (out == wq->packets_in)
		return EBUSY;

	pd = &wq->pckts[wq->packets_out];
retry:
	rc = bus_dmamap_load_mbuf_sg(wq->tag,
				     pd->map,
				     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
	if (rc == 0) {
		num_wqes = pd->nsegs + 1;
		if (IS_BE(sc)) {
			/* Dummy WQE required only for BE3. */
			if (num_wqes & 1)
				num_wqes++;
		}
		if (num_wqes >= RING_NUM_FREE(wq->ring)) {
			bus_dmamap_unload(wq->tag, pd->map);
			return EBUSY;
		}

		bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
		pd->mbuf = m;
		wq->packets_out = out;

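		/*
		 * Build the work request: one NIC header WQE carrying the
		 * per-packet flags (checksums, LSO, VLAN), then one
		 * fragment WQE per DMA segment, plus a zeroed dummy WQE
		 * when needed to keep the count even on BE3.  The doorbell
		 * write below posts all of them to the hardware at once.
		 */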
		nichdr =
		    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
		nichdr->u0.dw[0] = 0;
		nichdr->u0.dw[1] = 0;
		nichdr->u0.dw[2] = 0;
		nichdr->u0.dw[3] = 0;

		nichdr->u0.s.complete = 1;
		nichdr->u0.s.event = 1;
		nichdr->u0.s.crc = 1;
		nichdr->u0.s.forward = 0;
		nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
		nichdr->u0.s.udpcs =
		    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
		nichdr->u0.s.tcpcs =
		    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
		nichdr->u0.s.num_wqe = num_wqes;
		nichdr->u0.s.total_length = m->m_pkthdr.len;
		if (m->m_flags & M_VLANTAG) {
			nichdr->u0.s.vlan = 1; /* VLAN present */
			nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
		}
		if (m->m_pkthdr.csum_flags & CSUM_TSO) {
			if (m->m_pkthdr.tso_segsz) {
				nichdr->u0.s.lso = 1;
				nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
			}
			if (!IS_BE(sc))
				nichdr->u0.s.ipcs = 1;
		}

		RING_PUT(wq->ring, 1);
		wq->ring->num_used++;

		for (i = 0; i < pd->nsegs; i++) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.s.rsvd0 = 0;
			nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
			nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
			nicfrag->u0.s.frag_len = segs[i].ds_len;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			wq->ring->num_used++;
		}
		if (num_wqes > (pd->nsegs + 1)) {
			nicfrag =
			    RING_GET_PRODUCER_ITEM_VA(wq->ring,
						      struct oce_nic_frag_wqe);
			nicfrag->u0.dw[0] = 0;
			nicfrag->u0.dw[1] = 0;
			nicfrag->u0.dw[2] = 0;
			nicfrag->u0.dw[3] = 0;
			pd->wqe_idx = wq->ring->pidx;
			RING_PUT(wq->ring, 1);
			wq->ring->num_used++;
			pd->nsegs++;
		}

		sc->ifp->if_opackets++;
		wq->tx_stats.tx_reqs++;
		wq->tx_stats.tx_wrbs += num_wqes;
		wq->tx_stats.tx_bytes += m->m_pkthdr.len;
		wq->tx_stats.tx_pkts++;

		bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		reg_value = (num_wqes << 16) | wq->wq_id;
		OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);

	} else if (rc == EFBIG) {
		if (retry_cnt == 0) {
			m_temp = m_defrag(m, M_NOWAIT);
			if (m_temp == NULL)
				goto free_ret;
			m = m_temp;
			*mpp = m_temp;
			retry_cnt = retry_cnt + 1;
			goto retry;
		} else
			goto free_ret;
	} else if (rc == ENOMEM)
		return rc;
	else
		goto free_ret;

	return 0;

free_ret:
	m_freem(*mpp);
	*mpp = NULL;
	return rc;
}


static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	uint32_t in;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	if (wq->packets_out == wq->packets_in)
		device_printf(sc->dev, "WQ transmit descriptor missing\n");

	in = wq->packets_in + 1;
	if (in == OCE_WQ_PACKET_ARRAY_SIZE)
		in = 0;

	pd = &wq->pckts[wq->packets_in];
	wq->packets_in = in;
	wq->ring->num_used -= (pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;

	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq);
		}
	}
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;

	m = *mpp;

	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}

	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	if (TRY_LOCK(&wq->tx_lock)) {
		rc = oce_multiq_transmit(ifp, NULL, wq);
		if (rc) {
			device_printf(sc->dev,
			 "TX[%d] restart failed\n", wq->queue_index);
		}
		UNLOCK(&wq->tx_lock);
	}
#else
	oce_start(ifp);
#endif

}


void
oce_start(struct ifnet *ifp)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct mbuf *m;
	int rc = 0;
	int def_q = 0; /* Default TX queue is 0 */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
			IFF_DRV_RUNNING)
		return;

	do {
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL)
			break;

		LOCK(&sc->wq[def_q]->tx_lock);
		rc = oce_tx(sc, &m, def_q);
		UNLOCK(&sc->wq[def_q]->tx_lock);
		if (rc) {
			if (m != NULL) {
				sc->wq[def_q]->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
			}
			break;
		}
		if (m != NULL)
			ETHER_BPF_MTAP(ifp, m);

	} while (TRUE);

	return;
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	LOCK(&wq->tx_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
	UNLOCK(&wq->tx_lock);

	return 0;
}


static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
	POCE_SOFTC sc = ifp->if_softc;
	int status = 0, queue_index = 0;
	struct mbuf *next = NULL;
	struct buf_ring *br = NULL;

	br  = wq->br;
	queue_index = wq->queue_index;

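	/*
	 * Preserve per-queue ordering: if the buf_ring already holds
	 * packets, the new mbuf goes behind them and transmission
	 * proceeds from the ring; on a full TX ring the packet is
	 * pushed back onto the buf_ring for the restart task.
	 */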
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		if (m != NULL)
			status = drbr_enqueue(ifp, br, m);
		return status;
	}

	if (m == NULL)
		next = drbr_dequeue(ifp, br);
	else if (drbr_needs_enqueue(ifp, br)) {
		if ((status = drbr_enqueue(ifp, br, m)) != 0)
			return status;
		next = drbr_dequeue(ifp, br);
	} else
		next = m;

	while (next != NULL) {
		if (oce_tx(sc, &next, queue_index)) {
			if (next != NULL) {
				wq->tx_stats.tx_stops++;
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				status = drbr_enqueue(ifp, br, next);
			}
			break;
		}
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		ETHER_BPF_MTAP(ifp, next);
		next = drbr_dequeue(ifp, br);
	}

	return status;
}




/*****************************************************************************
 *			    Receive  routines functions 		     *
 *****************************************************************************/

static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/* Partial DMA workaround for Lancer */
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	/* Get vlan_tag value */
	if (IS_BE(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


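	/*
	 * A single completion can span several posted receive buffers.
	 * Walk the fragments in order, chaining each buffer's mbuf onto
	 * the packet; only the first fragment keeps its packet header,
	 * which carries the total length and checksum results.
	 */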
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		if (!oce_cqe_portid_valid(sc, cqe)) {
			m_freem(m);
			goto exit;
		}

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines if the VLAN tag is valid */
		if (oce_cqe_vtp_valid(sc, cqe)) {
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
				/* In UMC mode the pvid will generally be
				   stripped by hw, but in some cases we have
				   seen frames arrive with the pvid intact.
				   So if pvid == vlan, neglect the vlan.
				 */
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    !(m->m_flags & M_VLANTAG) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued++;
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif

		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}


static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	uint32_t out, i = 0;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int num_frags = cqe->u0.s.num_fragments;

	if (IS_XE201(sc) && cqe->u0.s.error) {
		/* Lancer A0 workaround:
		 * num_frags will be 1 more than actual in case of error
		 */
		if (num_frags)
			num_frags -= 1;
	}
	for (i = 0; i < num_frags; i++) {
		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				"RQ receive descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;
		m_freem(pd->mbuf);
	}

}


static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int vtp = 0;

	if (sc->be3_native) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		vtp =  cqe_v1->u0.s.vlan_tag_present;
	} else
		vtp = cqe->u0.s.vlan_tag_present;

	return vtp;

}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
	struct oce_nic_rx_cqe_v1 *cqe_v1;
	int port_id = 0;

	if (sc->be3_native && IS_BE(sc)) {
		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
		port_id =  cqe_v1->u0.s.port;
		if (sc->port_id != port_id)
			return 0;
	} else
		; /* For BE3 legacy and Lancer this is dummy */

	return 1;

}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
	struct lro_ctrl	*lro = &rq->lro;
	struct lro_entry *queued;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (!IF_LRO_ENABLED(sc))
		return;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
	rq->lro_pkts_queued = 0;

	return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0, rc = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		rc = tcp_lro_init(lro);
		if (rc != 0) {
			device_printf(sc->dev, "LRO init failed\n");
			return rc;
		}
		lro->ifp = sc->ifp;
	}

	return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
	struct lro_ctrl *lro = NULL;
	int i = 0;

	for (i = 0; i < sc->nrqs; i++) {
		lro = &sc->rq[i]->lro;
		if (lro)
			tcp_lro_free(lro);
	}
}
#endif /* INET6 || INET */

int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, in, rc;
	struct oce_packet_desc *pd;
	bus_dma_segment_t segs[6];
	int nsegs, added = 0;
	struct oce_nic_rqe *rqe;
	pd_rxulp_db_t rxdb_reg;


	for (i = 0; i < count; i++) {
		in = rq->packets_in + 1;
		if (in == OCE_RQ_PACKET_ARRAY_SIZE)
			in = 0;
		if (in == rq->packets_out)
			break;	/* no more room */

		pd = &rq->pckts[rq->packets_in];
		pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (pd->mbuf == NULL)
			break;

		pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
		rc = bus_dmamap_load_mbuf_sg(rq->tag,
					     pd->map,
					     pd->mbuf,
					     segs, &nsegs, BUS_DMA_NOWAIT);
		if (rc) {
			m_free(pd->mbuf);
			break;
		}

		if (nsegs != 1) {
			i--;
			continue;
		}

		rq->packets_in = in;
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);

		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
		rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
		DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
		added++;
		rq->pending++;
	}
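	/*
	 * Ring the RX doorbell for everything just posted.  The
	 * num_posted field takes at most OCE_MAX_RQ_POSTS buffers per
	 * write, so large batches are posted in chunks, with the
	 * remainder written last.
	 */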
	if (added != 0) {
		for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
			DELAY(1);
			rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
			rxdb_reg.bits.qid = rq->rq_id;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
			added -= OCE_MAX_RQ_POSTS;
		}
		if (added > 0) {
			DELAY(1);
			rxdb_reg.bits.qid = rq->rq_id;
			rxdb_reg.bits.num_posted = added;
			OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
		}
	}

	return 0;
}


/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	LOCK(&rq->rx_lock);
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			if (IS_XE201(sc))
				/* Lancer A0 no buffer workaround */
				oce_discard_rx_comp(rq, cqe);
			else
				/* Post L3/L4 errors to stack. */
				oce_rx(rq, cqe->u0.s.frag_index, cqe);

		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif

	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	UNLOCK(&rq->rx_lock);

	return 0;

}




/*****************************************************************************
 *			Helper functions in this file			     *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	if_initbaudrate(sc->ifp, IF_Gbps(10));

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

	return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 1;
	sc->vlans_added++;
	oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
	POCE_SOFTC sc = ifp->if_softc;

	if (ifp->if_softc !=  arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	sc->vlan_tag[vtag] = 0;
	sc->vlans_added--;
	oce_vid_config(sc);
}


/*
 * A max of 64 vlans can be configured in BE. If the user configures
 * more, place the card in vlan promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
	struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
	uint16_t ntags = 0, i;
	int status = 0;

	if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
			(sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
		for (i = 0; i < MAX_VLANS; i++) {
			if (sc->vlan_tag[i]) {
				vtags[ntags].vtag = i;
				ntags++;
			}
		}
		if (ntags)
			status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						vtags, ntags, 1, 0);
	} else
		status = oce_config_vlan(sc, (uint8_t) sc->if_id,
						NULL, 0, 1, 1);
	return status;
}


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;


	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct);
	}
	if (status)
		device_printf(sc->dev, "Failed to update MAC address\n");

}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
	POCE_SOFTC sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int rc = ENXIO;
	char cookie[32] = {0};
	void *priv_data = (void *)ifr->ifr_data;
	void *ioctl_ptr;
	uint32_t req_size;
	struct mbx_hdr req;
	OCE_DMA_MEM dma_mem;

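	/*
	 * The SIOCGPRIVATE_0 payload is expected to be the IOCTL_COOKIE
	 * string followed by an mbx_hdr and its mailbox payload.  After
	 * the cookie is validated, the request is bounced through
	 * DMA-able memory to the firmware and the response copied back
	 * out to the same user buffer.
	 */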
	if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
		return EFAULT;

	if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
		return EINVAL;

	ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
	if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
		return EFAULT;

	req_size = le32toh(req.u0.req.request_length);
	if (req_size > 65536)
		return EINVAL;

	req_size += sizeof(struct mbx_hdr);
	rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
	if (rc)
		return ENOMEM;

	if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
		rc = EFAULT;
		goto dma_free;
	}

	rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
	if (rc) {
		rc = EIO;
		goto dma_free;
	}

	if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
		rc =  EFAULT;

dma_free:
	oce_dma_free(sc, &dma_mem);
	return rc;

}


static void
oce_local_timer(void *arg)
{
	POCE_SOFTC sc = arg;
	int i = 0;

	oce_refresh_nic_stats(sc);
	oce_refresh_queue_stats(sc);
	oce_mac_addr_set(sc);

	/* TX Watch Dog */
	for (i = 0; i < sc->nwqs; i++)
		oce_tx_restart(sc, sc->wq[i]);

	callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Wait for max of 400ms for TX completions to be done */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries */
	for_all_evnt_queues(sc, eq, i)
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But we still need to get MCC async events.
	   So enable intrs and also arm the first EQ.
	 */
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
	struct oce_eq *eq;
	struct oce_rq *rq;
	struct oce_wq *wq;
	int i, rc = 0;

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	oce_hw_intr_disable(sc);

	oce_start_rx(sc);

	for_all_rq_queues(sc, rq, i) {
		rc = oce_start_rq(rq);
		if (rc)
			device_printf(sc->dev, "Unable to start RX\n");
	}

	for_all_wq_queues(sc, wq, i) {
		rc = oce_start_wq(wq);
		if (rc)
			device_printf(sc->dev, "Unable to start TX\n");
	}


	for_all_evnt_queues(sc, eq, i)
		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

	oce_hw_intr_enable(sc);

}

static void
process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
{
	/* Update Link status */
	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	     ASYNC_EVENT_LINK_UP) {
		sc->link_status = ASYNC_EVENT_LINK_UP;
		if_link_state_change(sc->ifp, LINK_STATE_UP);
	} else {
		sc->link_status = ASYNC_EVENT_LINK_DOWN;
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);
	}

	/* Update speed */
	sc->link_speed = acqe->u0.s.speed;
	sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;

}


/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;

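	/*
	 * MQ completions are mailbox responses or unsolicited async
	 * events.  Only two async events are decoded here: link state
	 * changes and GRP5 PVID updates; everything else is consumed
	 * and acknowledged without further action.
	 */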
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID */
				gcqe =
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;

			}
		}
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
	int max_rss = 0;

	/* Check if it is a FLEX machine. If so, don't use RSS */
	if ((sc->function_mode & FNM_FLEX10_MODE) ||
	    (sc->function_mode & FNM_UMC_MODE)    ||
	    (sc->function_mode & FNM_VNIC_MODE)	  ||
	    (!sc->rss_enable)			  ||
	    (sc->flags & OCE_FLAGS_BE2)) {
		sc->nrqs = 1;
		sc->nwqs = 1;
		sc->rss_enable = 0;
	} else {
		/* For multiq, our design is to have TX rings equal to
		   RSS rings, so that we can pair up one RSS ring and one
		   TX ring to a single intr, which improves CPU cache
		   efficiency.
		 */
		if (IS_BE(sc) && (!sc->be3_native))
			max_rss = OCE_LEGACY_MODE_RSS;
		else
			max_rss = OCE_MAX_RSS;

		sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
		sc->nwqs = MIN(OCE_NCPUS, max_rss);
	}

}


static void
update_queues_got(POCE_SOFTC sc)
{
	if (sc->rss_enable) {
		sc->nrqs = sc->intr_count + 1;
		sc->nwqs = sc->intr_count;
	} else {
		sc->nrqs = 1;
		sc->nwqs = 1;
	}
}
