/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "oce_if.h"

/*****************************************************
 * local queue functions
 *****************************************************/

static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len,
				  uint32_t frag_size,
				  uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx,
			 size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);

/**
 * @brief	Create and initialize all the queues on the board
 * @param sc	software handle to the device
 * @returns 0	if successful, or error
 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;
	}

	for_all_rq_queues(sc, rq, i) {
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		sc->eq[vector] = oce_eq_create(sc,
		    sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
		    EQE_SIZE_4, 0, vector);
		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

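	/*
	 * RQ-to-EQ affinity: the default RQ (index 0) shares EQ 0 with the
	 * first WQ, while each RSS RQ i (i >= 1) is bound to EQ i - 1 so
	 * the RSS rings spread across the available interrupt vectors.
	 */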
	for_all_rq_queues(sc, rq, i) {
		rc = oce_rq_create(rq, sc->if_id,
					sc->eq[(i == 0) ? 0 : (i - 1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

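	/* A single 64-entry MQ on EQ 0 carries mailbox completions and
	 * asynchronous events from the firmware. */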
	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	oce_queue_release_all(sc);
	return 1;
}

/**
 * @brief Releases all the queues created
 * @param sc		software handle to the device
 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	/* before deleting lro queues, we have to disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}

/**
 * @brief 		Function to create a WQ for NIC Tx
 * @param sc 		software handle to the device
 * @param q_len		number of entries in the queue
 * @param wq_type	work queue type
 * @returns		the pointer to the WQ created or NULL on failure
 */
static struct oce_wq *
oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
{
	struct oce_wq *wq;
	int rc = 0, i;

	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
			  "Invalid q length. Must be "
			  "[256, 2048]: 0x%x\n", q_len);
		return NULL;
	}

	/* allocate wq */
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return NULL;

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

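	/*
	 * DMA tag for TX packet buffers: a mapping may cover up to
	 * OCE_MAX_TX_SIZE bytes split across at most OCE_MAX_TX_ELEMENTS
	 * segments of PAGE_SIZE each.
	 */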
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_TX_SIZE,
				OCE_MAX_TX_ELEMENTS,
				PAGE_SIZE, 0, NULL, NULL, &wq->tag);
	if (rc)
		goto free_wq;

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
		if (rc)
			goto free_wq;
	}

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
	if (!wq->ring)
		goto free_wq;

	LOCK_CREATE(&wq->tx_lock, "TX_lock");
	LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");

#if __FreeBSD_version >= 800000
	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
			M_WAITOK, &wq->tx_lock.mutex);
	if (!wq->br)
		goto free_wq;
#endif
	return wq;

free_wq:
	device_printf(sc->dev, "Create WQ failed\n");
	oce_wq_free(wq);
	return NULL;
}

/**
 * @brief 		Frees the work queue
 * @param wq		pointer to work queue to free
 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	LOCK_DESTROY(&wq->tx_compl_lock);
	free(wq, M_DEVBUF);
}

/**
 * @brief 		Create a work queue
 * @param wq		pointer to work queue
 * @param eq		pointer to associated event queue
 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ */
	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	wq->cq = cq;

	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

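	/* Register the CQ with its EQ and install the TX completion handler. */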
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}

/**
 * @brief 		Delete a work queue
 * @param wq		pointer to work queue
 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
		wq->qstate = QDELETED;
	}

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}

/**
 * @brief 		function to allocate receive queue resources
 * @param sc		software handle to the device
 * @param q_len		length of receive queue
 * @param frag_size	size of a receive queue fragment
 * @param mtu		maximum transmission unit
 * @param rss		is-rss-queue flag
 * @returns		the pointer to the RQ created or NULL on failure
 */
static struct oce_rq *
oce_rq_init(POCE_SOFTC sc,
	    uint32_t q_len,
	    uint32_t frag_size,
	    uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->pending = 0;

	rq->parent = (void *)sc;

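	/*
	 * DMA tag for RX buffers: each mapping is a single contiguous
	 * segment of at most oce_rq_buf_size bytes.
	 */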
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
			1, 0,
			BUS_SPACE_MAXADDR,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			oce_rq_buf_size,
			1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
	if (rc)
		goto free_rq;

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
				 sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}

/**
 * @brief 		Free a receive queue
 * @param rq		pointer to receive queue
 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}

/**
 * @brief 		Create a receive queue
 * @param rq 		receive queue
 * @param if_id		interface identifier index
 * @param eq		pointer to event queue
 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	cq = oce_cq_create(sc, eq,
			sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
			sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Don't create the RQ here. It is created in if_activate. */
	rq->qstate     = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
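	/* Register the CQ with its EQ and install the RX completion handler. */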
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;
}

/**
 * @brief 		Delete a receive queue
 * @param rq		receive queue
 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));
		if (!rq->islro) {
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;
			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
		} else {
			fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
			fwcmd1->params.req.rq_id = rq->rq_id;
			fwcmd1->params.req.rq_flags =
			    (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
			(void)oce_destroy_q(sc, &mbx,
			    sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
		}
		rq->qstate = QDELETED;
	}

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}

/**
 * @brief		function to create an event queue
 * @param sc		software handle to the device
 * @param q_len		length of event queue
 * @param item_size	size of an event queue item
 * @param eq_delay	event queue delay
 * @param vector	interrupt vector index
 * @retval eq      	success, pointer to event queue
 * @retval NULL		failure
 */
static struct oce_eq *
oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
	      uint32_t item_size,
	      uint32_t eq_delay,
	      uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

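	/* The CREATE_EQ mailbox command assigns the firmware EQ id; eq_id
	 * stays 0xffff on failure, which oce_eq_del() uses as a sentinel. */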
	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}

/**
 * @brief 		Function to delete an event queue
 * @param eq		pointer to an event queue
 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);
}

/**
 * @brief		Function to create an MQ
 * @param sc		software handle to the device
 * @param eq		the EQ to associate with the MQ for event notification
 * @param q_len		the number of entries to create in the MQ
 * @returns		pointer to the created MQ, or NULL on failure
 */
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	int rc = 0;
	struct oce_cq *cq;
	oce_mq_ext_ctx_t *ctx;
	uint32_t num_pages;
	uint32_t page_size;
	int version;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
	if (!cq)
		return NULL;

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!mq) {
		oce_cq_del(sc, cq);
		goto error;
	}

	mq->parent = sc;

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
	if (!mq->ring)
		goto error;

	bzero(&mbx, sizeof(struct oce_mbx));

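	/* XE201 adapters require the v1 MQ context format; older ASICs use
	 * the v0 layout. */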
	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_MQ_EXT,
				MBX_TIMEOUT_SEC,
				sizeof(struct mbx_create_common_mq_ex),
				version);

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
	page_size = mq->ring->num_items * mq->ring->item_size;

	ctx = &fwcmd->params.req.context;

	if (IS_XE201(sc)) {
		ctx->v1.num_pages = num_pages;
		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v1.cq_id = cq->cq_id;
		ctx->v1.valid = 1;
		ctx->v1.async_cq_id = cq->cq_id;
		ctx->v1.async_cq_valid = 1;
		/* Subscribe to Link State and Group 5 events (bits 1 & 5 set) */
		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
		ctx->v1.async_evt_bitmap |=
					LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
	} else {
		ctx->v0.num_pages = num_pages;
		ctx->v0.cq_id = cq->cq_id;
		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v0.valid = 1;
		/* Subscribe to all async events */
		ctx->v0.async_evt_bitmap = 0xffffffff;
	}

	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (!rc)
		rc = fwcmd->hdr.u0.rsp.status;
	if (rc) {
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
		goto error;
	}
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	mq->cq->eq = eq;
	mq->cfg.q_len = (uint8_t) q_len;
	mq->cfg.eqd = 0;
	mq->qstate = QCREATED;

	mq->cq->cb_arg = mq;
	mq->cq->cq_handler = oce_mq_handler;

	return mq;

error:
	device_printf(sc->dev, "MQ create failed\n");
	oce_mq_free(mq);
	return NULL;
}

/**
 * @brief		Function to free a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
static void
oce_mq_free(struct oce_mq *mq)
{
	POCE_SOFTC sc;
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	/* the MQ create error path may call this with a NULL mq */
	if (!mq)
		return;
	sc = (POCE_SOFTC) mq->parent;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);
		mq->ring = NULL;
		if (mq->qstate == QCREATED) {
			bzero(&mbx, sizeof(struct oce_mbx));
			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
			fwcmd->params.req.id = mq->mq_id;
			(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_destroy_common_mq),
				QTYPE_MQ, 0);
		}
		mq->qstate = QDELETED;
	}

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
		mq->cq = NULL;
	}

	free(mq, M_DEVBUF);
}

/**
 * @brief		Function to delete a EQ, CQ, MQ, WQ or RQ
 * @param sc		software handle to the device
 * @param mbx		mailbox command to send to the fw to delete the queue
 *			(mbx contains the queue information to delete)
 * @param req_size	the size of the mbx payload dependent on the qtype
 * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 * @param version	mailbox command version to use
 * @returns 		0 on success, failure otherwise
 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
		enum qtype qtype, int version)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,
				version);

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev, "%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
	return rc;
}

/**
 * @brief		Function to create a completion queue
 * @param sc		software handle to the device
 * @param eq		optional eq to be associated with the cq
 * @param q_len		length of completion queue
 * @param item_size	size of completion queue items
 * @param sol_event	command context event
 * @param is_eventable	eventable CQ flag
 * @param nodelay	no delay flag
 * @param ncoalesce	coalescence setting
 * @returns 		pointer to the cq created, NULL on failure
 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}

/**
 * @brief		Deletes the completion queue
 * @param sc		software handle to the device
 * @param cq		pointer to a completion queue
 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
		/* now destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
}

/**
 * @brief		Start a receive queue
 * @param rq		pointer to a receive queue
 */
int
oce_start_rq(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int rc;

	if (sc->enable_hwlro)
		rc = oce_alloc_rx_bufs(rq, 960);
	else
		rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);

	if (rc == 0)
		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);

	return rc;
}

/**
 * @brief		Start a work queue
 * @param wq		pointer to a work queue
 */
int
oce_start_wq(struct oce_wq *wq)
{
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief		Start a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}

/**
 * @brief		Function to arm an EQ so that it can generate events
 * @param sc		software handle to the device
 * @param qid		id of the EQ returned by the fw at the time of creation
 * @param npopped	number of EQEs to arm
 * @param rearm		rearm bit enable/disable
 * @param clearint	bit to clear the interrupt condition because of which
 *			EQEs are generated
 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
}

/**
 * @brief		Function to arm a CQ with CQEs
 * @param sc		software handle to the device
 * @param qid		id of the CQ returned by the fw at the time of creation
 * @param npopped	number of CQEs to arm
 * @param rearm		rearm bit enable/disable
 */
void
oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
}

/*
 * @brief		function to drain an EQ, called during stop
 * @param eq		pointer to event queue structure
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

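	/* Consume and clear every valid EQE left on the ring, then
	 * acknowledge the popped entries and clear the interrupt
	 * condition without re-arming the EQ. */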
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);
	} while (TRUE);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
}

/*
 * @brief		function to drain a TX CQ, called during stop
 * @param wq		pointer to the work queue whose CQ is to be drained
 */
void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;
	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
}

/*
 * @brief		function to drain a MCQ and process its CQEs
 * @param arg		pointer to the MQ whose CQ is to be drained
 */
void
oce_drain_mq_cq(void *arg)
{
	/* TODO: additional code. */
	return;
}

/**
 * @brief		function to drain a receive queue's CQ
 * @param rq		pointer to the receive queue whose CQ is to be drained
 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}

/*
 * @brief		free all RX buffers still posted to a receive queue
 * @param rq		pointer to the receive queue
 */
void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

	while (rq->pending) {
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		RING_GET(rq->ring, 1);
		rq->pending--;
	}
}

/*
 * @brief		drain an RX CQ when hardware LRO is enabled
 * @param rq		pointer to the receive queue whose CQ is to be cleaned
 */
void
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int flush_wait = 0;
	int flush_compl = 0;
	int num_frags = 0;

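	/*
	 * Hardware LRO completions arrive either as a single "singleton"
	 * CQE or as a part1/part2 pair; rq->cqe_firstpart remembers a
	 * pending part1 until its matching part2 shows up. The loop runs
	 * until the firmware's flush completion (a zero-length singleton)
	 * is seen, or until roughly 100 ms have elapsed.
	 */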
	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
		    BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct nic_hwlro_singleton_cqe);
		if (cqe->valid) {
			if (cqe->cqe_type == 0) { /* singleton cqe */
				/* we should not get singleton cqe after cqe1 on same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got singleton cqe after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe->pkt_size / rq->cfg.frag_size;
				if (cqe->pkt_size % rq->cfg.frag_size)
					num_frags++;
				oce_discard_rx_comp(rq, num_frags);
				/* Check if CQE is flush completion */
				if (!cqe->pkt_size)
					flush_compl = 1;
				cqe->valid = 0;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x1) { /* first part */
				/* we should not get cqe1 after cqe1 on same rq */
				if (rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got cqe1 after cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				rq->cqe_firstpart =
				    (struct nic_hwlro_cqe_part1 *)cqe;
				RING_GET(cq->ring, 1);
			} else if (cqe->cqe_type == 0x2) { /* second part */
				cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
				/* We should not get cqe2 without cqe1 */
				if (rq->cqe_firstpart == NULL) {
					device_printf(sc->dev, "Got cqe2 without cqe1\n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe2->coalesced_size /
				    rq->cfg.frag_size;
				if (cqe2->coalesced_size % rq->cfg.frag_size)
					num_frags++;

				/* Flush completion will always come in singleton CQE */
				oce_discard_rx_comp(rq, num_frags);

				rq->cqe_firstpart->valid = 0;
				cqe2->valid = 0;
				rq->cqe_firstpart = NULL;
				RING_GET(cq->ring, 1);
			}
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive hwlro flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);

exit_rx_cq_clean_hwlro:
	return;
}

/*
 * @brief		drain an RX CQ and wait for the firmware flush completion
 * @param rq		pointer to the receive queue whose CQ is to be cleaned
 */
void
oce_rx_cq_clean(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_cq *cq;
	POCE_SOFTC sc;
	int flush_wait = 0;
	int flush_compl = 0;

	sc = rq->parent;
	cq = rq->cq;

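	/* Discard completions until the firmware posts a flush completion
	 * (a CQE with zero fragments, zero length and no error), giving up
	 * after roughly 100 ms. */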
	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		if (RQ_CQE_VALID(cqe)) {
			DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
			oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
			/* Check if CQE is flush completion */
			if ((cqe->u0.s.num_fragments == 0) &&
			    (cqe->u0.s.pkt_size == 0) &&
			    (cqe->u0.s.error == 0))
				flush_compl = 1;

			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
			if (IF_LRO_ENABLED(sc))
				oce_rx_flush_lro(rq);
#endif
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if (flush_compl)
				break;
		} else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}

/*
 * @brief		delete all RQs in firmware and drain their CQs
 * @param sc		software handle to the device
 */
void
oce_stop_rx(POCE_SOFTC sc)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;
	struct oce_rq *rq;
	int i = 0;

	/* before deleting, disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */
			LOCK(&rq->rx_lock);

			bzero(&mbx, sizeof(mbx));
			if (!rq->islro) {
				fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
				fwcmd->params.req.rq_id = rq->rq_id;
				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
			} else {
				fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
				fwcmd1->params.req.rq_id = rq->rq_id;
				fwcmd1->params.req.rq_flags =
				    (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
				(void)oce_destroy_q(sc, &mbx,
				    sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
			}
			rq->qstate = QDELETED;

			DELAY(1000);

			if (!rq->islro)
				oce_rx_cq_clean(rq);
			else
				oce_rx_cq_clean_hwlro(rq);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);
			UNLOCK(&rq->rx_lock);
		}
	}
}

/*
 * @brief		create all RQs in firmware and start receiving
 * @param sc		software handle to the device
 * @returns		0 on success, error otherwise
 */
int
oce_start_rx(POCE_SOFTC sc)
{
	struct oce_rq *rq;
	int rc = 0, i;

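	/* RQ 0 is always the default (non-LRO) queue; the remaining RQs
	 * are created as hardware-LRO queues when hwlro is enabled. */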
	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)
			continue;
		if ((i == 0) || (!sc->enable_hwlro)) {
			rc = oce_mbox_create_rq(rq);
			if (rc)
				goto error;
			rq->islro = 0;
		} else {
			rc = oce_mbox_create_rq_v2(rq);
			if (rc)
				goto error;
			rq->islro = 1;
		}
		/* reset queue pointers */
		rq->qstate     = QCREATED;
		rq->pending    = 0;
		rq->ring->cidx = 0;
		rq->ring->pidx = 0;
	}

	if (sc->enable_hwlro) {
		rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
		if (rc)
			goto error;
	}

	DELAY(1);

	/* RSS config */
	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
		if (rc)
			goto error;
	}

	DELAY(1);
	return rc;

error:
	device_printf(sc->dev, "Start RX failed\n");
	return rc;
}