/*-
 * Copyright (C) 2013 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */



/* $FreeBSD: head/sys/dev/oce/oce_queue.c 252869 2013-07-06 08:30:45Z delphij $ */


#include "oce_if.h"

/*****************************************************
 * local queue functions
 *****************************************************/

static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len,
				  uint32_t frag_size,
				  uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
			 *mbx, size_t req_size, enum qtype qtype);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);



/**
 * @brief	Create and initialize all the queues on the board
 * @param sc	software handle to the device
 * @returns 0	if successful, or error
 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					 NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;

	}

	for_all_rq_queues(sc, rq, i) {
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
						 0, vector);
		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

	for_all_rq_queues(sc, rq, i) {
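		/*
		 * RQ 0 is the default (non-RSS) queue and shares EQ 0 with
		 * WQ 0; every RSS queue i (i > 0) is serviced by EQ i - 1.
		 */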
		rc = oce_rq_create(rq, sc->if_id,
					sc->eq[(i == 0) ? 0:(i-1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	oce_queue_release_all(sc);
	return 1;
}



/**
 * @brief Releases all the queues created
 * @param sc		software handle to the device
 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}



/**
 * @brief 		Function to create a WQ for NIC Tx
 * @param sc 		software handle to the device
 * @param q_len		number of entries in the queue
 * @param wq_type	work queue type
 * @returns		the pointer to the WQ created or NULL on failure
 */
static struct
oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
{
	struct oce_wq *wq;
	int rc = 0, i;

	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
			  "Invalid q length. Must be "
			  "[256, 2048]: 0x%x\n", q_len);
		return NULL;
	}

	/* allocate wq */
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!wq)
		return NULL;

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_TX_SIZE,
				OCE_MAX_TX_ELEMENTS,
				PAGE_SIZE, 0, NULL, NULL, &wq->tag);

	if (rc)
		goto free_wq;


	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
		if (rc)
			goto free_wq;
	}

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
	if (!wq->ring)
		goto free_wq;


	LOCK_CREATE(&wq->tx_lock, "TX_lock");

#if __FreeBSD_version >= 800000
	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
			M_WAITOK, &wq->tx_lock.mutex);
	if (!wq->br)
		goto free_wq;
#endif
	return wq;


free_wq:
	device_printf(sc->dev, "Create WQ failed\n");
	oce_wq_free(wq);
	return NULL;
}



/**
 * @brief 		Frees the work queue
 * @param wq		pointer to work queue to free
 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	free(wq, M_DEVBUF);
}



/**
 * @brief 		Create a work queue
 * @param wq		pointer to work queue
 * @param eq		pointer to associated event queue
 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ */
	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;


	wq->cq = cq;

	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

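	/* Link the CQ to its EQ and install the WQ completion handler */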
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}




/**
 * @brief 		Delete a work queue
 * @param wq		pointer to work queue
 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
		wq->qstate = QDELETED;
	}

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}



/**
 * @brief 		function to allocate receive queue resources
 * @param sc		software handle to the device
 * @param q_len		length of receive queue
 * @param frag_size	size of a receive queue fragment
 * @param mtu		maximum transmission unit
 * @param rss		is-rss-queue flag
 * @returns		the pointer to the RQ created or NULL on failure
 */
static struct
oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len,
				  uint32_t frag_size,
				  uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;


	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->packets_in = 0;
	rq->packets_out = 0;
	rq->pending = 0;

	rq->parent = (void *)sc;

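	/*
	 * DMA tag for receive buffers: one segment of at most
	 * OCE_MAX_RX_SIZE bytes per buffer.
	 */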
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				OCE_MAX_RX_SIZE,
				1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);

	if (rc)
		goto free_rq;

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
				 sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}




/**
 * @brief 		Free a receive queue
 * @param rq		pointer to receive queue
 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i = 0;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}




/**
 * @brief 		Create a receive queue
 * @param rq 		receive queue
 * @param if_id		interface identifier index
 * @param eq		pointer to event queue
 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Don't create RQ here. Create in if_activate */
	rq->qstate     = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;

}




/**
 * @brief 		Delete a receive queue
 * @param rq		receive queue
 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));

		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
		rq->qstate = QDELETED;
	}

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}


/**
 * @brief		function to create an event queue
 * @param sc		software handle to the device
 * @param q_len		length of event queue
 * @param item_size	size of an event queue item
 * @param eq_delay	event queue delay
 * @param vector	interrupt vector to associate with the EQ
 * @retval eq      	success, pointer to event queue
 * @retval NULL		failure
 */
static struct
oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}




/**
 * @brief 		Function to delete an event queue
 * @param eq		pointer to an event queue
 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);

}




/**
 * @brief		Function to create an MQ
 * @param sc		software handle to the device
 * @param eq		the EQ to associate with the MQ for event notification
 * @param q_len		the number of entries to create in the MQ
 * @returns		pointer to the created MQ, or NULL on failure
 */
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	int rc = 0;
	struct oce_cq *cq;
	oce_mq_ext_ctx_t *ctx;
	uint32_t num_pages;
	uint32_t page_size;
	int version;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
	if (!cq)
		return NULL;

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!mq) {
		oce_cq_del(sc, cq);
		goto error;
	}

	mq->parent = sc;

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
	if (!mq->ring)
		goto error;

	bzero(&mbx, sizeof(struct oce_mbx));

	IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_MQ_EXT,
				MBX_TIMEOUT_SEC,
				sizeof(struct mbx_create_common_mq_ex),
				version);

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
	page_size = mq->ring->num_items * mq->ring->item_size;

	ctx = &fwcmd->params.req.context;

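	/*
	 * XE201 adapters take the v1 context format, all other adapters
	 * the v0 format.  In both, ring_size is encoded as log2(q_len) + 1.
	 */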
	if (IS_XE201(sc)) {
		ctx->v1.num_pages = num_pages;
		ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v1.cq_id = cq->cq_id;
		ctx->v1.valid = 1;
		ctx->v1.async_cq_id = cq->cq_id;
		ctx->v1.async_cq_valid = 1;
		/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
		ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
		ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
		ctx->v1.async_evt_bitmap |=
					LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
	}
	else {
		ctx->v0.num_pages = num_pages;
		ctx->v0.cq_id = cq->cq_id;
		ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
		ctx->v0.valid = 1;
		/* Subscribe to all async events */
		ctx->v0.async_evt_bitmap = 0xffffffff;
	}

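	/*
	 * The request is carried inline (embedded) in the bootstrap mailbox;
	 * DW_SWAP performs any byte swapping the hardware requires.
	 */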
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (!rc)
		rc = fwcmd->hdr.u0.rsp.status;
	if (rc) {
		device_printf(sc->dev,"%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
		goto error;
	}
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	mq->cq->eq = eq;
	mq->cfg.q_len = (uint8_t) q_len;
	mq->cfg.eqd = 0;
	mq->qstate = QCREATED;

	mq->cq->cb_arg = mq;
	mq->cq->cq_handler = oce_mq_handler;

	return mq;

error:
	device_printf(sc->dev, "MQ create failed\n");
	oce_mq_free(mq);
	mq = NULL;
	return mq;
}





/**
 * @brief		Function to free a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
static void
oce_mq_free(struct oce_mq *mq)
{
	POCE_SOFTC sc;
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	if (!mq)
		return;

	sc = (POCE_SOFTC) mq->parent;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);
		mq->ring = NULL;
		if (mq->qstate == QCREATED) {
			bzero(&mbx, sizeof (struct oce_mbx));
			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
			fwcmd->params.req.id = mq->mq_id;
			(void) oce_destroy_q(sc, &mbx,
				sizeof (struct mbx_destroy_common_mq),
				QTYPE_MQ);
		}
		mq->qstate = QDELETED;
	}

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
		mq->cq = NULL;
	}

	free(mq, M_DEVBUF);
	mq = NULL;
}



/**
 * @brief		Function to delete an EQ, CQ, MQ, WQ or RQ
 * @param sc		software handle to the device
 * @param mbx		mailbox command to send to the fw to delete the queue
 *			(mbx contains the queue information to delete)
 * @param req_size	the size of the mbx payload dependent on the qtype
 * @param qtype		the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 * @returns 		0 on success, failure otherwise
 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
		enum qtype qtype)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,
				OCE_MBX_VER_V0);

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev,"%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
	return rc;
}



/**
 * @brief		Function to create a completion queue
 * @param sc		software handle to the device
 * @param eq		optional eq to be associated with the cq
 * @param q_len		length of completion queue
 * @param item_size	size of completion queue items
 * @param sol_event	command context event
 * @param is_eventable	eventable flag (CQ reports its events to the EQ)
 * @param nodelay	no delay flag
 * @param ncoalesce	completion coalescence count
 * @returns 		pointer to the cq created, NULL on failure
 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}



/**
 * @brief		Deletes the completion queue
 * @param sc		software handle to the device
 * @param cq		pointer to a completion queue
 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {

		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
		/* now destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
	cq = NULL;
}



/**
 * @brief		Start a receive queue
 * @param rq		pointer to a receive queue
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int rc;

	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);

	if (rc == 0)
		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
	return rc;
}



/**
 * @brief		Start a work queue
 * @param wq		pointer to a work queue
 */
int
oce_start_wq(struct oce_wq *wq)
{
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}



/**
 * @brief		Start a mailbox queue
 * @param mq		pointer to a mailbox queue
 */
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}



/**
 * @brief		Function to arm an EQ so that it can generate events
 * @param sc		software handle to the device
 * @param qid		id of the EQ returned by the fw at the time of creation
 * @param npopped	number of EQEs to arm
 * @param rearm		rearm bit enable/disable
 * @param clearint	bit to clear the interrupt condition because of which
 *			EQEs are generated
 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);

}




/**
 * @brief		Function to arm a CQ with CQEs
 * @param sc		software handle to the device
 * @param qid		id of the CQ returned by the fw at the time of creation
 * @param npopped	number of CQEs to arm
 * @param rearm		rearm bit enable/disable
 */
void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);

}




/*
 * @brief		function to clean up an event queue, used during stop
 * @param eq		pointer to event queue structure
 * @returns		none
 */
void
oce_drain_eq(struct oce_eq *eq)
{

	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

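	/*
	 * Consume every valid EQE, clearing its valid bit, then re-arm the
	 * EQ with the interrupt condition cleared.
	 */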
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);

	} while (TRUE);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);

}



void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

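	/*
	 * Walk the CQ and zero each completed entry until an entry whose
	 * last dword is still clear is found, then re-arm the CQ with the
	 * number of entries consumed.
	 */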
	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;

	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

}


/*
 * @brief		function to drain a MCQ and process its CQEs
 * @param arg		pointer to the MQ whose completion queue is to be drained
 * @returns		none
 */
void
oce_drain_mq_cq(void *arg)
{
	/* TODO: additional code. */
	return;
}



/**
 * @brief		function to drain the completion queue of a receive queue
 * @param rq		pointer to the RQ to drain
 * @return		none
 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}


void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

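	/*
	 * Unload and free every RX buffer still posted to the hardware,
	 * advancing packets_out (with wrap-around) until nothing is pending.
	 */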
	while (rq->pending) {

		pd = &rq->pckts[rq->packets_out];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
			rq->packets_out = 0;
		else
			rq->packets_out++;

		rq->pending--;
	}

}

void
oce_stop_rx(POCE_SOFTC sc)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct oce_rq *rq;
	int i = 0;

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */

			bzero(&mbx, sizeof(mbx));
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;

			(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);

			rq->qstate = QDELETED;

			DELAY(1);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);

		}
	}
}



int
oce_start_rx(POCE_SOFTC sc)
{
	struct oce_rq *rq;
	int rc = 0, i;

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)
			continue;
		rc = oce_mbox_create_rq(rq);
		if (rc)
			goto error;
		/* reset queue pointers */
		rq->qstate	 = QCREATED;
		rq->pending	 = 0;
		rq->ring->cidx	 = 0;
		rq->ring->pidx	 = 0;
		rq->packets_in	 = 0;
		rq->packets_out	 = 0;
	}

	DELAY(1);

	/* RSS config */
	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
		if (rc)
			goto error;

	}

	return rc;
error:
	device_printf(sc->dev, "Start RX failed\n");
	return rc;

}