oce_queue.c (252869) oce_queue.c (257007)
1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39/* $FreeBSD: head/sys/dev/oce/oce_queue.c 257007 2013-10-23 18:58:38Z delphij $ */
40
41/* $FreeBSD: head/sys/dev/oce/oce_queue.c 252869 2013-07-06 08:30:45Z delphij $ */
42
43
44#include "oce_if.h"
45
46/*****************************************************
47 * local queue functions
48 *****************************************************/
49
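/*
 * Queue lifecycle implemented in this file: oce_queue_init_all() allocates
 * and creates the WQs, RQs, EQs, CQs and the MQ; oce_start_rq/wq/mq() arm
 * the corresponding completion queues; oce_stop_rx() and the oce_drain_*()
 * helpers quiesce the queues; oce_queue_release_all() deletes and frees them.
 */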
50static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
51 uint32_t q_len, uint32_t wq_type);
52static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
53static void oce_wq_free(struct oce_wq *wq);
54static void oce_wq_del(struct oce_wq *wq);
55static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
56 uint32_t q_len,
57 uint32_t frag_size,
58 uint32_t mtu, uint32_t rss);
59static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
60static void oce_rq_free(struct oce_rq *rq);
61static void oce_rq_del(struct oce_rq *rq);
62static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
63 uint32_t q_len,
64 uint32_t item_size,
65 uint32_t eq_delay,
66 uint32_t vector);
67static void oce_eq_del(struct oce_eq *eq);
68static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
69 struct oce_eq *eq, uint32_t q_len);
70static void oce_mq_free(struct oce_mq *mq);
71static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
72 *mbx, size_t req_size, enum qtype qtype);
73struct oce_cq *oce_cq_create(POCE_SOFTC sc,
74 struct oce_eq *eq,
75 uint32_t q_len,
76 uint32_t item_size,
77 uint32_t sol_event,
78 uint32_t is_eventable,
79 uint32_t nodelay, uint32_t ncoalesce);
80static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
81
82
83
84/**
85 * @brief Create and initialize all the queues on the board
86 * @param sc software handle to the device
87 * @returns 0 if successful, or error
88 **/
89int
90oce_queue_init_all(POCE_SOFTC sc)
91{
92 int rc = 0, i, vector;
93 struct oce_wq *wq;
94 struct oce_rq *rq;
95 struct oce_aic_obj *aic;
96
97 /* alloc TX/RX queues */
98 for_all_wq_queues(sc, wq, i) {
99 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
100 NIC_WQ_TYPE_STANDARD);
101 if (!sc->wq[i])
102 goto error;
103
104 }
105
106 for_all_rq_queues(sc, rq, i) {
107 sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
108 OCE_MAX_JUMBO_FRAME_SIZE,
109 (i == 0) ? 0 : is_rss_enabled(sc));
110 if (!sc->rq[i])
111 goto error;
112 }
113
114 /* Create network interface on card */
115 if (oce_create_nw_interface(sc))
116 goto error;
117
118 /* create all of the event queues */
119 for (vector = 0; vector < sc->intr_count; vector++) {
120 /* setup aic defaults for each event queue */
121 aic = &sc->aic_obj[vector];
122 aic->max_eqd = OCE_MAX_EQD;
123 aic->min_eqd = OCE_MIN_EQD;
124 aic->et_eqd = OCE_MIN_EQD;
125 aic->enable = TRUE;
126
127 sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
128 0, vector);
129 if (!sc->eq[vector])
130 goto error;
131 }
132
133 /* create Tx, Rx and mcc queues */
134 for_all_wq_queues(sc, wq, i) {
135 rc = oce_wq_create(wq, sc->eq[i]);
136 if (rc)
137 goto error;
138 wq->queue_index = i;
139 TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
140 }
141
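/*
 * RQ-to-EQ binding below: the default RQ (index 0) shares eq[0] with the
 * first RSS RQ, and RSS RQ i is attached to eq[i - 1].
 */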
142 for_all_rq_queues(sc, rq, i) {
143 rc = oce_rq_create(rq, sc->if_id,
144 sc->eq[(i == 0) ? 0:(i-1)]);
145 if (rc)
146 goto error;
147 rq->queue_index = i;
148 }
149
150 sc->mq = oce_mq_create(sc, sc->eq[0], 64);
151 if (!sc->mq)
152 goto error;
153
154 return rc;
155
156error:
157 oce_queue_release_all(sc);
158 return 1;
159}
160
161
162
163/**
164 * @brief Releases all the queues created (RQs, WQs, MQ and EQs)
165 * @param sc software handle to the device
166 */
167void
168oce_queue_release_all(POCE_SOFTC sc)
169{
170 int i = 0;
171 struct oce_wq *wq;
172 struct oce_rq *rq;
173 struct oce_eq *eq;
174
175 for_all_rq_queues(sc, rq, i) {
176 if (rq) {
177 oce_rq_del(sc->rq[i]);
178 oce_rq_free(sc->rq[i]);
179 }
180 }
181
182 for_all_wq_queues(sc, wq, i) {
183 if (wq) {
184 oce_wq_del(sc->wq[i]);
185 oce_wq_free(sc->wq[i]);
186 }
187 }
188
189 if (sc->mq)
190 oce_mq_free(sc->mq);
191
192 for_all_evnt_queues(sc, eq, i) {
193 if (eq)
194 oce_eq_del(sc->eq[i]);
195 }
196}
197
198
199
200/**
201 * @brief Function to create a WQ for NIC Tx
202 * @param sc software handle to the device
203 * @param q_len number of entries in the queue
204 * @param wq_type work queue type
205 * @returns the pointer to the WQ created or NULL on failure
206 */
207static struct
208oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
209{
210 struct oce_wq *wq;
211 int rc = 0, i;
212
213 /* q_len must be min 256 and max 2k */
214 if (q_len < 256 || q_len > 2048) {
215 device_printf(sc->dev,
216 "Invalid q length. Must be "
217 "[256, 2000]: 0x%x\n", q_len);
218 return NULL;
219 }
220
221 /* allocate wq */
222 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
223 if (!wq)
224 return NULL;
225
226 /* Set the wq config */
227 wq->cfg.q_len = q_len;
228 wq->cfg.wq_type = (uint8_t) wq_type;
229 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
230 wq->cfg.nbufs = 2 * wq->cfg.q_len;
231 wq->cfg.nhdl = 2 * wq->cfg.q_len;
232
233 wq->parent = (void *)sc;
234
235 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
236 1, 0,
237 BUS_SPACE_MAXADDR,
238 BUS_SPACE_MAXADDR,
239 NULL, NULL,
240 OCE_MAX_TX_SIZE,
241 OCE_MAX_TX_ELEMENTS,
242 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
243
244 if (rc)
245 goto free_wq;
246
247
248 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
249 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
250 if (rc)
251 goto free_wq;
252 }
253
254 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
255 if (!wq->ring)
256 goto free_wq;
257
258
259 LOCK_CREATE(&wq->tx_lock, "TX_lock");
260
261#if __FreeBSD_version >= 800000
262 /* Allocate buf ring for multiqueue*/
263 wq->br = buf_ring_alloc(4096, M_DEVBUF,
264 M_WAITOK, &wq->tx_lock.mutex);
265 if (!wq->br)
266 goto free_wq;
267#endif
268 return wq;
269
270
271free_wq:
272 device_printf(sc->dev, "Create WQ failed\n");
273 oce_wq_free(wq);
274 return NULL;
275}
276
277
278
279/**
280 * @brief Frees the work queue
281 * @param wq pointer to work queue to free
282 */
283static void
284oce_wq_free(struct oce_wq *wq)
285{
286 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
287 int i;
288
289 taskqueue_drain(taskqueue_swi, &wq->txtask);
290
291 if (wq->ring != NULL) {
292 oce_destroy_ring_buffer(sc, wq->ring);
293 wq->ring = NULL;
294 }
295
296 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
297 if (wq->pckts[i].map != NULL) {
298 bus_dmamap_unload(wq->tag, wq->pckts[i].map);
299 bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
300 wq->pckts[i].map = NULL;
301 }
302 }
303
304 if (wq->tag != NULL)
305 bus_dma_tag_destroy(wq->tag);
306 if (wq->br != NULL)
307 buf_ring_free(wq->br, M_DEVBUF);
308
309 LOCK_DESTROY(&wq->tx_lock);
310 free(wq, M_DEVBUF);
311}
312
313
314
315/**
316 * @brief Create a work queue
317 * @param wq pointer to work queue
318 * @param eq pointer to associated event queue
319 */
320static int
321oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
322{
323 POCE_SOFTC sc = wq->parent;
324 struct oce_cq *cq;
325 int rc = 0;
326
327 /* create the CQ */
328 cq = oce_cq_create(sc,
329 eq,
330 CQ_LEN_1024,
331 sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
332 if (!cq)
333 return ENXIO;
334
335
336 wq->cq = cq;
337
338 rc = oce_mbox_create_wq(wq);
339 if (rc)
340 goto error;
341
342 wq->qstate = QCREATED;
343 wq->wq_free = wq->cfg.q_len;
344 wq->ring->cidx = 0;
345 wq->ring->pidx = 0;
346
347 eq->cq[eq->cq_valid] = cq;
348 eq->cq_valid++;
349 cq->cb_arg = wq;
350 cq->cq_handler = oce_wq_handler;
351
352 return 0;
353
354error:
355 device_printf(sc->dev, "WQ create failed\n");
356 oce_wq_del(wq);
357 return rc;
358}
359
360
361
362
363/**
364 * @brief Delete a work queue
365 * @param wq pointer to work queue
366 */
367static void
368oce_wq_del(struct oce_wq *wq)
369{
370 struct oce_mbx mbx;
371 struct mbx_delete_nic_wq *fwcmd;
372 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
373
374 if (wq->qstate == QCREATED) {
375 bzero(&mbx, sizeof(struct oce_mbx));
376 /* now fill the command */
377 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
378 fwcmd->params.req.wq_id = wq->wq_id;
379 (void)oce_destroy_q(sc, &mbx,
380 sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
381 wq->qstate = QDELETED;
382 }
383
384 if (wq->cq != NULL) {
385 oce_cq_del(sc, wq->cq);
386 wq->cq = NULL;
387 }
388}
389
390
391
392/**
393 * @brief function to allocate receive queue resources
394 * @param sc software handle to the device
395 * @param q_len length of receive queue
396 * @param frag_size size of a receive queue fragment
397 * @param mtu maximum transmission unit
398 * @param rss is-rss-queue flag
399 * @returns the pointer to the RQ created or NULL on failure
400 */
401static struct
402oce_rq *oce_rq_init(POCE_SOFTC sc,
403 uint32_t q_len,
404 uint32_t frag_size,
405 uint32_t mtu, uint32_t rss)
406{
407 struct oce_rq *rq;
408 int rc = 0, i;
409
410 if (OCE_LOG2(frag_size) <= 0)
411 return NULL;
412
413 if ((q_len == 0) || (q_len > 1024))
414 return NULL;
415
416 /* allocate the rq */
417 rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
418 if (!rq)
419 return NULL;
420
421
422 rq->cfg.q_len = q_len;
423 rq->cfg.frag_size = frag_size;
424 rq->cfg.mtu = mtu;
425 rq->cfg.eqd = 0;
426 rq->lro_pkts_queued = 0;
427 rq->cfg.is_rss_queue = rss;
428 rq->packets_in = 0;
429 rq->packets_out = 0;
430 rq->pending = 0;
431
432 rq->parent = (void *)sc;
433
434 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
435 1, 0,
436 BUS_SPACE_MAXADDR,
437 BUS_SPACE_MAXADDR,
438 NULL, NULL,
439 OCE_MAX_RX_SIZE,
440 1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
441
442 if (rc)
443 goto free_rq;
444
445 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
446 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
447 if (rc)
448 goto free_rq;
449 }
450
451 /* create the ring buffer */
452 rq->ring = oce_create_ring_buffer(sc, q_len,
453 sizeof(struct oce_nic_rqe));
454 if (!rq->ring)
455 goto free_rq;
456
457 LOCK_CREATE(&rq->rx_lock, "RX_lock");
458
459 return rq;
460
461free_rq:
462 device_printf(sc->dev, "Create RQ failed\n");
463 oce_rq_free(rq);
464 return NULL;
465}
466
467
468
469
470/**
471 * @brief Free a receive queue
472 * @param rq pointer to receive queue
473 */
474static void
475oce_rq_free(struct oce_rq *rq)
476{
477 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
478 int i = 0 ;
479
480 if (rq->ring != NULL) {
481 oce_destroy_ring_buffer(sc, rq->ring);
482 rq->ring = NULL;
483 }
484 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
485 if (rq->pckts[i].map != NULL) {
486 bus_dmamap_unload(rq->tag, rq->pckts[i].map);
487 bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
488 rq->pckts[i].map = NULL;
489 }
490 if (rq->pckts[i].mbuf) {
491 m_free(rq->pckts[i].mbuf);
492 rq->pckts[i].mbuf = NULL;
493 }
494 }
495
496 if (rq->tag != NULL)
497 bus_dma_tag_destroy(rq->tag);
498
499 LOCK_DESTROY(&rq->rx_lock);
500 free(rq, M_DEVBUF);
501}
502
503
504
505
506/**
507 * @brief Create a receive queue
508 * @param rq receive queue
509 * @param if_id interface identifier index
510 * @param eq pointer to event queue
511 */
512static int
513oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
514{
515 POCE_SOFTC sc = rq->parent;
516 struct oce_cq *cq;
517
518 cq = oce_cq_create(sc,
519 eq,
520 CQ_LEN_1024,
521 sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
522 if (!cq)
523 return ENXIO;
524
525 rq->cq = cq;
526 rq->cfg.if_id = if_id;
527
528 /* Don't create RQ here. Create in if_activate */
529 rq->qstate = 0;
530 rq->ring->cidx = 0;
531 rq->ring->pidx = 0;
532 eq->cq[eq->cq_valid] = cq;
533 eq->cq_valid++;
534 cq->cb_arg = rq;
535 cq->cq_handler = oce_rq_handler;
536
537 return 0;
538
539}
540
541
542
543
544/**
545 * @brief Delete a receive queue
546 * @param rq receive queue
547 */
548static void
549oce_rq_del(struct oce_rq *rq)
550{
551 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
552 struct oce_mbx mbx;
553 struct mbx_delete_nic_rq *fwcmd;
554
555 if (rq->qstate == QCREATED) {
556 bzero(&mbx, sizeof(mbx));
557
558 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
559 fwcmd->params.req.rq_id = rq->rq_id;
560 (void)oce_destroy_q(sc, &mbx,
561 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
562 rq->qstate = QDELETED;
563 }
564
565 if (rq->cq != NULL) {
566 oce_cq_del(sc, rq->cq);
567 rq->cq = NULL;
568 }
569}
570
571
572
573/**
574 * @brief function to create an event queue
575 * @param sc software handle to the device
576 * @param q_len length of event queue
577 * @param item_size size of an event queue item
578 * @param eq_delay event queue delay
579 * @retval eq success, pointer to event queue
580 * @retval NULL failure
581 */
582static struct
583oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
584 uint32_t item_size,
585 uint32_t eq_delay,
586 uint32_t vector)
587{
588 struct oce_eq *eq;
589 int rc = 0;
590
591 /* allocate an eq */
592 eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
593 if (eq == NULL)
594 return NULL;
595
596 eq->parent = (void *)sc;
597 eq->eq_id = 0xffff;
598 eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
599 if (!eq->ring)
600 goto free_eq;
601
602 eq->eq_cfg.q_len = q_len;
603 eq->eq_cfg.item_size = item_size;
604 eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
605
606 rc = oce_mbox_create_eq(eq);
607 if (rc)
608 goto free_eq;
609
610 sc->intrs[sc->neqs++].eq = eq;
611
612 return eq;
613
614free_eq:
615 oce_eq_del(eq);
616 return NULL;
617}
618
619
620
621
622/**
623 * @brief Function to delete an event queue
624 * @param eq pointer to an event queue
625 */
626static void
627oce_eq_del(struct oce_eq *eq)
628{
629 struct oce_mbx mbx;
630 struct mbx_destroy_common_eq *fwcmd;
631 POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
632
633 if (eq->eq_id != 0xffff) {
634 bzero(&mbx, sizeof(mbx));
635 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
636 fwcmd->params.req.id = eq->eq_id;
637 (void)oce_destroy_q(sc, &mbx,
638 sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
639 }
640
641 if (eq->ring != NULL) {
642 oce_destroy_ring_buffer(sc, eq->ring);
643 eq->ring = NULL;
644 }
645
646 free(eq, M_DEVBUF);
647
648}
649
650
651
652
653/**
654 * @brief Function to create an MQ
655 * @param sc software handle to the device
656 * @param eq the EQ to associate with the MQ for event notification
657 * @param q_len the number of entries to create in the MQ
658 * @returns pointer to the created MQ, failure otherwise
659 */
660static struct oce_mq *
661oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
662{
663 struct oce_mbx mbx;
664 struct mbx_create_common_mq_ex *fwcmd = NULL;
665 struct oce_mq *mq = NULL;
666 int rc = 0;
667 struct oce_cq *cq;
668 oce_mq_ext_ctx_t *ctx;
669 uint32_t num_pages;
670 uint32_t page_size;
671 int version;
672
673 cq = oce_cq_create(sc, eq, CQ_LEN_256,
674 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
675 if (!cq)
676 return NULL;
677
678 /* allocate the mq */
679 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
680 if (!mq) {
681 oce_cq_del(sc, cq);
682 goto error;
683 }
684
685 mq->parent = sc;
686
687 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
688 if (!mq->ring)
689 goto error;
690
691 bzero(&mbx, sizeof(struct oce_mbx));
692
693 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
694 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
695 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
696 MBX_SUBSYSTEM_COMMON,
697 OPCODE_COMMON_CREATE_MQ_EXT,
698 MBX_TIMEOUT_SEC,
699 sizeof(struct mbx_create_common_mq_ex),
700 version);
701
702 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
703 page_size = mq->ring->num_items * mq->ring->item_size;
704
705 ctx = &fwcmd->params.req.context;
706
707 if (IS_XE201(sc)) {
708 ctx->v1.num_pages = num_pages;
709 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
710 ctx->v1.cq_id = cq->cq_id;
711 ctx->v1.valid = 1;
712 ctx->v1.async_cq_id = cq->cq_id;
713 ctx->v1.async_cq_valid = 1;
714 /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
715 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
716 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
717 ctx->v1.async_evt_bitmap |=
718 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
719 }
720 else {
721 ctx->v0.num_pages = num_pages;
722 ctx->v0.cq_id = cq->cq_id;
723 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
724 ctx->v0.valid = 1;
725 /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
726 ctx->v0.async_evt_bitmap = 0xffffffff;
727 }
728
729 mbx.u0.s.embedded = 1;
730 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
731 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
732
733 rc = oce_mbox_post(sc, &mbx, NULL);
734 if (!rc)
735 rc = fwcmd->hdr.u0.rsp.status;
736 if (rc) {
737 device_printf(sc->dev,"%s failed - cmd status: %d\n",
738 __FUNCTION__, rc);
739 goto error;
740 }
741 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
742 mq->cq = cq;
743 eq->cq[eq->cq_valid] = cq;
744 eq->cq_valid++;
745 mq->cq->eq = eq;
746 mq->cfg.q_len = (uint8_t) q_len;
747 mq->cfg.eqd = 0;
748 mq->qstate = QCREATED;
749
750 mq->cq->cb_arg = mq;
751 mq->cq->cq_handler = oce_mq_handler;
752
753 return mq;
754
755error:
756 device_printf(sc->dev, "MQ create failed\n");
757 oce_mq_free(mq);
758 mq = NULL;
759 return mq;
760}
761
762
763
764
765
766/**
767 * @brief Function to free a mailbox queue
768 * @param mq pointer to a mailbox queue
769 */
770static void
771oce_mq_free(struct oce_mq *mq)
772{
773 POCE_SOFTC sc;
774 struct oce_mbx mbx;
775 struct mbx_destroy_common_mq *fwcmd;
776
777 if (!mq)
778 return;
779 sc = (POCE_SOFTC) mq->parent;
780 if (mq->ring != NULL) {
781 oce_destroy_ring_buffer(sc, mq->ring);
782 mq->ring = NULL;
783 if (mq->qstate == QCREATED) {
784 bzero(&mbx, sizeof (struct oce_mbx));
785 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
786 fwcmd->params.req.id = mq->mq_id;
787 (void) oce_destroy_q(sc, &mbx,
788 sizeof (struct mbx_destroy_common_mq),
789 QTYPE_MQ);
790 }
791 mq->qstate = QDELETED;
792 }
793
794 if (mq->cq != NULL) {
795 oce_cq_del(sc, mq->cq);
796 mq->cq = NULL;
797 }
798
799 free(mq, M_DEVBUF);
800 mq = NULL;
801}
802
803
804
805/**
806 * @brief Function to delete an EQ, CQ, MQ, WQ or RQ
807 * @param sc software handle to the device
808 * @param mbx mailbox command to send to the fw to delete the queue
809 * (mbx contains the queue information to delete)
810 * @param req_size the size of the mbx payload dependent on the qtype
811 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
812 * @returns 0 on success, failure otherwise
813 */
814static int
815oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
816 enum qtype qtype)
817{
818 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
819 int opcode;
820 int subsys;
821 int rc = 0;
822
823 switch (qtype) {
824 case QTYPE_EQ:
825 opcode = OPCODE_COMMON_DESTROY_EQ;
826 subsys = MBX_SUBSYSTEM_COMMON;
827 break;
828 case QTYPE_CQ:
829 opcode = OPCODE_COMMON_DESTROY_CQ;
830 subsys = MBX_SUBSYSTEM_COMMON;
831 break;
832 case QTYPE_MQ:
833 opcode = OPCODE_COMMON_DESTROY_MQ;
834 subsys = MBX_SUBSYSTEM_COMMON;
835 break;
836 case QTYPE_WQ:
837 opcode = NIC_DELETE_WQ;
838 subsys = MBX_SUBSYSTEM_NIC;
839 break;
840 case QTYPE_RQ:
841 opcode = NIC_DELETE_RQ;
842 subsys = MBX_SUBSYSTEM_NIC;
843 break;
844 default:
845 return EINVAL;
846 }
847
848 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
849 opcode, MBX_TIMEOUT_SEC, req_size,
850 OCE_MBX_VER_V0);
851
852 mbx->u0.s.embedded = 1;
853 mbx->payload_length = (uint32_t) req_size;
854 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
855
856 rc = oce_mbox_post(sc, mbx, NULL);
857 if (!rc)
858 rc = hdr->u0.rsp.status;
859 if (rc)
860 device_printf(sc->dev,"%s failed - cmd status: %d\n",
861 __FUNCTION__, rc);
862 return rc;
863}
864
865
866
867/**
868 * @brief Function to create a completion queue
869 * @param sc software handle to the device
870 * @param eq optional eq to be associated with the cq
871 * @param q_len length of completion queue
872 * @param item_size size of completion queue items
873 * @param sol_event command context event
874 * @param is_eventable flag indicating the CQ generates events on its EQ
875 * @param nodelay no delay flag
876 * @param ncoalesce no coalescence flag
877 * @returns pointer to the cq created, NULL on failure
878 */
879struct oce_cq *
880oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
881 uint32_t q_len,
882 uint32_t item_size,
883 uint32_t sol_event,
884 uint32_t is_eventable,
885 uint32_t nodelay, uint32_t ncoalesce)
886{
887 struct oce_cq *cq = NULL;
888 int rc = 0;
889
890 cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
891 if (!cq)
892 return NULL;
893
894 cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
895 if (!cq->ring)
896 goto error;
897
898 cq->parent = sc;
899 cq->eq = eq;
900 cq->cq_cfg.q_len = q_len;
901 cq->cq_cfg.item_size = item_size;
902 cq->cq_cfg.nodelay = (uint8_t) nodelay;
903
904 rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
905 if (rc)
906 goto error;
907
908 sc->cq[sc->ncqs++] = cq;
909
910 return cq;
911
912error:
913 device_printf(sc->dev, "CQ create failed\n");
914 oce_cq_del(sc, cq);
915 return NULL;
916}
917
918
919
920/**
921 * @brief Deletes the completion queue
922 * @param sc software handle to the device
923 * @param cq pointer to a completion queue
924 */
925static void
926oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
927{
928 struct oce_mbx mbx;
929 struct mbx_destroy_common_cq *fwcmd;
930
931 if (cq->ring != NULL) {
932
933 bzero(&mbx, sizeof(struct oce_mbx));
934 /* now fill the command */
935 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
936 fwcmd->params.req.id = cq->cq_id;
937 (void)oce_destroy_q(sc, &mbx,
938 sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
939 /*NOW destroy the ring */
940 oce_destroy_ring_buffer(sc, cq->ring);
941 cq->ring = NULL;
942 }
943
944 free(cq, M_DEVBUF);
945 cq = NULL;
946}
947
948
949
950/**
951 * @brief Start a receive queue
952 * @param rq pointer to a receive queue
953 */
954int
955oce_start_rq(struct oce_rq *rq)
956{
957 int rc;
958
959 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
960
961 if (rc == 0)
962 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
963 return rc;
964}
965
966
967
968/**
969 * @brief Start a work queue
970 * @param wq pointer to a work queue
971 */
972int
973oce_start_wq(struct oce_wq *wq)
974{
975 oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
976 return 0;
977}
978
979
980
981/**
982 * @brief Start a mailbox queue
983 * @param mq pointer to a mailbox queue
984 */
985int
986oce_start_mq(struct oce_mq *mq)
987{
988 oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
989 return 0;
990}
991
992
993
994/**
995 * @brief Function to arm an EQ so that it can generate events
996 * @param sc software handle to the device
997 * @param qid id of the EQ returned by the fw at the time of creation
998 * @param npopped number of EQEs to arm
999 * @param rearm rearm bit enable/disable
1000 * @param clearint bit to clear the interrupt condition because of which
1001 * EQEs are generated
1002 */
1003void
1004oce_arm_eq(POCE_SOFTC sc,
1005 int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
1006{
1007 eq_db_t eq_db = { 0 };
1008
1009 eq_db.bits.rearm = rearm;
1010 eq_db.bits.event = 1;
1011 eq_db.bits.num_popped = npopped;
1012 eq_db.bits.clrint = clearint;
1013 eq_db.bits.qid = qid;
1014 OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
1015
1016}
1017
1018
1019
1020
1021/**
1022 * @brief Function to arm a CQ with CQEs
1023 * @param sc software handle to the device
1024 * @param qid id of the CQ returned by the fw at the time of creation
1025 * @param npopped number of CQEs to arm
1026 * @param rearm rearm bit enable/disable
1027 */
1028void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
1029{
1030 cq_db_t cq_db = { 0 };
1031
1032 cq_db.bits.rearm = rearm;
1033 cq_db.bits.num_popped = npopped;
1034 cq_db.bits.event = 0;
1035 cq_db.bits.qid = qid;
1036 OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1037
1038}
1039
1040
1041
1042
1043/**
1044 * @brief function to drain an EQ of pending entries during stop
1045 * @param eq pointer to event queue structure
1046 * @returns nothing; consumed EQEs are acknowledged to the controller
1047 */
1048void
1049oce_drain_eq(struct oce_eq *eq)
1050{
1051
1052 struct oce_eqe *eqe;
1053 uint16_t num_eqe = 0;
1054 POCE_SOFTC sc = eq->parent;
1055
1056 do {
1057 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1058 if (eqe->evnt == 0)
1059 break;
1060 eqe->evnt = 0;
1061 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1062 BUS_DMASYNC_POSTWRITE);
1063 num_eqe++;
1064 RING_GET(eq->ring, 1);
1065
1066 } while (TRUE);
1067
1068 oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
1069
1070}
1071
1072
1073
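/**
 * @brief Drain the completion queue of a work queue during stop
 * @param wq pointer to the work queue whose CQ is to be drained
 */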
1074void
1075oce_drain_wq_cq(struct oce_wq *wq)
1076{
1077 POCE_SOFTC sc = wq->parent;
1078 struct oce_cq *cq = wq->cq;
1079 struct oce_nic_tx_cqe *cqe;
1080 int num_cqes = 0;
1081
1082 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1083 BUS_DMASYNC_POSTWRITE);
1084
1085 do {
1086 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1087 if (cqe->u0.dw[3] == 0)
1088 break;
1089 cqe->u0.dw[3] = 0;
1090 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1091 BUS_DMASYNC_POSTWRITE);
1092 RING_GET(cq->ring, 1);
1093 num_cqes++;
1094
1095 } while (TRUE);
1096
1097 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1098
1099}
1100
1101
1102/**
1103 * @brief function to drain the MQ CQ and process its CQEs
1104 * @param arg opaque handle passed by the caller
1105 * @returns nothing; the body is currently a TODO stub and does
1106 * not process any CQEs yet
1107 */
1108void
1109oce_drain_mq_cq(void *arg)
1110{
1111 /* TODO: additional code. */
1112 return;
1113}
1114
1115
1116
1117/**
1118 * @brief function to drain a Receive queue CQ
1119 * @param rq pointer to the RQ to drain
1120 * @return nothing; valid CQEs are invalidated and acknowledged
1121 */
1122void
1123oce_drain_rq_cq(struct oce_rq *rq)
1124{
1125 struct oce_nic_rx_cqe *cqe;
1126 uint16_t num_cqe = 0;
1127 struct oce_cq *cq;
1128 POCE_SOFTC sc;
1129
1130 sc = rq->parent;
1131 cq = rq->cq;
1132 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1133 /* dequeue till you reach an invalid cqe */
1134 while (RQ_CQE_VALID(cqe)) {
1135 RQ_CQE_INVALIDATE(cqe);
1136 RING_GET(cq->ring, 1);
1137 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1138 struct oce_nic_rx_cqe);
1139 num_cqe++;
1140 }
1141 oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
1142
1143 return;
1144}
1145
1146
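/**
 * @brief Free the RX buffers still posted to a receive queue
 * @param rq pointer to the receive queue
 */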
1147void
1148oce_free_posted_rxbuf(struct oce_rq *rq)
1149{
1150 struct oce_packet_desc *pd;
1151
1152 while (rq->pending) {
1153
1154 pd = &rq->pckts[rq->packets_out];
1155 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1156 bus_dmamap_unload(rq->tag, pd->map);
1157 if (pd->mbuf != NULL) {
1158 m_freem(pd->mbuf);
1159 pd->mbuf = NULL;
1160 }
1161
1162 if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1163 rq->packets_out = 0;
1164 else
1165 rq->packets_out++;
1166
1167 rq->pending--;
1168 }
1169
1170}
1171
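/**
 * @brief Delete all receive queues in firmware and free posted RX buffers
 * @param sc software handle to the device
 */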
1172void
1173oce_stop_rx(POCE_SOFTC sc)
1174{
1175 struct oce_mbx mbx;
1176 struct mbx_delete_nic_rq *fwcmd;
1177 struct oce_rq *rq;
1178 int i = 0;
1179
1180 for_all_rq_queues(sc, rq, i) {
1181 if (rq->qstate == QCREATED) {
1182 /* Delete rxq in firmware */
1183
1184 bzero(&mbx, sizeof(mbx));
1185 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1186 fwcmd->params.req.rq_id = rq->rq_id;
1187
1188 (void)oce_destroy_q(sc, &mbx,
1189 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1190
1191 rq->qstate = QDELETED;
1192
1193 DELAY(1);
1194
1195 /* Free posted RX buffers that are not used */
1196 oce_free_posted_rxbuf(rq);
1197
1198 }
1199 }
1200}
1201
1202
1203
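/**
 * @brief Re-create the receive queues in firmware and configure RSS if enabled
 * @param sc software handle to the device
 * @returns 0 on success, error code otherwise
 */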
1204int
1205oce_start_rx(POCE_SOFTC sc)
1206{
1207 struct oce_rq *rq;
1208 int rc = 0, i;
1209
1210 for_all_rq_queues(sc, rq, i) {
1211 if (rq->qstate == QCREATED)
1212 continue;
1213 rc = oce_mbox_create_rq(rq);
1214 if (rc)
1215 goto error;
1216 /* reset queue pointers */
1217 rq->qstate = QCREATED;
1218 rq->pending = 0;
1219 rq->ring->cidx = 0;
1220 rq->ring->pidx = 0;
1221 rq->packets_in = 0;
1222 rq->packets_out = 0;
1223 }
1224
1225 DELAY(1);
1226
1227 /* RSS config */
1228 if (is_rss_enabled(sc)) {
1229 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1230 if (rc)
1231 goto error;
1232
1233 }
1234
1235 return rc;
1236error:
1237 device_printf(sc->dev, "Start RX failed\n");
1238 return rc;
1239
1240}
1241
1242
1243