Deleted Added
full compact
oce_queue.c (331722) oce_queue.c (338938)
1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
1/*-
2 * Copyright (C) 2013 Emulex
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Contact Information:
32 * freebsd-drivers@emulex.com
33 *
34 * Emulex
35 * 3333 Susan Street
36 * Costa Mesa, CA 92626
37 */
38
39/* $FreeBSD: stable/11/sys/dev/oce/oce_queue.c 331722 2018-03-29 02:50:57Z eadler $ */
39/* $FreeBSD: stable/11/sys/dev/oce/oce_queue.c 338938 2018-09-25 23:48:43Z jpaetzel $ */
40
41#include "oce_if.h"
42
43/*****************************************************
44 * local queue functions
45 *****************************************************/
46
47static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
48 uint32_t q_len, uint32_t wq_type);
49static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
50static void oce_wq_free(struct oce_wq *wq);
51static void oce_wq_del(struct oce_wq *wq);
52static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
53 uint32_t q_len,
54 uint32_t frag_size,
55 uint32_t mtu, uint32_t rss);
56static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
57static void oce_rq_free(struct oce_rq *rq);
58static void oce_rq_del(struct oce_rq *rq);
59static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
60 uint32_t q_len,
61 uint32_t item_size,
62 uint32_t eq_delay,
63 uint32_t vector);
64static void oce_eq_del(struct oce_eq *eq);
65static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
66 struct oce_eq *eq, uint32_t q_len);
67static void oce_mq_free(struct oce_mq *mq);
68static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
40
41#include "oce_if.h"
42
43/*****************************************************
44 * local queue functions
45 *****************************************************/
46
47static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
48 uint32_t q_len, uint32_t wq_type);
49static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
50static void oce_wq_free(struct oce_wq *wq);
51static void oce_wq_del(struct oce_wq *wq);
52static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
53 uint32_t q_len,
54 uint32_t frag_size,
55 uint32_t mtu, uint32_t rss);
56static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
57static void oce_rq_free(struct oce_rq *rq);
58static void oce_rq_del(struct oce_rq *rq);
59static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
60 uint32_t q_len,
61 uint32_t item_size,
62 uint32_t eq_delay,
63 uint32_t vector);
64static void oce_eq_del(struct oce_eq *eq);
65static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
66 struct oce_eq *eq, uint32_t q_len);
67static void oce_mq_free(struct oce_mq *mq);
68static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
69 *mbx, size_t req_size, enum qtype qtype);
69 *mbx, size_t req_size, enum qtype qtype, int version);
70struct oce_cq *oce_cq_create(POCE_SOFTC sc,
71 struct oce_eq *eq,
72 uint32_t q_len,
73 uint32_t item_size,
74 uint32_t sol_event,
75 uint32_t is_eventable,
76 uint32_t nodelay, uint32_t ncoalesce);
77static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
78
79
80
81/**
82 * @brief Create and initialize all the queues on the board
83 * @param sc software handle to the device
84 * @returns 0 if successful, or error
85 **/
86int
87oce_queue_init_all(POCE_SOFTC sc)
88{
89 int rc = 0, i, vector;
90 struct oce_wq *wq;
91 struct oce_rq *rq;
92 struct oce_aic_obj *aic;
93
94 /* alloc TX/RX queues */
95 for_all_wq_queues(sc, wq, i) {
96 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
97 NIC_WQ_TYPE_STANDARD);
98 if (!sc->wq[i])
99 goto error;
100
101 }
102
103 for_all_rq_queues(sc, rq, i) {
104 sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
105 OCE_MAX_JUMBO_FRAME_SIZE,
106 (i == 0) ? 0 : is_rss_enabled(sc));
107 if (!sc->rq[i])
108 goto error;
109 }
110
111 /* Create network interface on card */
112 if (oce_create_nw_interface(sc))
113 goto error;
114
115 /* create all of the event queues */
116 for (vector = 0; vector < sc->intr_count; vector++) {
117 /* setup aic defaults for each event queue */
118 aic = &sc->aic_obj[vector];
119 aic->max_eqd = OCE_MAX_EQD;
120 aic->min_eqd = OCE_MIN_EQD;
121 aic->et_eqd = OCE_MIN_EQD;
122 aic->enable = TRUE;
70struct oce_cq *oce_cq_create(POCE_SOFTC sc,
71 struct oce_eq *eq,
72 uint32_t q_len,
73 uint32_t item_size,
74 uint32_t sol_event,
75 uint32_t is_eventable,
76 uint32_t nodelay, uint32_t ncoalesce);
77static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
78
79
80
81/**
82 * @brief Create and initialize all the queues on the board
83 * @param sc software handle to the device
84 * @returns 0 if successful, or error
85 **/
86int
87oce_queue_init_all(POCE_SOFTC sc)
88{
89 int rc = 0, i, vector;
90 struct oce_wq *wq;
91 struct oce_rq *rq;
92 struct oce_aic_obj *aic;
93
94 /* alloc TX/RX queues */
95 for_all_wq_queues(sc, wq, i) {
96 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
97 NIC_WQ_TYPE_STANDARD);
98 if (!sc->wq[i])
99 goto error;
100
101 }
102
103 for_all_rq_queues(sc, rq, i) {
104 sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
105 OCE_MAX_JUMBO_FRAME_SIZE,
106 (i == 0) ? 0 : is_rss_enabled(sc));
107 if (!sc->rq[i])
108 goto error;
109 }
110
111 /* Create network interface on card */
112 if (oce_create_nw_interface(sc))
113 goto error;
114
115 /* create all of the event queues */
116 for (vector = 0; vector < sc->intr_count; vector++) {
117 /* setup aic defaults for each event queue */
118 aic = &sc->aic_obj[vector];
119 aic->max_eqd = OCE_MAX_EQD;
120 aic->min_eqd = OCE_MIN_EQD;
121 aic->et_eqd = OCE_MIN_EQD;
122 aic->enable = TRUE;
123
124 sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
125 EQE_SIZE_4,0, vector);
123
126
124 sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
125 0, vector);
126 if (!sc->eq[vector])
127 goto error;
128 }
129
130 /* create Tx, Rx and mcc queues */
131 for_all_wq_queues(sc, wq, i) {
132 rc = oce_wq_create(wq, sc->eq[i]);
133 if (rc)
134 goto error;
135 wq->queue_index = i;
136 TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
137 }
138
139 for_all_rq_queues(sc, rq, i) {
140 rc = oce_rq_create(rq, sc->if_id,
141 sc->eq[(i == 0) ? 0:(i-1)]);
142 if (rc)
143 goto error;
144 rq->queue_index = i;
145 }
146
147 sc->mq = oce_mq_create(sc, sc->eq[0], 64);
148 if (!sc->mq)
149 goto error;
150
151 return rc;
152
153error:
154 oce_queue_release_all(sc);
155 return 1;
156}
157
158
159
160/**
161 * @brief Releases all mailbox queues created
162 * @param sc software handle to the device
163 */
164void
165oce_queue_release_all(POCE_SOFTC sc)
166{
167 int i = 0;
168 struct oce_wq *wq;
169 struct oce_rq *rq;
170 struct oce_eq *eq;
171
127 if (!sc->eq[vector])
128 goto error;
129 }
130
131 /* create Tx, Rx and mcc queues */
132 for_all_wq_queues(sc, wq, i) {
133 rc = oce_wq_create(wq, sc->eq[i]);
134 if (rc)
135 goto error;
136 wq->queue_index = i;
137 TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
138 }
139
140 for_all_rq_queues(sc, rq, i) {
141 rc = oce_rq_create(rq, sc->if_id,
142 sc->eq[(i == 0) ? 0:(i-1)]);
143 if (rc)
144 goto error;
145 rq->queue_index = i;
146 }
147
148 sc->mq = oce_mq_create(sc, sc->eq[0], 64);
149 if (!sc->mq)
150 goto error;
151
152 return rc;
153
154error:
155 oce_queue_release_all(sc);
156 return 1;
157}
158
159
160
161/**
162 * @brief Releases all mailbox queues created
163 * @param sc software handle to the device
164 */
165void
166oce_queue_release_all(POCE_SOFTC sc)
167{
168 int i = 0;
169 struct oce_wq *wq;
170 struct oce_rq *rq;
171 struct oce_eq *eq;
172
173 /* before deleting lro queues, we have to disable hwlro */
174 if(sc->enable_hwlro)
175 oce_mbox_nic_set_iface_lro_config(sc, 0);
176
172 for_all_rq_queues(sc, rq, i) {
173 if (rq) {
174 oce_rq_del(sc->rq[i]);
175 oce_rq_free(sc->rq[i]);
176 }
177 }
178
179 for_all_wq_queues(sc, wq, i) {
180 if (wq) {
181 oce_wq_del(sc->wq[i]);
182 oce_wq_free(sc->wq[i]);
183 }
184 }
185
186 if (sc->mq)
187 oce_mq_free(sc->mq);
188
189 for_all_evnt_queues(sc, eq, i) {
190 if (eq)
191 oce_eq_del(sc->eq[i]);
192 }
193}
194
195
196
197/**
198 * @brief Function to create a WQ for NIC Tx
199 * @param sc software handle to the device
200 * @param qlen number of entries in the queue
201 * @param wq_type work queue type
202 * @returns the pointer to the WQ created or NULL on failure
203 */
204static struct
205oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
206{
207 struct oce_wq *wq;
208 int rc = 0, i;
209
210 /* q_len must be min 256 and max 2k */
211 if (q_len < 256 || q_len > 2048) {
212 device_printf(sc->dev,
213 "Invalid q length. Must be "
214 "[256, 2000]: 0x%x\n", q_len);
215 return NULL;
216 }
217
218 /* allocate wq */
219 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
220 if (!wq)
221 return NULL;
222
223 /* Set the wq config */
224 wq->cfg.q_len = q_len;
225 wq->cfg.wq_type = (uint8_t) wq_type;
226 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
227 wq->cfg.nbufs = 2 * wq->cfg.q_len;
228 wq->cfg.nhdl = 2 * wq->cfg.q_len;
229
230 wq->parent = (void *)sc;
231
232 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
233 1, 0,
234 BUS_SPACE_MAXADDR,
235 BUS_SPACE_MAXADDR,
236 NULL, NULL,
237 OCE_MAX_TX_SIZE,
238 OCE_MAX_TX_ELEMENTS,
239 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
240
241 if (rc)
242 goto free_wq;
243
244
245 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
246 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
247 if (rc)
248 goto free_wq;
249 }
250
251 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
252 if (!wq->ring)
253 goto free_wq;
254
255
256 LOCK_CREATE(&wq->tx_lock, "TX_lock");
177 for_all_rq_queues(sc, rq, i) {
178 if (rq) {
179 oce_rq_del(sc->rq[i]);
180 oce_rq_free(sc->rq[i]);
181 }
182 }
183
184 for_all_wq_queues(sc, wq, i) {
185 if (wq) {
186 oce_wq_del(sc->wq[i]);
187 oce_wq_free(sc->wq[i]);
188 }
189 }
190
191 if (sc->mq)
192 oce_mq_free(sc->mq);
193
194 for_all_evnt_queues(sc, eq, i) {
195 if (eq)
196 oce_eq_del(sc->eq[i]);
197 }
198}
199
200
201
202/**
203 * @brief Function to create a WQ for NIC Tx
204 * @param sc software handle to the device
205 * @param qlen number of entries in the queue
206 * @param wq_type work queue type
207 * @returns the pointer to the WQ created or NULL on failure
208 */
209static struct
210oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
211{
212 struct oce_wq *wq;
213 int rc = 0, i;
214
215 /* q_len must be min 256 and max 2k */
216 if (q_len < 256 || q_len > 2048) {
217 device_printf(sc->dev,
218 "Invalid q length. Must be "
219 "[256, 2000]: 0x%x\n", q_len);
220 return NULL;
221 }
222
223 /* allocate wq */
224 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
225 if (!wq)
226 return NULL;
227
228 /* Set the wq config */
229 wq->cfg.q_len = q_len;
230 wq->cfg.wq_type = (uint8_t) wq_type;
231 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
232 wq->cfg.nbufs = 2 * wq->cfg.q_len;
233 wq->cfg.nhdl = 2 * wq->cfg.q_len;
234
235 wq->parent = (void *)sc;
236
237 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
238 1, 0,
239 BUS_SPACE_MAXADDR,
240 BUS_SPACE_MAXADDR,
241 NULL, NULL,
242 OCE_MAX_TX_SIZE,
243 OCE_MAX_TX_ELEMENTS,
244 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
245
246 if (rc)
247 goto free_wq;
248
249
250 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
251 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
252 if (rc)
253 goto free_wq;
254 }
255
256 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
257 if (!wq->ring)
258 goto free_wq;
259
260
261 LOCK_CREATE(&wq->tx_lock, "TX_lock");
262 LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");
257
258#if __FreeBSD_version >= 800000
259 /* Allocate buf ring for multiqueue*/
260 wq->br = buf_ring_alloc(4096, M_DEVBUF,
261 M_WAITOK, &wq->tx_lock.mutex);
262 if (!wq->br)
263 goto free_wq;
264#endif
265 return wq;
266
267
268free_wq:
269 device_printf(sc->dev, "Create WQ failed\n");
270 oce_wq_free(wq);
271 return NULL;
272}
273
274
275
276/**
277 * @brief Frees the work queue
278 * @param wq pointer to work queue to free
279 */
280static void
281oce_wq_free(struct oce_wq *wq)
282{
283 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
284 int i;
285
286 taskqueue_drain(taskqueue_swi, &wq->txtask);
287
288 if (wq->ring != NULL) {
289 oce_destroy_ring_buffer(sc, wq->ring);
290 wq->ring = NULL;
291 }
292
293 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
294 if (wq->pckts[i].map != NULL) {
295 bus_dmamap_unload(wq->tag, wq->pckts[i].map);
296 bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
297 wq->pckts[i].map = NULL;
298 }
299 }
300
301 if (wq->tag != NULL)
302 bus_dma_tag_destroy(wq->tag);
303 if (wq->br != NULL)
304 buf_ring_free(wq->br, M_DEVBUF);
305
306 LOCK_DESTROY(&wq->tx_lock);
263
264#if __FreeBSD_version >= 800000
265 /* Allocate buf ring for multiqueue*/
266 wq->br = buf_ring_alloc(4096, M_DEVBUF,
267 M_WAITOK, &wq->tx_lock.mutex);
268 if (!wq->br)
269 goto free_wq;
270#endif
271 return wq;
272
273
274free_wq:
275 device_printf(sc->dev, "Create WQ failed\n");
276 oce_wq_free(wq);
277 return NULL;
278}
279
280
281
282/**
283 * @brief Frees the work queue
284 * @param wq pointer to work queue to free
285 */
286static void
287oce_wq_free(struct oce_wq *wq)
288{
289 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
290 int i;
291
292 taskqueue_drain(taskqueue_swi, &wq->txtask);
293
294 if (wq->ring != NULL) {
295 oce_destroy_ring_buffer(sc, wq->ring);
296 wq->ring = NULL;
297 }
298
299 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
300 if (wq->pckts[i].map != NULL) {
301 bus_dmamap_unload(wq->tag, wq->pckts[i].map);
302 bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
303 wq->pckts[i].map = NULL;
304 }
305 }
306
307 if (wq->tag != NULL)
308 bus_dma_tag_destroy(wq->tag);
309 if (wq->br != NULL)
310 buf_ring_free(wq->br, M_DEVBUF);
311
312 LOCK_DESTROY(&wq->tx_lock);
313 LOCK_DESTROY(&wq->tx_compl_lock);
307 free(wq, M_DEVBUF);
308}
309
310
311
312/**
313 * @brief Create a work queue
314 * @param wq pointer to work queue
315 * @param eq pointer to associated event queue
316 */
317static int
318oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
319{
320 POCE_SOFTC sc = wq->parent;
321 struct oce_cq *cq;
322 int rc = 0;
323
324 /* create the CQ */
325 cq = oce_cq_create(sc,
326 eq,
327 CQ_LEN_1024,
328 sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
329 if (!cq)
330 return ENXIO;
331
332
333 wq->cq = cq;
334
335 rc = oce_mbox_create_wq(wq);
336 if (rc)
337 goto error;
338
339 wq->qstate = QCREATED;
340 wq->wq_free = wq->cfg.q_len;
341 wq->ring->cidx = 0;
342 wq->ring->pidx = 0;
343
344 eq->cq[eq->cq_valid] = cq;
345 eq->cq_valid++;
346 cq->cb_arg = wq;
347 cq->cq_handler = oce_wq_handler;
348
349 return 0;
350
351error:
352 device_printf(sc->dev, "WQ create failed\n");
353 oce_wq_del(wq);
354 return rc;
355}
356
357
358
359
360/**
361 * @brief Delete a work queue
362 * @param wq pointer to work queue
363 */
364static void
365oce_wq_del(struct oce_wq *wq)
366{
367 struct oce_mbx mbx;
368 struct mbx_delete_nic_wq *fwcmd;
369 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
370
371 if (wq->qstate == QCREATED) {
372 bzero(&mbx, sizeof(struct oce_mbx));
373 /* now fill the command */
374 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
375 fwcmd->params.req.wq_id = wq->wq_id;
376 (void)oce_destroy_q(sc, &mbx,
314 free(wq, M_DEVBUF);
315}
316
317
318
319/**
320 * @brief Create a work queue
321 * @param wq pointer to work queue
322 * @param eq pointer to associated event queue
323 */
324static int
325oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
326{
327 POCE_SOFTC sc = wq->parent;
328 struct oce_cq *cq;
329 int rc = 0;
330
331 /* create the CQ */
332 cq = oce_cq_create(sc,
333 eq,
334 CQ_LEN_1024,
335 sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
336 if (!cq)
337 return ENXIO;
338
339
340 wq->cq = cq;
341
342 rc = oce_mbox_create_wq(wq);
343 if (rc)
344 goto error;
345
346 wq->qstate = QCREATED;
347 wq->wq_free = wq->cfg.q_len;
348 wq->ring->cidx = 0;
349 wq->ring->pidx = 0;
350
351 eq->cq[eq->cq_valid] = cq;
352 eq->cq_valid++;
353 cq->cb_arg = wq;
354 cq->cq_handler = oce_wq_handler;
355
356 return 0;
357
358error:
359 device_printf(sc->dev, "WQ create failed\n");
360 oce_wq_del(wq);
361 return rc;
362}
363
364
365
366
367/**
368 * @brief Delete a work queue
369 * @param wq pointer to work queue
370 */
371static void
372oce_wq_del(struct oce_wq *wq)
373{
374 struct oce_mbx mbx;
375 struct mbx_delete_nic_wq *fwcmd;
376 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
377
378 if (wq->qstate == QCREATED) {
379 bzero(&mbx, sizeof(struct oce_mbx));
380 /* now fill the command */
381 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
382 fwcmd->params.req.wq_id = wq->wq_id;
383 (void)oce_destroy_q(sc, &mbx,
377 sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
384 sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
378 wq->qstate = QDELETED;
379 }
380
381 if (wq->cq != NULL) {
382 oce_cq_del(sc, wq->cq);
383 wq->cq = NULL;
384 }
385}
386
387
388
389/**
390 * @brief function to allocate receive queue resources
391 * @param sc software handle to the device
392 * @param q_len length of receive queue
393 * @param frag_size size of an receive queue fragment
394 * @param mtu maximum transmission unit
395 * @param rss is-rss-queue flag
396 * @returns the pointer to the RQ created or NULL on failure
397 */
398static struct
399oce_rq *oce_rq_init(POCE_SOFTC sc,
400 uint32_t q_len,
401 uint32_t frag_size,
402 uint32_t mtu, uint32_t rss)
403{
404 struct oce_rq *rq;
405 int rc = 0, i;
406
407 if (OCE_LOG2(frag_size) <= 0)
408 return NULL;
409
410 if ((q_len == 0) || (q_len > 1024))
411 return NULL;
412
413 /* allocate the rq */
414 rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
415 if (!rq)
416 return NULL;
417
418
419 rq->cfg.q_len = q_len;
420 rq->cfg.frag_size = frag_size;
421 rq->cfg.mtu = mtu;
422 rq->cfg.eqd = 0;
423 rq->lro_pkts_queued = 0;
424 rq->cfg.is_rss_queue = rss;
385 wq->qstate = QDELETED;
386 }
387
388 if (wq->cq != NULL) {
389 oce_cq_del(sc, wq->cq);
390 wq->cq = NULL;
391 }
392}
393
394
395
396/**
397 * @brief function to allocate receive queue resources
398 * @param sc software handle to the device
399 * @param q_len length of receive queue
400 * @param frag_size size of an receive queue fragment
401 * @param mtu maximum transmission unit
402 * @param rss is-rss-queue flag
403 * @returns the pointer to the RQ created or NULL on failure
404 */
405static struct
406oce_rq *oce_rq_init(POCE_SOFTC sc,
407 uint32_t q_len,
408 uint32_t frag_size,
409 uint32_t mtu, uint32_t rss)
410{
411 struct oce_rq *rq;
412 int rc = 0, i;
413
414 if (OCE_LOG2(frag_size) <= 0)
415 return NULL;
416
417 if ((q_len == 0) || (q_len > 1024))
418 return NULL;
419
420 /* allocate the rq */
421 rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
422 if (!rq)
423 return NULL;
424
425
426 rq->cfg.q_len = q_len;
427 rq->cfg.frag_size = frag_size;
428 rq->cfg.mtu = mtu;
429 rq->cfg.eqd = 0;
430 rq->lro_pkts_queued = 0;
431 rq->cfg.is_rss_queue = rss;
425 rq->packets_in = 0;
426 rq->packets_out = 0;
427 rq->pending = 0;
428
429 rq->parent = (void *)sc;
430
431 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
432 rq->pending = 0;
433
434 rq->parent = (void *)sc;
435
436 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
432 1, 0,
433 BUS_SPACE_MAXADDR,
434 BUS_SPACE_MAXADDR,
435 NULL, NULL,
436 OCE_MAX_RX_SIZE,
437 1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
438
437 1, 0,
438 BUS_SPACE_MAXADDR,
439 BUS_SPACE_MAXADDR,
440 NULL, NULL,
441 oce_rq_buf_size,
442 1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
439 if (rc)
440 goto free_rq;
441
442 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
443 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
444 if (rc)
445 goto free_rq;
446 }
447
448 /* create the ring buffer */
449 rq->ring = oce_create_ring_buffer(sc, q_len,
450 sizeof(struct oce_nic_rqe));
451 if (!rq->ring)
452 goto free_rq;
453
454 LOCK_CREATE(&rq->rx_lock, "RX_lock");
455
456 return rq;
457
458free_rq:
459 device_printf(sc->dev, "Create RQ failed\n");
460 oce_rq_free(rq);
461 return NULL;
462}
463
464
465
466
467/**
468 * @brief Free a receive queue
469 * @param rq pointer to receive queue
470 */
471static void
472oce_rq_free(struct oce_rq *rq)
473{
474 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
475 int i = 0 ;
476
477 if (rq->ring != NULL) {
478 oce_destroy_ring_buffer(sc, rq->ring);
479 rq->ring = NULL;
480 }
481 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
482 if (rq->pckts[i].map != NULL) {
483 bus_dmamap_unload(rq->tag, rq->pckts[i].map);
484 bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
485 rq->pckts[i].map = NULL;
486 }
487 if (rq->pckts[i].mbuf) {
488 m_free(rq->pckts[i].mbuf);
489 rq->pckts[i].mbuf = NULL;
490 }
491 }
492
493 if (rq->tag != NULL)
494 bus_dma_tag_destroy(rq->tag);
495
496 LOCK_DESTROY(&rq->rx_lock);
497 free(rq, M_DEVBUF);
498}
499
500
501
502
503/**
504 * @brief Create a receive queue
505 * @param rq receive queue
506 * @param if_id interface identifier index`
507 * @param eq pointer to event queue
508 */
509static int
510oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
511{
512 POCE_SOFTC sc = rq->parent;
513 struct oce_cq *cq;
514
443 if (rc)
444 goto free_rq;
445
446 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
447 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
448 if (rc)
449 goto free_rq;
450 }
451
452 /* create the ring buffer */
453 rq->ring = oce_create_ring_buffer(sc, q_len,
454 sizeof(struct oce_nic_rqe));
455 if (!rq->ring)
456 goto free_rq;
457
458 LOCK_CREATE(&rq->rx_lock, "RX_lock");
459
460 return rq;
461
462free_rq:
463 device_printf(sc->dev, "Create RQ failed\n");
464 oce_rq_free(rq);
465 return NULL;
466}
467
468
469
470
471/**
472 * @brief Free a receive queue
473 * @param rq pointer to receive queue
474 */
475static void
476oce_rq_free(struct oce_rq *rq)
477{
478 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
479 int i = 0 ;
480
481 if (rq->ring != NULL) {
482 oce_destroy_ring_buffer(sc, rq->ring);
483 rq->ring = NULL;
484 }
485 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
486 if (rq->pckts[i].map != NULL) {
487 bus_dmamap_unload(rq->tag, rq->pckts[i].map);
488 bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
489 rq->pckts[i].map = NULL;
490 }
491 if (rq->pckts[i].mbuf) {
492 m_free(rq->pckts[i].mbuf);
493 rq->pckts[i].mbuf = NULL;
494 }
495 }
496
497 if (rq->tag != NULL)
498 bus_dma_tag_destroy(rq->tag);
499
500 LOCK_DESTROY(&rq->rx_lock);
501 free(rq, M_DEVBUF);
502}
503
504
505
506
507/**
508 * @brief Create a receive queue
509 * @param rq receive queue
510 * @param if_id interface identifier index`
511 * @param eq pointer to event queue
512 */
513static int
514oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
515{
516 POCE_SOFTC sc = rq->parent;
517 struct oce_cq *cq;
518
515 cq = oce_cq_create(sc,
516 eq,
517 CQ_LEN_1024,
518 sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
519 cq = oce_cq_create(sc, eq,
520 sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
521 sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
522
519 if (!cq)
520 return ENXIO;
521
522 rq->cq = cq;
523 rq->cfg.if_id = if_id;
524
525 /* Dont create RQ here. Create in if_activate */
526 rq->qstate = 0;
527 rq->ring->cidx = 0;
528 rq->ring->pidx = 0;
529 eq->cq[eq->cq_valid] = cq;
530 eq->cq_valid++;
531 cq->cb_arg = rq;
532 cq->cq_handler = oce_rq_handler;
533
534 return 0;
535
536}
537
538
539
540
541/**
542 * @brief Delete a receive queue
543 * @param rq receive queue
544 */
545static void
546oce_rq_del(struct oce_rq *rq)
547{
548 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
549 struct oce_mbx mbx;
550 struct mbx_delete_nic_rq *fwcmd;
523 if (!cq)
524 return ENXIO;
525
526 rq->cq = cq;
527 rq->cfg.if_id = if_id;
528
529 /* Dont create RQ here. Create in if_activate */
530 rq->qstate = 0;
531 rq->ring->cidx = 0;
532 rq->ring->pidx = 0;
533 eq->cq[eq->cq_valid] = cq;
534 eq->cq_valid++;
535 cq->cb_arg = rq;
536 cq->cq_handler = oce_rq_handler;
537
538 return 0;
539
540}
541
542
543
544
545/**
546 * @brief Delete a receive queue
547 * @param rq receive queue
548 */
549static void
550oce_rq_del(struct oce_rq *rq)
551{
552 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
553 struct oce_mbx mbx;
554 struct mbx_delete_nic_rq *fwcmd;
555 struct mbx_delete_nic_rq_v1 *fwcmd1;
551
552 if (rq->qstate == QCREATED) {
553 bzero(&mbx, sizeof(mbx));
556
557 if (rq->qstate == QCREATED) {
558 bzero(&mbx, sizeof(mbx));
554
555 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
556 fwcmd->params.req.rq_id = rq->rq_id;
557 (void)oce_destroy_q(sc, &mbx,
558 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
559 if(!rq->islro) {
560 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
561 fwcmd->params.req.rq_id = rq->rq_id;
562 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
563 }else {
564 fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
565 fwcmd1->params.req.rq_id = rq->rq_id;
566 fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
567 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
568 }
559 rq->qstate = QDELETED;
560 }
561
562 if (rq->cq != NULL) {
563 oce_cq_del(sc, rq->cq);
564 rq->cq = NULL;
565 }
566}
567
568
569
570/**
571 * @brief function to create an event queue
572 * @param sc software handle to the device
573 * @param q_len length of event queue
574 * @param item_size size of an event queue item
575 * @param eq_delay event queue delay
576 * @retval eq success, pointer to event queue
577 * @retval NULL failure
578 */
579static struct
580oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
581 uint32_t item_size,
582 uint32_t eq_delay,
583 uint32_t vector)
584{
585 struct oce_eq *eq;
586 int rc = 0;
587
588 /* allocate an eq */
589 eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
590 if (eq == NULL)
591 return NULL;
592
593 eq->parent = (void *)sc;
594 eq->eq_id = 0xffff;
595 eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
596 if (!eq->ring)
597 goto free_eq;
598
599 eq->eq_cfg.q_len = q_len;
600 eq->eq_cfg.item_size = item_size;
601 eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
602
603 rc = oce_mbox_create_eq(eq);
604 if (rc)
605 goto free_eq;
606
607 sc->intrs[sc->neqs++].eq = eq;
608
609 return eq;
610
611free_eq:
612 oce_eq_del(eq);
613 return NULL;
614}
615
616
617
618
619/**
620 * @brief Function to delete an event queue
621 * @param eq pointer to an event queue
622 */
623static void
624oce_eq_del(struct oce_eq *eq)
625{
626 struct oce_mbx mbx;
627 struct mbx_destroy_common_eq *fwcmd;
628 POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
629
630 if (eq->eq_id != 0xffff) {
631 bzero(&mbx, sizeof(mbx));
632 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
633 fwcmd->params.req.id = eq->eq_id;
634 (void)oce_destroy_q(sc, &mbx,
569 rq->qstate = QDELETED;
570 }
571
572 if (rq->cq != NULL) {
573 oce_cq_del(sc, rq->cq);
574 rq->cq = NULL;
575 }
576}
577
578
579
580/**
581 * @brief function to create an event queue
582 * @param sc software handle to the device
583 * @param q_len length of event queue
584 * @param item_size size of an event queue item
585 * @param eq_delay event queue delay
586 * @retval eq success, pointer to event queue
587 * @retval NULL failure
588 */
589static struct
590oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
591 uint32_t item_size,
592 uint32_t eq_delay,
593 uint32_t vector)
594{
595 struct oce_eq *eq;
596 int rc = 0;
597
598 /* allocate an eq */
599 eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
600 if (eq == NULL)
601 return NULL;
602
603 eq->parent = (void *)sc;
604 eq->eq_id = 0xffff;
605 eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
606 if (!eq->ring)
607 goto free_eq;
608
609 eq->eq_cfg.q_len = q_len;
610 eq->eq_cfg.item_size = item_size;
611 eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
612
613 rc = oce_mbox_create_eq(eq);
614 if (rc)
615 goto free_eq;
616
617 sc->intrs[sc->neqs++].eq = eq;
618
619 return eq;
620
621free_eq:
622 oce_eq_del(eq);
623 return NULL;
624}
625
626
627
628
629/**
630 * @brief Function to delete an event queue
631 * @param eq pointer to an event queue
632 */
633static void
634oce_eq_del(struct oce_eq *eq)
635{
636 struct oce_mbx mbx;
637 struct mbx_destroy_common_eq *fwcmd;
638 POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
639
640 if (eq->eq_id != 0xffff) {
641 bzero(&mbx, sizeof(mbx));
642 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
643 fwcmd->params.req.id = eq->eq_id;
644 (void)oce_destroy_q(sc, &mbx,
635 sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
645 sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
636 }
637
638 if (eq->ring != NULL) {
639 oce_destroy_ring_buffer(sc, eq->ring);
640 eq->ring = NULL;
641 }
642
643 free(eq, M_DEVBUF);
644
645}
646
647
648
649
650/**
651 * @brief Function to create an MQ
652 * @param sc software handle to the device
653 * @param eq the EQ to associate with the MQ for event notification
654 * @param q_len the number of entries to create in the MQ
655 * @returns pointer to the created MQ, failure otherwise
656 */
657static struct oce_mq *
658oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
659{
660 struct oce_mbx mbx;
661 struct mbx_create_common_mq_ex *fwcmd = NULL;
662 struct oce_mq *mq = NULL;
663 int rc = 0;
664 struct oce_cq *cq;
665 oce_mq_ext_ctx_t *ctx;
666 uint32_t num_pages;
667 uint32_t page_size;
668 int version;
669
670 cq = oce_cq_create(sc, eq, CQ_LEN_256,
671 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
672 if (!cq)
673 return NULL;
674
675 /* allocate the mq */
676 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
677 if (!mq) {
678 oce_cq_del(sc, cq);
679 goto error;
680 }
681
682 mq->parent = sc;
683
684 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
685 if (!mq->ring)
686 goto error;
687
688 bzero(&mbx, sizeof(struct oce_mbx));
689
690 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
691 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
692 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
693 MBX_SUBSYSTEM_COMMON,
694 OPCODE_COMMON_CREATE_MQ_EXT,
695 MBX_TIMEOUT_SEC,
696 sizeof(struct mbx_create_common_mq_ex),
697 version);
698
699 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
700 page_size = mq->ring->num_items * mq->ring->item_size;
701
702 ctx = &fwcmd->params.req.context;
703
704 if (IS_XE201(sc)) {
705 ctx->v1.num_pages = num_pages;
706 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
707 ctx->v1.cq_id = cq->cq_id;
708 ctx->v1.valid = 1;
709 ctx->v1.async_cq_id = cq->cq_id;
710 ctx->v1.async_cq_valid = 1;
711 /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
712 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
713 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
714 ctx->v1.async_evt_bitmap |=
715 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
716 }
717 else {
718 ctx->v0.num_pages = num_pages;
719 ctx->v0.cq_id = cq->cq_id;
720 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
721 ctx->v0.valid = 1;
722 /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
723 ctx->v0.async_evt_bitmap = 0xffffffff;
724 }
725
726 mbx.u0.s.embedded = 1;
727 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
728 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
729
730 rc = oce_mbox_post(sc, &mbx, NULL);
731 if (!rc)
732 rc = fwcmd->hdr.u0.rsp.status;
733 if (rc) {
734 device_printf(sc->dev,"%s failed - cmd status: %d\n",
735 __FUNCTION__, rc);
736 goto error;
737 }
738 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
739 mq->cq = cq;
740 eq->cq[eq->cq_valid] = cq;
741 eq->cq_valid++;
742 mq->cq->eq = eq;
743 mq->cfg.q_len = (uint8_t) q_len;
744 mq->cfg.eqd = 0;
745 mq->qstate = QCREATED;
746
747 mq->cq->cb_arg = mq;
748 mq->cq->cq_handler = oce_mq_handler;
749
750 return mq;
751
752error:
753 device_printf(sc->dev, "MQ create failed\n");
754 oce_mq_free(mq);
755 mq = NULL;
756 return mq;
757}
758
759
760
761
762
763/**
764 * @brief Function to free a mailbox queue
765 * @param mq pointer to a mailbox queue
766 */
767static void
768oce_mq_free(struct oce_mq *mq)
769{
770 POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
771 struct oce_mbx mbx;
772 struct mbx_destroy_common_mq *fwcmd;
773
774 if (!mq)
775 return;
776
777 if (mq->ring != NULL) {
778 oce_destroy_ring_buffer(sc, mq->ring);
779 mq->ring = NULL;
780 if (mq->qstate == QCREATED) {
781 bzero(&mbx, sizeof (struct oce_mbx));
782 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
783 fwcmd->params.req.id = mq->mq_id;
784 (void) oce_destroy_q(sc, &mbx,
785 sizeof (struct mbx_destroy_common_mq),
646 }
647
648 if (eq->ring != NULL) {
649 oce_destroy_ring_buffer(sc, eq->ring);
650 eq->ring = NULL;
651 }
652
653 free(eq, M_DEVBUF);
654
655}
656
657
658
659
660/**
661 * @brief Function to create an MQ
662 * @param sc software handle to the device
663 * @param eq the EQ to associate with the MQ for event notification
664 * @param q_len the number of entries to create in the MQ
665 * @returns pointer to the created MQ, failure otherwise
666 */
667static struct oce_mq *
668oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
669{
670 struct oce_mbx mbx;
671 struct mbx_create_common_mq_ex *fwcmd = NULL;
672 struct oce_mq *mq = NULL;
673 int rc = 0;
674 struct oce_cq *cq;
675 oce_mq_ext_ctx_t *ctx;
676 uint32_t num_pages;
677 uint32_t page_size;
678 int version;
679
680 cq = oce_cq_create(sc, eq, CQ_LEN_256,
681 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
682 if (!cq)
683 return NULL;
684
685 /* allocate the mq */
686 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
687 if (!mq) {
688 oce_cq_del(sc, cq);
689 goto error;
690 }
691
692 mq->parent = sc;
693
694 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
695 if (!mq->ring)
696 goto error;
697
698 bzero(&mbx, sizeof(struct oce_mbx));
699
700 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
701 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
702 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
703 MBX_SUBSYSTEM_COMMON,
704 OPCODE_COMMON_CREATE_MQ_EXT,
705 MBX_TIMEOUT_SEC,
706 sizeof(struct mbx_create_common_mq_ex),
707 version);
708
709 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
710 page_size = mq->ring->num_items * mq->ring->item_size;
711
712 ctx = &fwcmd->params.req.context;
713
714 if (IS_XE201(sc)) {
715 ctx->v1.num_pages = num_pages;
716 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
717 ctx->v1.cq_id = cq->cq_id;
718 ctx->v1.valid = 1;
719 ctx->v1.async_cq_id = cq->cq_id;
720 ctx->v1.async_cq_valid = 1;
721 /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
722 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
723 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
724 ctx->v1.async_evt_bitmap |=
725 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
726 }
727 else {
728 ctx->v0.num_pages = num_pages;
729 ctx->v0.cq_id = cq->cq_id;
730 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
731 ctx->v0.valid = 1;
732 /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
733 ctx->v0.async_evt_bitmap = 0xffffffff;
734 }
735
736 mbx.u0.s.embedded = 1;
737 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
738 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
739
740 rc = oce_mbox_post(sc, &mbx, NULL);
741 if (!rc)
742 rc = fwcmd->hdr.u0.rsp.status;
743 if (rc) {
744 device_printf(sc->dev,"%s failed - cmd status: %d\n",
745 __FUNCTION__, rc);
746 goto error;
747 }
748 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
749 mq->cq = cq;
750 eq->cq[eq->cq_valid] = cq;
751 eq->cq_valid++;
752 mq->cq->eq = eq;
753 mq->cfg.q_len = (uint8_t) q_len;
754 mq->cfg.eqd = 0;
755 mq->qstate = QCREATED;
756
757 mq->cq->cb_arg = mq;
758 mq->cq->cq_handler = oce_mq_handler;
759
760 return mq;
761
762error:
763 device_printf(sc->dev, "MQ create failed\n");
764 oce_mq_free(mq);
765 mq = NULL;
766 return mq;
767}
768
769
770
771
772
773/**
774 * @brief Function to free a mailbox queue
775 * @param mq pointer to a mailbox queue
776 */
777static void
778oce_mq_free(struct oce_mq *mq)
779{
780 POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
781 struct oce_mbx mbx;
782 struct mbx_destroy_common_mq *fwcmd;
783
784 if (!mq)
785 return;
786
787 if (mq->ring != NULL) {
788 oce_destroy_ring_buffer(sc, mq->ring);
789 mq->ring = NULL;
790 if (mq->qstate == QCREATED) {
791 bzero(&mbx, sizeof (struct oce_mbx));
792 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
793 fwcmd->params.req.id = mq->mq_id;
794 (void) oce_destroy_q(sc, &mbx,
795 sizeof (struct mbx_destroy_common_mq),
786 QTYPE_MQ);
796 QTYPE_MQ, 0);
787 }
788 mq->qstate = QDELETED;
789 }
790
791 if (mq->cq != NULL) {
792 oce_cq_del(sc, mq->cq);
793 mq->cq = NULL;
794 }
795
796 free(mq, M_DEVBUF);
797 mq = NULL;
798}
799
800
801
802/**
803 * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
804 * @param sc sofware handle to the device
805 * @param mbx mailbox command to send to the fw to delete the queue
806 * (mbx contains the queue information to delete)
807 * @param req_size the size of the mbx payload dependent on the qtype
808 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
809 * @returns 0 on success, failure otherwise
810 */
811static int
812oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
797 }
798 mq->qstate = QDELETED;
799 }
800
801 if (mq->cq != NULL) {
802 oce_cq_del(sc, mq->cq);
803 mq->cq = NULL;
804 }
805
806 free(mq, M_DEVBUF);
807 mq = NULL;
808}
809
810
811
812/**
813 * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
814 * @param sc sofware handle to the device
815 * @param mbx mailbox command to send to the fw to delete the queue
816 * (mbx contains the queue information to delete)
817 * @param req_size the size of the mbx payload dependent on the qtype
818 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
819 * @returns 0 on success, failure otherwise
820 */
821static int
822oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
813 enum qtype qtype)
823 enum qtype qtype, int version)
814{
815 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
816 int opcode;
817 int subsys;
818 int rc = 0;
819
820 switch (qtype) {
821 case QTYPE_EQ:
822 opcode = OPCODE_COMMON_DESTROY_EQ;
823 subsys = MBX_SUBSYSTEM_COMMON;
824 break;
825 case QTYPE_CQ:
826 opcode = OPCODE_COMMON_DESTROY_CQ;
827 subsys = MBX_SUBSYSTEM_COMMON;
828 break;
829 case QTYPE_MQ:
830 opcode = OPCODE_COMMON_DESTROY_MQ;
831 subsys = MBX_SUBSYSTEM_COMMON;
832 break;
833 case QTYPE_WQ:
834 opcode = NIC_DELETE_WQ;
835 subsys = MBX_SUBSYSTEM_NIC;
836 break;
837 case QTYPE_RQ:
838 opcode = NIC_DELETE_RQ;
839 subsys = MBX_SUBSYSTEM_NIC;
840 break;
841 default:
842 return EINVAL;
843 }
844
845 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
846 opcode, MBX_TIMEOUT_SEC, req_size,
824{
825 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
826 int opcode;
827 int subsys;
828 int rc = 0;
829
830 switch (qtype) {
831 case QTYPE_EQ:
832 opcode = OPCODE_COMMON_DESTROY_EQ;
833 subsys = MBX_SUBSYSTEM_COMMON;
834 break;
835 case QTYPE_CQ:
836 opcode = OPCODE_COMMON_DESTROY_CQ;
837 subsys = MBX_SUBSYSTEM_COMMON;
838 break;
839 case QTYPE_MQ:
840 opcode = OPCODE_COMMON_DESTROY_MQ;
841 subsys = MBX_SUBSYSTEM_COMMON;
842 break;
843 case QTYPE_WQ:
844 opcode = NIC_DELETE_WQ;
845 subsys = MBX_SUBSYSTEM_NIC;
846 break;
847 case QTYPE_RQ:
848 opcode = NIC_DELETE_RQ;
849 subsys = MBX_SUBSYSTEM_NIC;
850 break;
851 default:
852 return EINVAL;
853 }
854
855 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
856 opcode, MBX_TIMEOUT_SEC, req_size,
847 OCE_MBX_VER_V0);
857 version);
848
849 mbx->u0.s.embedded = 1;
850 mbx->payload_length = (uint32_t) req_size;
851 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
852
853 rc = oce_mbox_post(sc, mbx, NULL);
854 if (!rc)
855 rc = hdr->u0.rsp.status;
856 if (rc)
857 device_printf(sc->dev,"%s failed - cmd status: %d\n",
858 __FUNCTION__, rc);
859 return rc;
860}
861
862
863
864/**
865 * @brief Function to create a completion queue
866 * @param sc software handle to the device
867 * @param eq optional eq to be associated with to the cq
868 * @param q_len length of completion queue
869 * @param item_size size of completion queue items
870 * @param sol_event command context event
871 * @param is_eventable event table
872 * @param nodelay no delay flag
873 * @param ncoalesce no coalescence flag
874 * @returns pointer to the cq created, NULL on failure
875 */
876struct oce_cq *
877oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
878 uint32_t q_len,
879 uint32_t item_size,
880 uint32_t sol_event,
881 uint32_t is_eventable,
882 uint32_t nodelay, uint32_t ncoalesce)
883{
884 struct oce_cq *cq = NULL;
885 int rc = 0;
886
887 cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
888 if (!cq)
889 return NULL;
890
891 cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
892 if (!cq->ring)
893 goto error;
894
895 cq->parent = sc;
896 cq->eq = eq;
897 cq->cq_cfg.q_len = q_len;
898 cq->cq_cfg.item_size = item_size;
899 cq->cq_cfg.nodelay = (uint8_t) nodelay;
900
901 rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
902 if (rc)
903 goto error;
904
905 sc->cq[sc->ncqs++] = cq;
906
907 return cq;
908
909error:
910 device_printf(sc->dev, "CQ create failed\n");
911 oce_cq_del(sc, cq);
912 return NULL;
913}
914
915
916
917/**
918 * @brief Deletes the completion queue
919 * @param sc software handle to the device
920 * @param cq pointer to a completion queue
921 */
922static void
923oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
924{
925 struct oce_mbx mbx;
926 struct mbx_destroy_common_cq *fwcmd;
927
928 if (cq->ring != NULL) {
929
930 bzero(&mbx, sizeof(struct oce_mbx));
931 /* now fill the command */
932 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
933 fwcmd->params.req.id = cq->cq_id;
934 (void)oce_destroy_q(sc, &mbx,
858
859 mbx->u0.s.embedded = 1;
860 mbx->payload_length = (uint32_t) req_size;
861 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
862
863 rc = oce_mbox_post(sc, mbx, NULL);
864 if (!rc)
865 rc = hdr->u0.rsp.status;
866 if (rc)
867 device_printf(sc->dev,"%s failed - cmd status: %d\n",
868 __FUNCTION__, rc);
869 return rc;
870}
871
872
873
874/**
875 * @brief Function to create a completion queue
876 * @param sc software handle to the device
877 * @param eq optional eq to be associated with to the cq
878 * @param q_len length of completion queue
879 * @param item_size size of completion queue items
880 * @param sol_event command context event
881 * @param is_eventable event table
882 * @param nodelay no delay flag
883 * @param ncoalesce no coalescence flag
884 * @returns pointer to the cq created, NULL on failure
885 */
886struct oce_cq *
887oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
888 uint32_t q_len,
889 uint32_t item_size,
890 uint32_t sol_event,
891 uint32_t is_eventable,
892 uint32_t nodelay, uint32_t ncoalesce)
893{
894 struct oce_cq *cq = NULL;
895 int rc = 0;
896
897 cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
898 if (!cq)
899 return NULL;
900
901 cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
902 if (!cq->ring)
903 goto error;
904
905 cq->parent = sc;
906 cq->eq = eq;
907 cq->cq_cfg.q_len = q_len;
908 cq->cq_cfg.item_size = item_size;
909 cq->cq_cfg.nodelay = (uint8_t) nodelay;
910
911 rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
912 if (rc)
913 goto error;
914
915 sc->cq[sc->ncqs++] = cq;
916
917 return cq;
918
919error:
920 device_printf(sc->dev, "CQ create failed\n");
921 oce_cq_del(sc, cq);
922 return NULL;
923}
924
925
926
927/**
928 * @brief Deletes the completion queue
929 * @param sc software handle to the device
930 * @param cq pointer to a completion queue
931 */
932static void
933oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
934{
935 struct oce_mbx mbx;
936 struct mbx_destroy_common_cq *fwcmd;
937
938 if (cq->ring != NULL) {
939
940 bzero(&mbx, sizeof(struct oce_mbx));
941 /* now fill the command */
942 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
943 fwcmd->params.req.id = cq->cq_id;
944 (void)oce_destroy_q(sc, &mbx,
935 sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
945 sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
936 /*NOW destroy the ring */
937 oce_destroy_ring_buffer(sc, cq->ring);
938 cq->ring = NULL;
939 }
940
941 free(cq, M_DEVBUF);
942 cq = NULL;
943}
944
945
946
947/**
948 * @brief Start a receive queue
949 * @param rq pointer to a receive queue
950 */
951int
952oce_start_rq(struct oce_rq *rq)
953{
946 /*NOW destroy the ring */
947 oce_destroy_ring_buffer(sc, cq->ring);
948 cq->ring = NULL;
949 }
950
951 free(cq, M_DEVBUF);
952 cq = NULL;
953}
954
955
956
957/**
958 * @brief Start a receive queue
959 * @param rq pointer to a receive queue
960 */
961int
962oce_start_rq(struct oce_rq *rq)
963{
964 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
954 int rc;
955
965 int rc;
966
956 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
967 if(sc->enable_hwlro)
968 rc = oce_alloc_rx_bufs(rq, 960);
969 else
970 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);
957
958 if (rc == 0)
959 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
971
972 if (rc == 0)
973 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
974
960 return rc;
961}
962
963
964
965/**
966 * @brief Start a work queue
967 * @param wq pointer to a work queue
968 */
int
oce_start_wq(struct oce_wq *wq)
{
	/* Arm the WQ's completion queue (re-arm set, nothing popped). */
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}
975
976
977
978/**
979 * @brief Start a mailbox queue
980 * @param mq pointer to a mailbox queue
981 */
int
oce_start_mq(struct oce_mq *mq)
{
	/* Arm the MQ's completion queue (re-arm set, nothing popped). */
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}
988
989
990
991/**
992 * @brief Function to arm an EQ so that it can generate events
993 * @param sc software handle to the device
994 * @param qid id of the EQ returned by the fw at the time of creation
995 * @param npopped number of EQEs to arm
996 * @param rearm rearm bit enable/disable
997 * @param clearint bit to clear the interrupt condition because of which
998 * EQEs are generated
999 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	/* Compose the EQ doorbell word and write it to the PD_EQ_DB register. */
	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;		/* event bit set: this doorbell targets an EQ */
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;	/* clear the pending interrupt condition */
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);

}
1014
1015
1016
1017
1018/**
1019 * @brief Function to arm a CQ with CQEs
1020 * @param sc software handle to the device
1021 * @param qid id of the CQ returned by the fw at the time of creation
1022 * @param npopped number of CQEs to arm
1023 * @param rearm rearm bit enable/disable
1024 */
void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	/* Compose the CQ doorbell word and write it to the PD_CQ_DB register. */
	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;	/* event bit clear: this doorbell targets a CQ, not an EQ */
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);

}
1036
1037
1038
1039
1040/*
1041 * @brief function to cleanup the eqs used during stop
1042 * @param eq pointer to event queue structure
1043 * @returns the number of EQs processed
1044 */
void
oce_drain_eq(struct oce_eq *eq)
{

	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	/* Consume every pending EQE: clear its valid ('evnt') bit, flush the
	 * write back to the DMA ring, and advance the consumer index. */
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);

	} while (TRUE);

	/* Report the popped count; rearm=FALSE leaves the EQ disarmed,
	 * clearint=TRUE clears the interrupt condition. */
	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);

}
1068
1069
1070
/* Drain all pending TX completions from a WQ's CQ without processing them,
 * then report the popped count to hardware with the CQ left disarmed. */
void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

	/* dw[3] holds the CQE valid bit; zero means no more entries */
	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;	/* invalidate the consumed CQE */
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;

	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

}
1097
1098
1099/*
1100 * @brief function to drain a MCQ and process its CQEs
1101 * @param dev software handle to the device
1102 * @param cq pointer to the cq to drain
1103 * @returns the number of CQEs processed
1104 */
void
oce_drain_mq_cq(void *arg)
{
	/* Intentionally a stub: MCQ draining is not implemented yet. */
	/* TODO: additional code. */
	return;
}
1111
1112
1113
1114/**
1115 * @brief function to process a Recieve queue
1116 * @param arg pointer to the RQ to charge
1117 * @return number of cqes processed
1118 */
/* Invalidate every valid RX CQE on the RQ's completion queue, then report
 * the popped count to hardware with the CQ left disarmed. */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
						struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}
1142
1143
1144void
1145oce_free_posted_rxbuf(struct oce_rq *rq)
1146{
1147 struct oce_packet_desc *pd;
1148
1149 while (rq->pending) {
1150
975 return rc;
976}
977
978
979
980/**
981 * @brief Start a work queue
982 * @param wq pointer to a work queue
983 */
int
oce_start_wq(struct oce_wq *wq)
{
	/* Arm the WQ's completion queue (re-arm set, nothing popped). */
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
	return 0;
}
990
991
992
993/**
994 * @brief Start a mailbox queue
995 * @param mq pointer to a mailbox queue
996 */
int
oce_start_mq(struct oce_mq *mq)
{
	/* Arm the MQ's completion queue (re-arm set, nothing popped). */
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
	return 0;
}
1003
1004
1005
1006/**
1007 * @brief Function to arm an EQ so that it can generate events
1008 * @param sc software handle to the device
1009 * @param qid id of the EQ returned by the fw at the time of creation
1010 * @param npopped number of EQEs to arm
1011 * @param rearm rearm bit enable/disable
1012 * @param clearint bit to clear the interrupt condition because of which
1013 * EQEs are generated
1014 */
void
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
{
	eq_db_t eq_db = { 0 };

	/* Compose the EQ doorbell word and write it to the PD_EQ_DB register. */
	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;		/* event bit set: this doorbell targets an EQ */
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;	/* clear the pending interrupt condition */
	eq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);

}
1029
1030
1031
1032
1033/**
1034 * @brief Function to arm a CQ with CQEs
1035 * @param sc software handle to the device
1036 * @param qid id of the CQ returned by the fw at the time of creation
1037 * @param npopped number of CQEs to arm
1038 * @param rearm rearm bit enable/disable
1039 */
void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
{
	cq_db_t cq_db = { 0 };

	/* Compose the CQ doorbell word and write it to the PD_CQ_DB register. */
	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;	/* event bit clear: this doorbell targets a CQ, not an EQ */
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);

}
1051
1052
1053
1054
1055/*
1056 * @brief function to cleanup the eqs used during stop
1057 * @param eq pointer to event queue structure
1058 * @returns the number of EQs processed
1059 */
void
oce_drain_eq(struct oce_eq *eq)
{

	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	/* Consume every pending EQE: clear its valid ('evnt') bit, flush the
	 * write back to the DMA ring, and advance the consumer index. */
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);

	} while (TRUE);

	/* Report the popped count; rearm=FALSE leaves the EQ disarmed,
	 * clearint=TRUE clears the interrupt condition. */
	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);

}
1083
1084
1085
/* Drain all pending TX completions from a WQ's CQ without processing them,
 * then report the popped count to hardware with the CQ left disarmed. */
void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

	/* dw[3] holds the CQE valid bit; zero means no more entries */
	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;	/* invalidate the consumed CQE */
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;

	} while (TRUE);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

}
1112
1113
1114/*
1115 * @brief function to drain a MCQ and process its CQEs
1116 * @param dev software handle to the device
1117 * @param cq pointer to the cq to drain
1118 * @returns the number of CQEs processed
1119 */
void
oce_drain_mq_cq(void *arg)
{
	/* Intentionally a stub: MCQ draining is not implemented yet. */
	/* TODO: additional code. */
	return;
}
1126
1127
1128
1129/**
1130 * @brief function to process a Recieve queue
1131 * @param arg pointer to the RQ to charge
1132 * @return number of cqes processed
1133 */
/* Invalidate every valid RX CQE on the RQ's completion queue, then report
 * the popped count to hardware with the CQ left disarmed. */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
						struct oce_nic_rx_cqe);
		num_cqe++;
	}
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}
1157
1158
1159void
1160oce_free_posted_rxbuf(struct oce_rq *rq)
1161{
1162 struct oce_packet_desc *pd;
1163
1164 while (rq->pending) {
1165
1151 pd = &rq->pckts[rq->packets_out];
1166 pd = &rq->pckts[rq->ring->cidx];
1152 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1153 bus_dmamap_unload(rq->tag, pd->map);
1154 if (pd->mbuf != NULL) {
1155 m_freem(pd->mbuf);
1156 pd->mbuf = NULL;
1157 }
1158
1167 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1168 bus_dmamap_unload(rq->tag, pd->map);
1169 if (pd->mbuf != NULL) {
1170 m_freem(pd->mbuf);
1171 pd->mbuf = NULL;
1172 }
1173
1159 if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1160 rq->packets_out = 0;
1161 else
1162 rq->packets_out++;
1163
1174 RING_GET(rq->ring,1);
1164 rq->pending--;
1165 }
1166
1167}
1168
1169void
1175 rq->pending--;
1176 }
1177
1178}
1179
1180void
1170oce_stop_rx(POCE_SOFTC sc)
1181oce_rx_cq_clean_hwlro(struct oce_rq *rq)
1171{
1182{
1172 struct oce_mbx mbx;
1173 struct mbx_delete_nic_rq *fwcmd;
1174 struct oce_rq *rq;
1175 int i = 0;
1183 struct oce_cq *cq = rq->cq;
1184 POCE_SOFTC sc = rq->parent;
1185 struct nic_hwlro_singleton_cqe *cqe;
1186 struct nic_hwlro_cqe_part2 *cqe2;
1187 int flush_wait = 0;
1188 int flush_compl = 0;
1189 int num_frags = 0;
1176
1190
1177 for_all_rq_queues(sc, rq, i) {
1178 if (rq->qstate == QCREATED) {
1179 /* Delete rxq in firmware */
1191 for (;;) {
1192 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1193 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
1194 if(cqe->valid) {
1195 if(cqe->cqe_type == 0) { /* singleton cqe */
1196 /* we should not get singleton cqe after cqe1 on same rq */
1197 if(rq->cqe_firstpart != NULL) {
1198 device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
1199 goto exit_rx_cq_clean_hwlro;
1200 }
1201 num_frags = cqe->pkt_size / rq->cfg.frag_size;
1202 if(cqe->pkt_size % rq->cfg.frag_size)
1203 num_frags++;
1204 oce_discard_rx_comp(rq, num_frags);
1205 /* Check if CQE is flush completion */
1206 if(!cqe->pkt_size)
1207 flush_compl = 1;
1208 cqe->valid = 0;
1209 RING_GET(cq->ring, 1);
1210 }else if(cqe->cqe_type == 0x1) { /* first part */
1211 /* we should not get cqe1 after cqe1 on same rq */
1212 if(rq->cqe_firstpart != NULL) {
1213 device_printf(sc->dev, "Got cqe1 after cqe1 \n");
1214 goto exit_rx_cq_clean_hwlro;
1215 }
1216 rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
1217 RING_GET(cq->ring, 1);
1218 }else if(cqe->cqe_type == 0x2) { /* second part */
1219 cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
1220 /* We should not get cqe2 without cqe1 */
1221 if(rq->cqe_firstpart == NULL) {
1222 device_printf(sc->dev, "Got cqe2 without cqe1 \n");
1223 goto exit_rx_cq_clean_hwlro;
1224 }
1225 num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
1226 if(cqe2->coalesced_size % rq->cfg.frag_size)
1227 num_frags++;
1228
1229 /* Flush completion will always come in singleton CQE */
1230 oce_discard_rx_comp(rq, num_frags);
1180
1231
1181 bzero(&mbx, sizeof(mbx));
1182 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1183 fwcmd->params.req.rq_id = rq->rq_id;
1232 rq->cqe_firstpart->valid = 0;
1233 cqe2->valid = 0;
1234 rq->cqe_firstpart = NULL;
1235 RING_GET(cq->ring, 1);
1236 }
1237 oce_arm_cq(sc, cq->cq_id, 1, FALSE);
1238 if(flush_compl)
1239 break;
1240 }else {
1241 if (flush_wait++ > 100) {
1242 device_printf(sc->dev, "did not receive hwlro flush compl\n");
1243 break;
1244 }
1245 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
1246 DELAY(1000);
1247 }
1248 }
1184
1249
1185 (void)oce_destroy_q(sc, &mbx,
1186 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1250 /* After cleanup, leave the CQ in unarmed state */
1251 oce_arm_cq(sc, cq->cq_id, 0, FALSE);
1187
1252
1188 rq->qstate = QDELETED;
1253exit_rx_cq_clean_hwlro:
1254 return;
1255}
1189
1256
1190 DELAY(1);
1191
1257
1192 /* Free posted RX buffers that are not used */
1193 oce_free_posted_rxbuf(rq);
/**
 * @brief Drain an RQ's completion queue until the firmware's flush
 *        completion CQE is seen, discarding all queued RX completions.
 *        Gives up after ~100ms (100 x 1ms polls) if no flush completion
 *        arrives; the CQ is left disarmed on return.
 * @param rq pointer to the receive queue being cleaned
 */
void
oce_rx_cq_clean(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_cq *cq;
	POCE_SOFTC sc;
	int flush_wait = 0;
	int flush_compl = 0;
	sc = rq->parent;
	cq = rq->cq;

	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		if(RQ_CQE_VALID(cqe)) {
			DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
			/* drop the posted buffers this completion refers to */
			oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
			/* Check if CQE is flush completion */
			if((cqe->u0.s.num_fragments==0)&&(cqe->u0.s.pkt_size == 0)&&(cqe->u0.s.error == 0))
				flush_compl = 1;

			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
			if (IF_LRO_ENABLED(sc))
				oce_rx_flush_lro(rq);
#endif
			/* acknowledge this one CQE without re-arming */
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if(flush_compl)
				break;
		}else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive flush compl\n");
				break;
			}
			/* no CQE yet: re-arm and wait 1ms before polling again */
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}
1302
1303void
1304oce_stop_rx(POCE_SOFTC sc)
1305{
1306 struct oce_mbx mbx;
1307 struct mbx_delete_nic_rq *fwcmd;
1308 struct mbx_delete_nic_rq_v1 *fwcmd1;
1309 struct oce_rq *rq;
1310 int i = 0;
1311
1312 /* before deleting disable hwlro */
1313 if(sc->enable_hwlro)
1314 oce_mbox_nic_set_iface_lro_config(sc, 0);
1315
1316 for_all_rq_queues(sc, rq, i) {
1317 if (rq->qstate == QCREATED) {
1318 /* Delete rxq in firmware */
1319 LOCK(&rq->rx_lock);
1320
1321 bzero(&mbx, sizeof(mbx));
1322 if(!rq->islro) {
1323 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1324 fwcmd->params.req.rq_id = rq->rq_id;
1325 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
1326 }else {
1327 fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
1328 fwcmd1->params.req.rq_id = rq->rq_id;
1329 fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
1330
1331 (void)oce_destroy_q(sc,&mbx,sizeof(struct mbx_delete_nic_rq_v1),QTYPE_RQ,1);
1332 }
1333 rq->qstate = QDELETED;
1334
1335 DELAY(1000);
1194
1336
1195 }
1196 }
1337 if(!rq->islro)
1338 oce_rx_cq_clean(rq);
1339 else
1340 oce_rx_cq_clean_hwlro(rq);
1341
1342 /* Free posted RX buffers that are not used */
1343 oce_free_posted_rxbuf(rq);
1344 UNLOCK(&rq->rx_lock);
1345 }
1346 }
1197}
1198
1199
1200
1201int
1202oce_start_rx(POCE_SOFTC sc)
1203{
1204 struct oce_rq *rq;
1205 int rc = 0, i;
1206
1207 for_all_rq_queues(sc, rq, i) {
1208 if (rq->qstate == QCREATED)
1209 continue;
1347}
1348
1349
1350
1351int
1352oce_start_rx(POCE_SOFTC sc)
1353{
1354 struct oce_rq *rq;
1355 int rc = 0, i;
1356
1357 for_all_rq_queues(sc, rq, i) {
1358 if (rq->qstate == QCREATED)
1359 continue;
1210 rc = oce_mbox_create_rq(rq);
1360 if((i == 0) || (!sc->enable_hwlro)) {
1361 rc = oce_mbox_create_rq(rq);
1362 if (rc)
1363 goto error;
1364 rq->islro = 0;
1365 }else {
1366 rc = oce_mbox_create_rq_v2(rq);
1367 if (rc)
1368 goto error;
1369 rq->islro = 1;
1370 }
1371 /* reset queue pointers */
1372 rq->qstate = QCREATED;
1373 rq->pending = 0;
1374 rq->ring->cidx = 0;
1375 rq->ring->pidx = 0;
1376 }
1377
1378 if(sc->enable_hwlro) {
1379 rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
1211 if (rc)
1212 goto error;
1380 if (rc)
1381 goto error;
1213 /* reset queue pointers */
1214 rq->qstate = QCREATED;
1215 rq->pending = 0;
1216 rq->ring->cidx = 0;
1217 rq->ring->pidx = 0;
1218 rq->packets_in = 0;
1219 rq->packets_out = 0;
1220 }
1221
1222 DELAY(1);
1223
1224 /* RSS config */
1225 if (is_rss_enabled(sc)) {
1226 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1227 if (rc)
1228 goto error;
1229
1230 }
1231
1382 }
1383
1384 DELAY(1);
1385
1386 /* RSS config */
1387 if (is_rss_enabled(sc)) {
1388 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1389 if (rc)
1390 goto error;
1391
1392 }
1393
1394 DELAY(1);
1232 return rc;
1233error:
1234 device_printf(sc->dev, "Start RX failed\n");
1235 return rc;
1236
1237}
1238
1239
1240
1395 return rc;
1396error:
1397 device_printf(sc->dev, "Start RX failed\n");
1398 return rc;
1399
1400}
1401
1402
1403