/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_spq.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_spq.c 337517 2018-08-09 01:17:35Z davidcs $");


#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

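/* Blocking-mode polling budgets: a short busy-wait phase
 * (SPQ_BLOCK_DELAY_MAX_ITER iterations of SPQ_BLOCK_DELAY_US microseconds),
 * optionally followed by a sleeping phase (SPQ_BLOCK_SLEEP_MAX_ITER
 * iterations of SPQ_BLOCK_SLEEP_MS milliseconds).
 */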
#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
#define SPQ_BLOCK_SLEEP_MS		(5)

#ifndef REMOVE_DBG
/***************************************************************************
 * Debug [iSCSI] tool
 ***************************************************************************/
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
				struct event_ring_entry *p_eqe)
{
	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
			  p_eqe->opcode);
	}

	switch (p_eqe->opcode) {
	case ISCSI_EVENT_TYPE_INIT_FUNC:
	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
		/* NOPE */
		break;
	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
			   p_hwfn->port_id, p_eqe->opcode,
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.icid),
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
			   p_eqe->data.iscsi_info.error_code);
		break;
	case ISCSI_EVENT_TYPE_UPDATE_CONN:
	case ISCSI_EVENT_TYPE_CLEAR_SQ:
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
	default:
		/* NOPE */
		break;
	}
}
#endif

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
				  union event_ring_data OSAL_UNUSED *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

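/* Poll the 'done' flag set by ecore_spq_blocking_cb(), either busy-waiting or
 * sleeping between iterations, until the completion arrives or the iteration
 * budget is exhausted.
 */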
static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
		iter_cnt *= 5;
#endif

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter) {
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		} else {
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
		}
	}

	return ECORE_TIMEOUT;
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
						 struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
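/* Initialize the core connection context of the SPQ CID: enable the relevant
 * XSTORM aggregation flags, select the LB physical queue and program the SPQ
 * and consolidation queue base addresses.
 */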
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq  *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct ecore_cxt_info cxt_info;
	u16 physical_q;
	enum _ecore_status_t rc;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	} else { /* E5 */
		ECORE_E5_MISSING_CODE;
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

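/* Copy a single SPQ element onto the chain and ring the doorbell with the
 * updated producer value.
 */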
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn		*p_hwfn,
					      struct ecore_spq		*p_spq,
					      struct ecore_spq_entry	*p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem; /* Struct assignment */

	p_db_data->spq_prod =
		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure doorbell was rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

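/* Dispatch an asynchronous EQ event to the callback registered for its
 * protocol via ecore_spq_register_async_cb().
 */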
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	ecore_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
			    enum protocol_type protocol_id,
			    ecore_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
			      enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn	*p_hwfn,
			  u16			prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn	*p_hwfn,
					 void                   *cookie)

{
	struct ecore_eq    *p_eq    = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee that the fw_cons index we use points to a usable
	 * element of the chain, so that our consumer-index macros work
	 * correctly.
	 */
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	     /* Event Opcode */
			   p_eqe->protocol_id,	     /* Event Protocol ID */
			   p_eqe->reserved0,	     /* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo),/* Echo value from
							ramrod data on the host
						      */
			   p_eqe->fw_return_code,    /* FW return code for SP
							ramrods
						      */
			   p_eqe->flags);
#ifndef REMOVE_DBG
		if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
			ecore_iscsi_eq_dump(p_hwfn, p_eqe);
#endif

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq	*p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct core_db_data *p_db_data;
	void OSAL_IOMEM *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	enum _ecore_status_t rc;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt) {
		goto spq_allocate_fail;
	}

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
		goto spq_allocate_fail;
#endif

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	void OSAL_IOMEM *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_spq);
	p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
					 struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {

		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry,
					      list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			      struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
						struct ecore_spq_entry *p_ent,
						enum spq_priority priority)
{
	struct ecore_spq	*p_spq	= p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {

			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;

		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						     struct ecore_spq_entry,
						     list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq) {
		return 0xffffffff;	/* illegal */
	}
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t	  *head,
						u32		  keep_reserve)
{
	struct ecore_spq	*p_spq = p_hwfn->p_spq;
	enum _ecore_status_t	rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry  *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						       &p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}

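/* Move entries from the unlimited_pending list into the pending list while
 * free pool entries are available, then post the pending list to HW (keeping
 * the high priority reserve). Must be called with the SPQ lock held.
 */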
enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool))
	{
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry,
					      list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn		*p_hwfn,
				    struct ecore_spq_entry	*p_ent,
				    u8                          *fw_return_code)
{
	enum _ecore_status_t	rc = ECORE_SUCCESS;
	struct ecore_spq	*p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool			b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows complete successfully,
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data	*p_data)
{
	struct ecore_spq	*p_spq;
	struct ecore_spq_entry	*p_ent = OSAL_NULL;
	struct ecore_spq_entry	*tmp;
	struct ecore_spq_entry	*found = OSAL_NULL;

	if (!p_hwfn) {
		return ECORE_INVAL;
	}

	p_spq = p_hwfn->p_spq;
	if (!p_spq) {
		return ECORE_INVAL;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list,
				      struct ecore_spq_entry) {

		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overwriting SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE/0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
	p_hwfn->p_consq = OSAL_NULL;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif
