/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File : ecore_spq.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_spq.c 337517 2018-08-09 01:17:35Z davidcs $");


#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#endif
#include "ecore_hw.h"
#include "ecore_sriov.h"
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
#define SPQ_BLOCK_SLEEP_MS		(5)
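
/* Rough worst-case wait budgets implied by the values above (a sketch; the
 * actual time also depends on scheduler granularity): the quick poll spins
 * for up to 10 * 10us ~= 100us, while each sleeping pass waits for up to
 * 200 * 5ms ~= 1s (x5 on emulation). ecore_spq_block() below may run the
 * sleeping pass twice (before and after an MCP drain), so a stuck ramrod
 * is detected within a few seconds at most.
 */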

#ifndef REMOVE_DBG
/***************************************************************************
 * Debug [iSCSI] tool
 ***************************************************************************/
static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
				struct event_ring_entry *p_eqe)
{
	if (p_eqe->opcode >= MAX_ISCSI_EQE_OPCODE) {
		DP_NOTICE(p_hwfn, false, "Unknown iSCSI EQ: %x\n",
			  p_eqe->opcode);
	}

	switch (p_eqe->opcode) {
	case ISCSI_EVENT_TYPE_INIT_FUNC:
	case ISCSI_EVENT_TYPE_DESTROY_FUNC:
		/* NOPE */
		break;
	case ISCSI_EVENT_TYPE_OFFLOAD_CONN:
	case ISCSI_EVENT_TYPE_TERMINATE_CONN:
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
			   "iSCSI EQE: Port %x, Op %x, echo %x, FWret %x, CID %x, ConnID %x, ERR %x\n",
			   p_hwfn->port_id, p_eqe->opcode,
			   OSAL_LE16_TO_CPU(p_eqe->echo),
			   p_eqe->fw_return_code,
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.icid),
			   OSAL_LE16_TO_CPU(p_eqe->data.iscsi_info.conn_id),
			   p_eqe->data.iscsi_info.error_code);
		break;
	case ISCSI_EVENT_TYPE_UPDATE_CONN:
	case ISCSI_EVENT_TYPE_CLEAR_SQ:
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
	default:
		/* NOPE */
		break;
	}
}
#endif

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
				  union event_ring_data OSAL_UNUSED *data,
				  u8 fw_return_code)
{
	struct ecore_spq_comp_done *comp_done;

	comp_done = (struct ecore_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
					      struct ecore_spq_entry *p_ent,
					      u8 *p_fw_ret,
					      bool sleep_between_iter)
{
	struct ecore_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
		iter_cnt *= 5;
#endif

	while (iter_cnt--) {
		OSAL_POLL_MODE_DPC(p_hwfn);
		OSAL_SMP_RMB(p_hwfn->p_dev);
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return ECORE_SUCCESS;
		}

		if (sleep_between_iter) {
			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
		} else {
			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
		}
	}

	return ECORE_TIMEOUT;
}

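/* Escalation ladder for a blocking ramrod: a short busy-wait poll first,
 * then polling with sleeps, then an MCP drain request followed by one more
 * sleeping poll, and finally a RAMROD_FAIL notification if the completion
 * never arrives.
 */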
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
					    struct ecore_spq_entry *p_ent,
					    u8 *p_fw_ret, bool skip_quick_poll)
{
	struct ecore_spq_comp_done *comp_done;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (rc == ECORE_SUCCESS)
			return ECORE_SUCCESS;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = ecore_mcp_drain(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (rc == ECORE_SUCCESS)
		return ECORE_SUCCESS;

	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return ECORE_SUCCESS;
	}
err:
	DP_NOTICE(p_hwfn, true,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
		  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

	return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
						 struct ecore_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
	case ECORE_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = ecore_spq_blocking_cb;
		break;
	case ECORE_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
				    struct ecore_spq  *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct ecore_cxt_info cxt_info;
	u16 physical_q;
	enum _ecore_status_t rc;

	cxt_info.iid = p_spq->cid;

	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	/* @@@TBD we zero the context until we have ilt_reset implemented. */
	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
		/*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
			  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);*/
		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
	} else { /* E5 */
		ECORE_E5_MISSING_CODE;
	}

	/* CDU validation - FIXME currently disabled */

	/* QM physical queue */
	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn	*p_hwfn,
					      struct ecore_spq		*p_spq,
					      struct ecore_spq_entry	*p_ent)
{
	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = ecore_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
	elem = ecore_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
		return ECORE_INVAL;
	}

	*elem = p_ent->elem; /* Struct assignment */

	p_db_data->spq_prod =
		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	OSAL_WMB(p_hwfn->p_dev);

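	/* core_db_data is a 4-byte HSI structure (params, agg_flags and a
	 * 16-bit producer), which is why it can be rung as a single 32-bit
	 * doorbell value below.
	 */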
	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure doorbell was rung */
	OSAL_WMB(p_hwfn->p_dev);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
			     struct event_ring_entry *p_eqe)
{
	ecore_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  true, "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return ECORE_INVAL;
	}
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
			    enum protocol_type protocol_id,
			    ecore_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return ECORE_INVAL;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
			      enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) {
		return;
	}

	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}
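
/* Illustrative usage only (hypothetical caller, not part of this file):
 * a protocol module registers its EQ handler once its state is allocated
 * and unregisters it on teardown, e.g.
 *
 *	rc = ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *					 ecore_iscsi_async_event);
 *	...
 *	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
 *
 * where ecore_iscsi_async_event is assumed to match the
 * ecore_spq_async_comp_cb prototype and return ECORE_SUCCESS once the
 * event is handled.
 */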

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn	*p_hwfn,
			  u16			prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn	*p_hwfn,
					 void                   *cookie)

{
	struct ecore_eq    *p_eq    = cookie;
	struct ecore_chain *p_chain = &p_eq->chain;
	enum _ecore_status_t rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee that the fw_cons index we use points to a usable
	 * element (to comply with our chain), so that the chain macros below
	 * behave correctly.
	 */
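	/* E.g., in a PBL chain each page ends with reserved (unusable)
	 * elements; when the FW consumer lands on that boundary, skip past
	 * the unusable entries so the index matches what
	 * ecore_chain_consume() will actually hand back. (A sketch; exact
	 * counts depend on the chain geometry.)
	 */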
	if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
	    ecore_chain_get_usable_per_page(p_chain)) {
		fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
	}

	/* Complete current segment of eq entries */
	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
		if (!p_eqe) {
			rc = ECORE_INVAL;
			break;
		}

		DP_VERBOSE(p_hwfn,
			   ECORE_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,	     /* Event Opcode */
			   p_eqe->protocol_id,	     /* Event Protocol ID */
			   p_eqe->reserved0,	     /* Reserved */
			   OSAL_LE16_TO_CPU(p_eqe->echo),/* Echo value from
							ramrod data on the host
						      */
			   p_eqe->fw_return_code,    /* FW return code for SP
							ramrods
						      */
			   p_eqe->flags);
#ifndef REMOVE_DBG
		if (p_eqe->protocol_id == PROTOCOLID_ISCSI)
			ecore_iscsi_eq_dump(p_hwfn, p_eqe);
#endif

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (ecore_async_event_completion(p_hwfn, p_eqe))
				rc = ECORE_INVAL;
		} else if (ecore_spq_completion(p_hwfn,
						p_eqe->echo,
						p_eqe->fw_return_code,
						&p_eqe->data)) {
			rc = ECORE_INVAL;
		}

		ecore_chain_recycle_consumed(p_chain);
	}

	ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	rc = ecore_spq_pend_post(p_hwfn);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);

	return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
	struct ecore_eq	*p_eq;

	/* Allocate EQ struct */
	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
	if (!p_eq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_eq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize EQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      num_elem,
			      sizeof(union event_ring_element),
			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return ECORE_SUCCESS;

eq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_eq);
	return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
	p_hwfn->p_eq = OSAL_NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
						 struct eth_slow_path_rx_cqe *cqe,
						 enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->p_dev))
		return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
					      struct eth_slow_path_rx_cqe *cqe)
{
	enum _ecore_status_t rc;

	rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);
	}

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct core_db_data *p_db_data;
	void OSAL_IOMEM *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	enum _ecore_status_t rc;

	OSAL_LIST_INIT(&p_spq->pending);
	OSAL_LIST_INIT(&p_spq->completion_pending);
	OSAL_LIST_INIT(&p_spq->free_pool);
	OSAL_LIST_INIT(&p_spq->unlimited_pending);
	OSAL_SPIN_LOCK_INIT(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = ecore_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct ecore_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
		      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	ecore_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	ecore_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_virt = OSAL_NULL;
	struct ecore_spq *p_spq = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq =
	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
	if (!p_spq) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
		return ECORE_NOMEM;
	}

	/* SPQ ring  */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_SINGLE,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      0, /* N/A when the mode is SINGLE */
			      sizeof(struct slow_path_element),
			      &p_spq->chain, OSAL_NULL)) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = ecore_chain_get_capacity(&p_spq->chain);
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
					 capacity *
					 sizeof(struct ecore_spq_entry));
	if (!p_virt) {
		goto spq_allocate_fail;
	}

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
		goto spq_allocate_fail;
#endif

	p_hwfn->p_spq = p_spq;
	return ECORE_SUCCESS;

spq_allocate_fail:
	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
	OSAL_FREE(p_hwfn->p_dev, p_spq);
	return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	void OSAL_IOMEM *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = ecore_chain_get_capacity(&p_spq->chain);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_spq->p_virt,
				       p_spq->p_phys,
				       capacity *
				       sizeof(struct ecore_spq_entry));
	}

	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_spq);
	p_hwfn->p_spq = OSAL_NULL;
}

enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
					 struct ecore_spq_entry **pp_ent)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_SPIN_LOCK(&p_spq->lock);

	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {

		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
		if (!p_ent) {
			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = ECORE_NOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
					      struct ecore_spq_entry,
					      list);
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	OSAL_SPIN_UNLOCK(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			      struct ecore_spq_entry *p_ent)
{
	OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
			    struct ecore_spq_entry *p_ent)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
	__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
						struct ecore_spq_entry *p_ent,
						enum spq_priority priority)
{
	struct ecore_spq	*p_spq	= p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {

			OSAL_LIST_PUSH_TAIL(&p_ent->list,
					    &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return ECORE_SUCCESS;

		} else {
			struct ecore_spq_entry *p_en2;

			p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
						     struct ecore_spq_entry,
						     list);
			OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
				OSAL_FREE(p_hwfn->p_dev, p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case ECORE_SPQ_PRIORITY_NORMAL:
		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case ECORE_SPQ_PRIORITY_HIGH:
		OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq) {
		return 0xffffffff;	/* illegal */
	}
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
						osal_list_t	  *head,
						u32		  keep_reserve)
{
	struct ecore_spq	*p_spq = p_hwfn->p_spq;
	enum _ecore_status_t	rc;

	/* TODO - implementation might be wasteful; will always keep room
	 * for an additional high priority ramrod (even if one is already
	 * pending in FW)
	 */
	while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !OSAL_LIST_IS_EMPTY(head)) {
		struct ecore_spq_entry  *p_ent =
		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
		if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
			OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
			p_spq->comp_sent_count++;

			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
			if (rc) {
				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
						       &p_spq->completion_pending);
				__ecore_spq_return_entry(p_hwfn, p_ent);
				return rc;
			}
		}
	}

	return ECORE_SUCCESS;
}
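
/* With SPQ_HIGH_PRI_RESERVE_DEFAULT == 1, the loop above stops posting once
 * a single chain element remains, keeping a slot available for a
 * high-priority ramrod (which ecore_spq_add_entry() pushes to the head of
 * the pending list).
 */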

enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq *p_spq = p_hwfn->p_spq;
	struct ecore_spq_entry *p_ent = OSAL_NULL;

	while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool))
	{
		if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
			break;

		p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
					      struct ecore_spq_entry,
					      list);
		if (!p_ent)
			return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return ecore_spq_post_list(p_hwfn, &p_spq->pending,
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn		*p_hwfn,
				    struct ecore_spq_entry	*p_ent,
				    u8                          *fw_return_code)
{
	enum _ecore_status_t	rc = ECORE_SUCCESS;
	struct ecore_spq	*p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
	bool			b_ret_ent = true;

	if (!p_hwfn)
		return ECORE_INVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
		return ECORE_INVAL;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Recovery is in progress -> skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
		/* Return success to let the flows complete successfully w/o
		 * any error handling.
		 */
		return ECORE_SUCCESS;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);

	/* Complete the entry */
	rc = ecore_spq_fill_entry(p_hwfn, p_ent);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = ecore_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
		/* For entries in ECORE BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, once the answer is known, perform the cleanup here.
		 */
		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
				     p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			OSAL_FREE(p_hwfn->p_dev, p_ent);

			/* TBD: handle error flow and remove p_ent from
			 * completion pending
			 */
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		ecore_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
	ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__ecore_spq_return_entry(p_hwfn, p_ent);
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	return rc;
}
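
/* Illustrative flow only (a simplified sketch, not a verbatim caller):
 * a typical blocking ramrod acquires an entry, fills it in and posts it,
 * roughly as follows. The elided header/ramrod fields are placeholders,
 * and the use of the entry's embedded comp_done area as the completion
 * cookie is an assumption about how EBLOCK mode is wired up; real callers
 * go through the ecore_sp_* wrappers rather than driving the SPQ directly.
 *
 *	struct ecore_spq_entry *p_ent = OSAL_NULL;
 *	u8 fw_ret;
 *
 *	rc = ecore_spq_get_entry(p_hwfn, &p_ent);
 *	if (rc == ECORE_SUCCESS) {
 *		p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *		p_ent->comp_cb.cookie = &p_ent->comp_done;
 *		p_ent->elem.hdr.cid = ...;
 *		p_ent->elem.hdr.cmd_id = ...;
 *		p_ent->elem.hdr.protocol_id = ...;
 *		rc = ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 */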

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
					  __le16 echo,
					  u8 fw_return_code,
					  union event_ring_data	*p_data)
{
	struct ecore_spq	*p_spq;
	struct ecore_spq_entry	*p_ent = OSAL_NULL;
	struct ecore_spq_entry	*tmp;
	struct ecore_spq_entry	*found = OSAL_NULL;

	if (!p_hwfn) {
		return ECORE_INVAL;
	}

	p_spq = p_hwfn->p_spq;
	if (!p_spq) {
		return ECORE_INVAL;
	}

	OSAL_SPIN_LOCK(&p_spq->lock);
	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
				      tmp,
				      &p_spq->completion_pending,
				      list,
				      struct ecore_spq_entry) {

		if (p_ent->elem.hdr.echo == echo) {
			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
					       &p_spq->completion_pending);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
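			/* Worked example (sketch): if completions arrive as
			 * echo 2, then 0, then 1 - echo 2 only sets its bit;
			 * echo 0 releases one chain element and advances
			 * comp_bitmap_idx past 0; echo 1 then releases the
			 * elements for both 1 and the already-marked 2.
			 */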
			SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
			while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
						      p_spq->comp_bitmap_idx)) {
				SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
							p_spq->comp_bitmap_idx);
				p_spq->comp_bitmap_idx++;
				ecore_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is debug and should be relatively uncommon - depends
		 * on scenarios which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   OSAL_LE16_TO_CPU(echo),
			   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	OSAL_SPIN_UNLOCK(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  OSAL_LE16_TO_CPU(echo));
		return ECORE_EXISTS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   OSAL_LE16_TO_CPU(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Got a completion without a callback function\n");

	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		ecore_spq_return_entry(p_hwfn, found);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
	if (!p_consq) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_consq'\n");
		return ECORE_NOMEM;
	}

	/* Allocate and initialize ConsQ chain */
	if (ecore_chain_alloc(p_hwfn->p_dev,
			      ECORE_CHAIN_USE_TO_PRODUCE,
			      ECORE_CHAIN_MODE_PBL,
			      ECORE_CHAIN_CNT_TYPE_U16,
			      ECORE_CHAIN_PAGE_SIZE/0x80,
			      0x80,
			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	p_hwfn->p_consq = p_consq;
	return ECORE_SUCCESS;

consq_allocate_fail:
	OSAL_FREE(p_hwfn->p_dev, p_consq);
	return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
	ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
	p_hwfn->p_consq = OSAL_NULL;
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif