1316485Sdavidcs/*
2316485Sdavidcs * Copyright (c) 2017-2018 Cavium, Inc.
3316485Sdavidcs * All rights reserved.
4316485Sdavidcs *
5316485Sdavidcs *  Redistribution and use in source and binary forms, with or without
6316485Sdavidcs *  modification, are permitted provided that the following conditions
7316485Sdavidcs *  are met:
8316485Sdavidcs *
9316485Sdavidcs *  1. Redistributions of source code must retain the above copyright
10316485Sdavidcs *     notice, this list of conditions and the following disclaimer.
11316485Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12316485Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13316485Sdavidcs *     documentation and/or other materials provided with the distribution.
14316485Sdavidcs *
15316485Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16316485Sdavidcs *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17316485Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18316485Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19316485Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20316485Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21316485Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22316485Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23316485Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24316485Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25316485Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26316485Sdavidcs */
27316485Sdavidcs
28316485Sdavidcs/*
29316485Sdavidcs * File : ecore_dev.c
30316485Sdavidcs */
31316485Sdavidcs#include <sys/cdefs.h>
32316485Sdavidcs__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_dev.c 337517 2018-08-09 01:17:35Z davidcs $");
33316485Sdavidcs
34316485Sdavidcs#include "bcm_osal.h"
35316485Sdavidcs#include "reg_addr.h"
36316485Sdavidcs#include "ecore_gtt_reg_addr.h"
37316485Sdavidcs#include "ecore.h"
38316485Sdavidcs#include "ecore_chain.h"
39316485Sdavidcs#include "ecore_status.h"
40316485Sdavidcs#include "ecore_hw.h"
41316485Sdavidcs#include "ecore_rt_defs.h"
42316485Sdavidcs#include "ecore_init_ops.h"
43316485Sdavidcs#include "ecore_int.h"
44316485Sdavidcs#include "ecore_cxt.h"
45316485Sdavidcs#include "ecore_spq.h"
46316485Sdavidcs#include "ecore_init_fw_funcs.h"
47316485Sdavidcs#include "ecore_sp_commands.h"
48316485Sdavidcs#include "ecore_dev_api.h"
49316485Sdavidcs#include "ecore_sriov.h"
50316485Sdavidcs#include "ecore_vf.h"
51316485Sdavidcs#include "ecore_ll2.h"
52316485Sdavidcs#include "ecore_fcoe.h"
53316485Sdavidcs#include "ecore_iscsi.h"
54316485Sdavidcs#include "ecore_ooo.h"
55316485Sdavidcs#include "ecore_mcp.h"
56316485Sdavidcs#include "ecore_hw_defs.h"
57316485Sdavidcs#include "mcp_public.h"
58337517Sdavidcs#include "ecore_rdma.h"
59316485Sdavidcs#include "ecore_iro.h"
60316485Sdavidcs#include "nvm_cfg.h"
61316485Sdavidcs#include "ecore_dev_api.h"
62316485Sdavidcs#include "ecore_dcbx.h"
63316485Sdavidcs#include "pcics_reg_driver.h"
64316485Sdavidcs#include "ecore_l2.h"
65337517Sdavidcs#ifndef LINUX_REMOVE
66337517Sdavidcs#include "ecore_tcp_ip.h"
67337517Sdavidcs#endif
68316485Sdavidcs
69337517Sdavidcs#ifdef _NTDDK_
70337517Sdavidcs#pragma warning(push)
71337517Sdavidcs#pragma warning(disable : 28167)
72337517Sdavidcs#pragma warning(disable : 28123)
73337517Sdavidcs#endif
74337517Sdavidcs
75316485Sdavidcs/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
76316485Sdavidcs * registers involved are not split and thus configuration is a race where
77316485Sdavidcs * some of the PFs configuration might be lost.
78316485Sdavidcs * Eventually, this needs to move into a MFW-covered HW-lock as arbitration
79316485Sdavidcs * mechanism as this doesn't cover some cases [E.g., PDA or scenarios where
80316485Sdavidcs * there's more than a single compiled ecore component in system].
81316485Sdavidcs */
/* Serializes QM register configuration across PFs (see the race described
 * in the comment above). Reference-counted so the lock is initialized by
 * the first user and torn down by the last.
 */
static osal_spinlock_t qm_lock;
static u32 qm_lock_ref_cnt;
84316485Sdavidcs
85337517Sdavidcsvoid ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size)
86337517Sdavidcs{
87337517Sdavidcs	p_dev->ilt_page_size = ilt_page_size;
88337517Sdavidcs}
89337517Sdavidcs
90320164Sdavidcs/******************** Doorbell Recovery *******************/
91320164Sdavidcs/* The doorbell recovery mechanism consists of a list of entries which represent
92320164Sdavidcs * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
93320164Sdavidcs * entity needs to register with the mechanism and provide the parameters
94320164Sdavidcs * describing it's doorbell, including a location where last used doorbell data
95320164Sdavidcs * can be found. The doorbell execute function will traverse the list and
96320164Sdavidcs * doorbell all of the registered entries.
97320164Sdavidcs */
/* A single registered doorbelling entity. Entries are linked into the
 * per-hwfn db_recovery_info.list and re-doorbelled by the execute flow.
 */
struct ecore_db_recovery_entry {
	osal_list_entry_t	list_entry;	/* anchor in db_recovery_info.list */
	void OSAL_IOMEM		*db_addr;	/* doorbell address within the doorbell bar */
	void			*db_data;	/* location of last used doorbell data (32b/64b per db_width) */
	enum ecore_db_rec_width	db_width;	/* DB_REC_WIDTH_32B or 64b */
	enum ecore_db_rec_space	db_space;	/* DB_REC_USER or kernel space */
	u8			hwfn_idx;	/* my_id of the owning hwfn */
};
106320164Sdavidcs
107320164Sdavidcs/* display a single doorbell recovery entry */
108320164Sdavidcsstatic void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
109320164Sdavidcs				struct ecore_db_recovery_entry *db_entry,
110320164Sdavidcs				char *action)
111320164Sdavidcs{
112320164Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
113320164Sdavidcs		   action, db_entry, db_entry->db_addr, db_entry->db_data,
114320164Sdavidcs		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
115320164Sdavidcs		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
116320164Sdavidcs		   db_entry->hwfn_idx);
117320164Sdavidcs}
118320164Sdavidcs
119320164Sdavidcs/* doorbell address sanity (address within doorbell bar range) */
120337517Sdavidcsstatic bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
121337517Sdavidcs			 void *db_data)
122320164Sdavidcs{
123337517Sdavidcs	/* make sure doorbell address  is within the doorbell bar */
124337517Sdavidcs	if (db_addr < p_dev->doorbells || (u8 *)db_addr >
125337517Sdavidcs			(u8 *)p_dev->doorbells + p_dev->db_size) {
126320164Sdavidcs		OSAL_WARN(true,
127320164Sdavidcs			  "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
128320164Sdavidcs			  db_addr, p_dev->doorbells,
129337517Sdavidcs			  (u8 *)p_dev->doorbells + p_dev->db_size);
130320164Sdavidcs		return false;
131320164Sdavidcs	}
132337517Sdavidcs
133337517Sdavidcs	/* make sure doorbell data pointer is not null */
134337517Sdavidcs	if (!db_data) {
135337517Sdavidcs		OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
136337517Sdavidcs		return false;
137337517Sdavidcs	}
138337517Sdavidcs
139337517Sdavidcs	return true;
140320164Sdavidcs}
141320164Sdavidcs
142320164Sdavidcs/* find hwfn according to the doorbell address */
143320164Sdavidcsstatic struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
144320164Sdavidcs					  void OSAL_IOMEM *db_addr)
145320164Sdavidcs{
146320164Sdavidcs	struct ecore_hwfn *p_hwfn;
147320164Sdavidcs
148320164Sdavidcs	/* in CMT doorbell bar is split down the middle between engine 0 and enigne 1 */
149337517Sdavidcs	if (ECORE_IS_CMT(p_dev))
150320164Sdavidcs		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
151320164Sdavidcs			&p_dev->hwfns[0] : &p_dev->hwfns[1];
152320164Sdavidcs	else
153320164Sdavidcs		p_hwfn = ECORE_LEADING_HWFN(p_dev);
154320164Sdavidcs
155320164Sdavidcs	return p_hwfn;
156320164Sdavidcs}
157320164Sdavidcs
158320164Sdavidcs/* add a new entry to the doorbell recovery mechanism */
159320164Sdavidcsenum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
160320164Sdavidcs					   void OSAL_IOMEM *db_addr,
161320164Sdavidcs					   void *db_data,
162320164Sdavidcs					   enum ecore_db_rec_width db_width,
163320164Sdavidcs					   enum ecore_db_rec_space db_space)
164320164Sdavidcs{
165320164Sdavidcs	struct ecore_db_recovery_entry *db_entry;
166320164Sdavidcs	struct ecore_hwfn *p_hwfn;
167320164Sdavidcs
168320164Sdavidcs	/* shortcircuit VFs, for now */
169320164Sdavidcs	if (IS_VF(p_dev)) {
170320164Sdavidcs		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
171320164Sdavidcs		return ECORE_SUCCESS;
172320164Sdavidcs	}
173320164Sdavidcs
174320164Sdavidcs	/* sanitize doorbell address */
175337517Sdavidcs	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
176320164Sdavidcs		return ECORE_INVAL;
177320164Sdavidcs
178320164Sdavidcs	/* obtain hwfn from doorbell address */
179320164Sdavidcs	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
180320164Sdavidcs
181320164Sdavidcs	/* create entry */
182320164Sdavidcs	db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
183320164Sdavidcs	if (!db_entry) {
184320164Sdavidcs		DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
185320164Sdavidcs		return ECORE_NOMEM;
186320164Sdavidcs	}
187320164Sdavidcs
188320164Sdavidcs	/* populate entry */
189320164Sdavidcs	db_entry->db_addr = db_addr;
190320164Sdavidcs	db_entry->db_data = db_data;
191320164Sdavidcs	db_entry->db_width = db_width;
192320164Sdavidcs	db_entry->db_space = db_space;
193320164Sdavidcs	db_entry->hwfn_idx = p_hwfn->my_id;
194320164Sdavidcs
195320164Sdavidcs	/* display */
196320164Sdavidcs	ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
197320164Sdavidcs
198320164Sdavidcs	/* protect the list */
199320164Sdavidcs	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
200320164Sdavidcs	OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
201320164Sdavidcs			    &p_hwfn->db_recovery_info.list);
202320164Sdavidcs	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
203320164Sdavidcs
204320164Sdavidcs	return ECORE_SUCCESS;
205320164Sdavidcs}
206320164Sdavidcs
/* Remove an entry from the doorbell recovery mechanism.
 * The lookup key is db_data, since db_addr is not unique (e.g. RoCE queues
 * may share a doorbell address). Returns ECORE_INVAL when no matching
 * entry exists; on success the entry is unlinked and freed.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;

	/* shortcircuit VFs, for now - they never registered in _add() either */
	if (IS_VF(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return ECORE_SUCCESS;
	}

	/* sanitize doorbell address */
	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
		return ECORE_INVAL;

	/* obtain hwfn from doorbell address */
	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {

		/* search according to db_data addr since db_addr is not unique (roce) */
		if (db_entry->db_data == db_data) {
			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
					       &p_hwfn->db_recovery_info.list);
			rc = ECORE_SUCCESS;
			break;
		}
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);

	if (rc == ECORE_INVAL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
			  db_data, db_addr);
	} else
		/* freed outside the lock; the entry is already unlinked */
		OSAL_FREE(p_dev, db_entry);

	return rc;
}
258320164Sdavidcs
259320164Sdavidcs/* initialize the doorbell recovery mechanism */
260337517Sdavidcsstatic enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
261320164Sdavidcs{
262320164Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
263337517Sdavidcs
264337517Sdavidcs	/* make sure db_size was set in p_dev */
265337517Sdavidcs	if (!p_hwfn->p_dev->db_size) {
266337517Sdavidcs		DP_ERR(p_hwfn->p_dev, "db_size not set\n");
267337517Sdavidcs		return ECORE_INVAL;
268337517Sdavidcs	}
269337517Sdavidcs
270320164Sdavidcs	OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
271320164Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
272337517Sdavidcs	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
273337517Sdavidcs		return ECORE_NOMEM;
274320164Sdavidcs#endif
275320164Sdavidcs	OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
276320164Sdavidcs	p_hwfn->db_recovery_info.db_recovery_counter = 0;
277337517Sdavidcs
278337517Sdavidcs	return ECORE_SUCCESS;
279320164Sdavidcs}
280320164Sdavidcs
281320164Sdavidcs/* destroy the doorbell recovery mechanism */
282320164Sdavidcsstatic void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
283320164Sdavidcs{
284320164Sdavidcs	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
285320164Sdavidcs
286320164Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
287320164Sdavidcs	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
288320164Sdavidcs		DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
289320164Sdavidcs		while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
290320164Sdavidcs			db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list,
291320164Sdavidcs							 struct ecore_db_recovery_entry,
292320164Sdavidcs							 list_entry);
293320164Sdavidcs			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
294320164Sdavidcs			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
295320164Sdavidcs					       &p_hwfn->db_recovery_info.list);
296320164Sdavidcs			OSAL_FREE(p_hwfn->p_dev, db_entry);
297320164Sdavidcs		}
298320164Sdavidcs	}
299320164Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
300320164Sdavidcs	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
301320164Sdavidcs#endif
302320164Sdavidcs	p_hwfn->db_recovery_info.db_recovery_counter = 0;
303320164Sdavidcs}
304320164Sdavidcs
305320164Sdavidcs/* print the content of the doorbell recovery mechanism */
306320164Sdavidcsvoid ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
307320164Sdavidcs{
308320164Sdavidcs	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
309320164Sdavidcs
310320164Sdavidcs	DP_NOTICE(p_hwfn, false,
311320164Sdavidcs		  "Dispalying doorbell recovery database. Counter was %d\n",
312320164Sdavidcs		  p_hwfn->db_recovery_info.db_recovery_counter);
313320164Sdavidcs
314320164Sdavidcs	/* protect the list */
315320164Sdavidcs	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
316320164Sdavidcs	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
317320164Sdavidcs				 &p_hwfn->db_recovery_info.list,
318320164Sdavidcs				 list_entry,
319320164Sdavidcs				 struct ecore_db_recovery_entry) {
320320164Sdavidcs		ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
321320164Sdavidcs	}
322320164Sdavidcs
323320164Sdavidcs	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
324320164Sdavidcs}
325320164Sdavidcs
/* Ring the doorbell of a single doorbell recovery entry.
 * @db_exec selects the mode: DB_REC_DRY_RUN only logs what would be rung,
 * DB_REC_REAL_DEAL logs and rings, DB_REC_ONCE rings without the per-entry
 * printout.
 */
static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
			    struct ecore_db_recovery_entry *db_entry,
			    enum ecore_db_rec_exec db_exec)
{
	if (db_exec != DB_REC_ONCE) {
		/* Print according to width */
		if (db_entry->db_width == DB_REC_WIDTH_32B)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
				   "%s doorbell address %p data %x\n",
				   db_exec == DB_REC_DRY_RUN ?
				   "would have rung" : "ringing",
				   db_entry->db_addr,
				   *(u32 *)db_entry->db_data);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
				   "%s doorbell address %p data %llx\n",
				   db_exec == DB_REC_DRY_RUN ?
				   "would have rung" : "ringing",
				   db_entry->db_addr,
				   (unsigned long long)*(u64 *)(db_entry->db_data));
	}

	/* Sanity - the entry may have been corrupted since registration */
	if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
				 db_entry->db_data))
		return;

	/* Flush the write combined buffer. Since there are multiple doorbelling
	 * entities using the same address, if we don't flush, a transaction
	 * could be lost.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	/* Ring the doorbell (skipped entirely in DB_REC_DRY_RUN mode) */
	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
		if (db_entry->db_width == DB_REC_WIDTH_32B)
			DIRECT_REG_WR(p_hwfn, db_entry->db_addr, *(u32 *)(db_entry->db_data));
		else
			DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, *(u64 *)(db_entry->db_data));
	}

	/* Flush the write combined buffer. Next doorbell may come from a
	 * different entity to the same address...
	 */
	OSAL_WMB(p_hwfn->p_dev);
}
373320164Sdavidcs
/* Traverse the doorbell recovery entry list and ring all the doorbells.
 * With DB_REC_ONCE only the first registered entry is rung and the recovery
 * counter is not incremented; all other modes replay the entire list.
 */
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
			       enum ecore_db_rec_exec db_exec)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	if (db_exec != DB_REC_ONCE) {
		DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
			  p_hwfn->db_recovery_info.db_recovery_counter);

		/* track amount of times recovery was executed */
		p_hwfn->db_recovery_info.db_recovery_counter++;
	}

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
		if (db_exec == DB_REC_ONCE)
			break;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}
401320164Sdavidcs/******************** Doorbell Recovery end ****************/
402320164Sdavidcs
403337517Sdavidcs/********************************** NIG LLH ***********************************/
404337517Sdavidcs
/* Kinds of NIG LLH filters tracked by the shadow */
enum ecore_llh_filter_type {
	ECORE_LLH_FILTER_TYPE_MAC,
	ECORE_LLH_FILTER_TYPE_PROTOCOL,
};

/* MAC filter - matches on a MAC address */
struct ecore_llh_mac_filter {
	u8 addr[ETH_ALEN];
};

/* Protocol filter - matches on ethertype / ports according to 'type' */
struct ecore_llh_protocol_filter {
	enum ecore_llh_prot_filter_type_t type;
	u16 source_port_or_eth_type;
	u16 dest_port;
};

union ecore_llh_filter {
	struct ecore_llh_mac_filter mac;
	struct ecore_llh_protocol_filter protocol;
};

/* Shadow copy of a single LLH filter slot */
struct ecore_llh_filter_info {
	bool b_enabled;			/* slot is in use */
	u32 ref_cnt;			/* registrations of this exact filter */
	enum ecore_llh_filter_type type;
	union ecore_llh_filter filter;
};

/* Per-device shadow of the NIG LLH filter banks */
struct ecore_llh_info {
	/* Number of LLH filters banks */
	u8 num_ppfid;

#define MAX_NUM_PPFID	8
	/* maps relative ppfid (0..num_ppfid-1) to absolute ppfid */
	u8 ppfid_array[MAX_NUM_PPFID];

	/* Array of filters arrays:
	 * "num_ppfid" elements of filters banks, where each is an array of
	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
	 */
	struct ecore_llh_filter_info **pp_filters;
};
445337517Sdavidcs
446337517Sdavidcsstatic void ecore_llh_free(struct ecore_dev *p_dev)
447337517Sdavidcs{
448337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
449337517Sdavidcs	u32 i;
450337517Sdavidcs
451337517Sdavidcs	if (p_llh_info != OSAL_NULL) {
452337517Sdavidcs		if (p_llh_info->pp_filters != OSAL_NULL) {
453337517Sdavidcs			for (i = 0; i < p_llh_info->num_ppfid; i++)
454337517Sdavidcs				OSAL_FREE(p_dev, p_llh_info->pp_filters[i]);
455337517Sdavidcs		}
456337517Sdavidcs
457337517Sdavidcs		OSAL_FREE(p_dev, p_llh_info->pp_filters);
458337517Sdavidcs	}
459337517Sdavidcs
460337517Sdavidcs	OSAL_FREE(p_dev, p_llh_info);
461337517Sdavidcs	p_dev->p_llh_info = OSAL_NULL;
462337517Sdavidcs}
463337517Sdavidcs
464337517Sdavidcsstatic enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev)
465337517Sdavidcs{
466337517Sdavidcs	struct ecore_llh_info *p_llh_info;
467337517Sdavidcs	u32 size; u8 i;
468337517Sdavidcs
469337517Sdavidcs	p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info));
470337517Sdavidcs	if (!p_llh_info)
471337517Sdavidcs		return ECORE_NOMEM;
472337517Sdavidcs	p_dev->p_llh_info = p_llh_info;
473337517Sdavidcs
474337517Sdavidcs	for (i = 0; i < MAX_NUM_PPFID; i++) {
475337517Sdavidcs		if (!(p_dev->ppfid_bitmap & (0x1 << i)))
476337517Sdavidcs			continue;
477337517Sdavidcs
478337517Sdavidcs		p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
479337517Sdavidcs		DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n",
480337517Sdavidcs			   p_llh_info->num_ppfid, i);
481337517Sdavidcs		p_llh_info->num_ppfid++;
482337517Sdavidcs	}
483337517Sdavidcs
484337517Sdavidcs	size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
485337517Sdavidcs	p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size);
486337517Sdavidcs	if (!p_llh_info->pp_filters)
487337517Sdavidcs		return ECORE_NOMEM;
488337517Sdavidcs
489337517Sdavidcs	size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
490337517Sdavidcs	       sizeof(**p_llh_info->pp_filters);
491337517Sdavidcs	for (i = 0; i < p_llh_info->num_ppfid; i++) {
492337517Sdavidcs		p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL,
493337517Sdavidcs							size);
494337517Sdavidcs		if (!p_llh_info->pp_filters[i])
495337517Sdavidcs			return ECORE_NOMEM;
496337517Sdavidcs	}
497337517Sdavidcs
498337517Sdavidcs	return ECORE_SUCCESS;
499337517Sdavidcs}
500337517Sdavidcs
501337517Sdavidcsstatic enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev,
502337517Sdavidcs						    u8 ppfid, u8 filter_idx,
503337517Sdavidcs						    const char *action)
504337517Sdavidcs{
505337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
506337517Sdavidcs
507337517Sdavidcs	if (ppfid >= p_llh_info->num_ppfid) {
508337517Sdavidcs		DP_NOTICE(p_dev, false,
509337517Sdavidcs			  "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
510337517Sdavidcs			  action, ppfid, p_llh_info->num_ppfid);
511337517Sdavidcs		return ECORE_INVAL;
512337517Sdavidcs	}
513337517Sdavidcs
514337517Sdavidcs	if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
515337517Sdavidcs		DP_NOTICE(p_dev, false,
516337517Sdavidcs			  "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
517337517Sdavidcs			  action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
518337517Sdavidcs		return ECORE_INVAL;
519337517Sdavidcs	}
520337517Sdavidcs
521337517Sdavidcs	return ECORE_SUCCESS;
522337517Sdavidcs}
523337517Sdavidcs
524337517Sdavidcs#define ECORE_LLH_INVALID_FILTER_IDX	0xff
525337517Sdavidcs
526337517Sdavidcsstatic enum _ecore_status_t
527337517Sdavidcsecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid,
528337517Sdavidcs			       union ecore_llh_filter *p_filter,
529337517Sdavidcs			       u8 *p_filter_idx)
530337517Sdavidcs{
531337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
532337517Sdavidcs	struct ecore_llh_filter_info *p_filters;
533337517Sdavidcs	enum _ecore_status_t rc;
534337517Sdavidcs	u8 i;
535337517Sdavidcs
536337517Sdavidcs	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search");
537337517Sdavidcs	if (rc != ECORE_SUCCESS)
538337517Sdavidcs		return rc;
539337517Sdavidcs
540337517Sdavidcs	*p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
541337517Sdavidcs
542337517Sdavidcs	p_filters = p_llh_info->pp_filters[ppfid];
543337517Sdavidcs	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
544337517Sdavidcs		if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter,
545337517Sdavidcs				 sizeof(*p_filter))) {
546337517Sdavidcs			*p_filter_idx = i;
547337517Sdavidcs			break;
548337517Sdavidcs		}
549337517Sdavidcs	}
550337517Sdavidcs
551337517Sdavidcs	return ECORE_SUCCESS;
552337517Sdavidcs}
553337517Sdavidcs
554337517Sdavidcsstatic enum _ecore_status_t
555337517Sdavidcsecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid,
556337517Sdavidcs			      u8 *p_filter_idx)
557337517Sdavidcs{
558337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
559337517Sdavidcs	struct ecore_llh_filter_info *p_filters;
560337517Sdavidcs	enum _ecore_status_t rc;
561337517Sdavidcs	u8 i;
562337517Sdavidcs
563337517Sdavidcs	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx");
564337517Sdavidcs	if (rc != ECORE_SUCCESS)
565337517Sdavidcs		return rc;
566337517Sdavidcs
567337517Sdavidcs	*p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
568337517Sdavidcs
569337517Sdavidcs	p_filters = p_llh_info->pp_filters[ppfid];
570337517Sdavidcs	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
571337517Sdavidcs		if (!p_filters[i].b_enabled) {
572337517Sdavidcs			*p_filter_idx = i;
573337517Sdavidcs			break;
574337517Sdavidcs		}
575337517Sdavidcs	}
576337517Sdavidcs
577337517Sdavidcs	return ECORE_SUCCESS;
578337517Sdavidcs}
579337517Sdavidcs
580337517Sdavidcsstatic enum _ecore_status_t
581337517Sdavidcs__ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx,
582337517Sdavidcs			      enum ecore_llh_filter_type type,
583337517Sdavidcs			      union ecore_llh_filter *p_filter, u32 *p_ref_cnt)
584337517Sdavidcs{
585337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
586337517Sdavidcs	struct ecore_llh_filter_info *p_filters;
587337517Sdavidcs	enum _ecore_status_t rc;
588337517Sdavidcs
589337517Sdavidcs	rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add");
590337517Sdavidcs	if (rc != ECORE_SUCCESS)
591337517Sdavidcs		return rc;
592337517Sdavidcs
593337517Sdavidcs	p_filters = p_llh_info->pp_filters[ppfid];
594337517Sdavidcs	if (!p_filters[filter_idx].ref_cnt) {
595337517Sdavidcs		p_filters[filter_idx].b_enabled = true;
596337517Sdavidcs		p_filters[filter_idx].type = type;
597337517Sdavidcs		OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter,
598337517Sdavidcs			    sizeof(p_filters[filter_idx].filter));
599337517Sdavidcs	}
600337517Sdavidcs
601337517Sdavidcs	*p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
602337517Sdavidcs
603337517Sdavidcs	return ECORE_SUCCESS;
604337517Sdavidcs}
605337517Sdavidcs
606337517Sdavidcsstatic enum _ecore_status_t
607337517Sdavidcsecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid,
608337517Sdavidcs			    enum ecore_llh_filter_type type,
609337517Sdavidcs			    union ecore_llh_filter *p_filter,
610337517Sdavidcs			    u8 *p_filter_idx, u32 *p_ref_cnt)
611337517Sdavidcs{
612337517Sdavidcs	enum _ecore_status_t rc;
613337517Sdavidcs
614337517Sdavidcs	/* Check if the same filter already exist */
615337517Sdavidcs	rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
616337517Sdavidcs					    p_filter_idx);
617337517Sdavidcs	if (rc != ECORE_SUCCESS)
618337517Sdavidcs		return rc;
619337517Sdavidcs
620337517Sdavidcs	/* Find a new entry in case of a new filter */
621337517Sdavidcs	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
622337517Sdavidcs		rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx);
623337517Sdavidcs		if (rc != ECORE_SUCCESS)
624337517Sdavidcs			return rc;
625337517Sdavidcs	}
626337517Sdavidcs
627337517Sdavidcs	/* No free entry was found */
628337517Sdavidcs	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
629337517Sdavidcs		DP_NOTICE(p_dev, false,
630337517Sdavidcs			  "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
631337517Sdavidcs			  ppfid);
632337517Sdavidcs		return ECORE_NORESOURCES;
633337517Sdavidcs	}
634337517Sdavidcs
635337517Sdavidcs	return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type,
636337517Sdavidcs					     p_filter, p_ref_cnt);
637337517Sdavidcs}
638337517Sdavidcs
639337517Sdavidcsstatic enum _ecore_status_t
640337517Sdavidcs__ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
641337517Sdavidcs				 u8 filter_idx, u32 *p_ref_cnt)
642337517Sdavidcs{
643337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
644337517Sdavidcs	struct ecore_llh_filter_info *p_filters;
645337517Sdavidcs	enum _ecore_status_t rc;
646337517Sdavidcs
647337517Sdavidcs	rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove");
648337517Sdavidcs	if (rc != ECORE_SUCCESS)
649337517Sdavidcs		return rc;
650337517Sdavidcs
651337517Sdavidcs	p_filters = p_llh_info->pp_filters[ppfid];
652337517Sdavidcs	if (!p_filters[filter_idx].ref_cnt) {
653337517Sdavidcs		DP_NOTICE(p_dev, false,
654337517Sdavidcs			  "LLH shadow: trying to remove a filter with ref_cnt=0\n");
655337517Sdavidcs		return ECORE_INVAL;
656337517Sdavidcs	}
657337517Sdavidcs
658337517Sdavidcs	*p_ref_cnt = --p_filters[filter_idx].ref_cnt;
659337517Sdavidcs	if (!p_filters[filter_idx].ref_cnt)
660337517Sdavidcs		OSAL_MEM_ZERO(&p_filters[filter_idx],
661337517Sdavidcs			      sizeof(p_filters[filter_idx]));
662337517Sdavidcs
663337517Sdavidcs	return ECORE_SUCCESS;
664337517Sdavidcs}
665337517Sdavidcs
/* Locate the shadow entry matching *p_filter on the given ppfid and drop one
 * reference from it (via __ecore_llh_shadow_remove_filter()). The entry's
 * index is returned through *p_filter_idx and the post-decrement reference
 * count through *p_ref_cnt.
 * Returns ECORE_INVAL if no matching shadow entry exists.
 */
static enum _ecore_status_t
ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
			       union ecore_llh_filter *p_filter,
			       u8 *p_filter_idx, u32 *p_ref_cnt)
{
	enum _ecore_status_t rc;

	rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
					    p_filter_idx);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* No matching filter was found */
	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(p_dev, false,
			  "Failed to find a filter in the LLH shadow\n");
		return ECORE_INVAL;
	}

	return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx,
						p_ref_cnt);
}
688337517Sdavidcs
/* Clear all LLH shadow filter entries of the given ppfid in one shot.
 * NOTE(review): this resets only the shadow bookkeeping (ref counts and
 * filter data); removing the corresponding HW filters is presumably the
 * caller's responsibility - confirm against the call sites.
 */
static enum _ecore_status_t
ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;

	/* Validate the ppfid (filter index 0 is always in range) */
	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all");
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Zero the whole per-ppfid filter array */
	p_filters = p_llh_info->pp_filters[ppfid];
	OSAL_MEM_ZERO(p_filters,
		      NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters));

	return ECORE_SUCCESS;
}
706337517Sdavidcs
707337517Sdavidcsstatic enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
708337517Sdavidcs					    u8 rel_ppfid, u8 *p_abs_ppfid)
709337517Sdavidcs{
710337517Sdavidcs	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
711337517Sdavidcs
712337517Sdavidcs	if (rel_ppfid >= p_llh_info->num_ppfid) {
713337517Sdavidcs		DP_NOTICE(p_dev, false,
714337517Sdavidcs			  "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
715337517Sdavidcs			  rel_ppfid, (u8)(p_llh_info->num_ppfid - 1));
716337517Sdavidcs		return ECORE_INVAL;
717337517Sdavidcs	}
718337517Sdavidcs
719337517Sdavidcs	*p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid];
720337517Sdavidcs
721337517Sdavidcs	return ECORE_SUCCESS;
722337517Sdavidcs}
723337517Sdavidcs
/* Apply the engine affinity configuration on a CMT device:
 * - RoCE traffic is bound to a single engine, selected by p_dev->fir_affin
 *   (presumably populated by ecore_mcp_get_engine_config() - confirm).
 * - Storage (FCoE/iSCSI) PFs are bound to a single engine as well, while L2
 *   PFs use both engines.
 * The non-RoCE affinity is applied to every ppfid of the device.
 */
static enum _ecore_status_t
__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum ecore_eng eng;
	u8 ppfid;
	enum _ecore_status_t rc;

	/* ECORE_NOTIMPL is tolerated - treated as "no engine config
	 * available"; only other failures abort.
	 */
	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the engine affinity configuration\n");
		return rc;
	}

	/* RoCE PF is bound to a single engine */
	if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
		eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
		rc = ecore_llh_set_roce_affinity(p_dev, eng);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "Failed to set the RoCE engine affinity\n");
			return rc;
		}

		DP_VERBOSE(p_dev, ECORE_MSG_SP,
			   "LLH: Set the engine affinity of RoCE packets as %d\n",
			   eng);
	}

	/* Storage PF is bound to a single engine while L2 PF uses both */
	if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
	    ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
		eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
	else /* L2_PERSONALITY */
		eng = ECORE_BOTH_ENG;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "Failed to set the engine affinity of ppfid %d\n",
				  ppfid);
			return rc;
		}
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return ECORE_SUCCESS;
}
777337517Sdavidcs
/* Set the engine affinity, either in backwards compatible mode (engine 0 for
 * everything except L2 PFs, which use connection-based classification) when
 * avoid_eng_affin is set, or per the MFW engine configuration otherwise (via
 * __ecore_llh_set_engine_affin()).
 */
static enum _ecore_status_t
ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   bool avoid_eng_affin)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc;

	/* Backwards compatible mode:
	 * - RoCE packets     - Use engine 0.
	 * - Non-RoCE packets - Use connection based classification for L2 PFs,
	 *                      and engine 0 otherwise.
	 */
	if (avoid_eng_affin) {
		enum ecore_eng eng;
		u8 ppfid;

		if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
			eng = ECORE_ENG0;
			rc = ecore_llh_set_roce_affinity(p_dev, eng);
			if (rc != ECORE_SUCCESS) {
				DP_NOTICE(p_dev, false,
					  "Failed to set the RoCE engine affinity\n");
				return rc;
			}

			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				   "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n",
				   eng);
		}

		/* Storage PFs pinned to engine 0; L2 PFs use both engines */
		eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
		       ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0
							   : ECORE_BOTH_ENG;
		for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
			rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
			if (rc != ECORE_SUCCESS) {
				DP_NOTICE(p_dev, false,
					  "Failed to set the engine affinity of ppfid %d\n",
					  ppfid);
				return rc;
			}
		}

		DP_VERBOSE(p_dev, ECORE_MSG_SP,
			   "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n",
			   eng);

		return ECORE_SUCCESS;
	}

	return __ecore_llh_set_engine_affin(p_hwfn, p_ptt);
}
830337517Sdavidcs
/* Per-PF LLH initialization:
 * 1. Map each of the device's ppfids to this PF in the PPFID2PFID table.
 * 2. When LLH MAC classification is enabled (and the PF is not FCoE), add an
 *    LLH filter with the PF's primary MAC; failure here is logged but not
 *    fatal.
 * 3. On CMT devices, program the engine affinity.
 */
static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 bool avoid_eng_affin)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 ppfid, abs_ppfid;
	enum _ecore_status_t rc;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		u32 addr;

		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* PPFID2PFID table: one 32-bit entry per absolute ppfid */
		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
		ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
	}

	if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
	    !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
		rc = ecore_llh_add_mac_filter(p_dev, 0,
					      p_hwfn->hw_info.hw_mac_addr);
		if (rc != ECORE_SUCCESS)
			DP_NOTICE(p_dev, false,
				  "Failed to add an LLH filter with the primary MAC\n");
	}

	if (ECORE_IS_CMT(p_dev)) {
		rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return ECORE_SUCCESS;
}
867337517Sdavidcs
868337517Sdavidcsu8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev)
869337517Sdavidcs{
870337517Sdavidcs	return p_dev->p_llh_info->num_ppfid;
871337517Sdavidcs}
872337517Sdavidcs
873337517Sdavidcsenum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
874337517Sdavidcs{
875337517Sdavidcs	return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
876337517Sdavidcs}
877337517Sdavidcs
/* TBD - should be removed when these definitions are available in reg_addr.h.
 * Mask/shift pairs for the per-ppfid NIG_REG_PPF_TO_ENGINE_SEL register,
 * consumed via SET_FIELD() below: bits [1:0] select the engine for RoCE
 * traffic, bits [3:2] for non-RoCE traffic.
 */
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT		0
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT	2
883337517Sdavidcs
/* Program the non-RoCE engine affinity of a single (relative) ppfid into
 * NIG_REG_PPF_TO_ENGINE_SEL. A no-op (returning success) on non-CMT devices.
 * Returns ECORE_AGAIN when no PTT window is available, ECORE_INVAL for a bad
 * ppfid or affinity value.
 */
enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
						  u8 ppfid, enum ecore_eng eng)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 abs_ppfid;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	/* Engine affinity is only meaningful on CMT devices */
	if (!ECORE_IS_CMT(p_dev))
		goto out;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto out;

	/* Map the enum to the HW encoding: 0/1 = single engine, 2 = both */
	switch (eng) {
	case ECORE_ENG0:
		eng_sel = 0;
		break;
	case ECORE_ENG1:
		eng_sel = 1;
		break;
	case ECORE_BOTH_ENG:
		eng_sel = 2;
		break;
	default:
		DP_NOTICE(p_dev, false,
			  "Invalid affinity value for ppfid [%d]\n", eng);
		rc = ECORE_INVAL;
		goto out;
	}

	/* Read-modify-write only the NON_ROCE field of the register */
	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	val = ecore_rd(p_hwfn, p_ptt, addr);
	SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
	ecore_wr(p_hwfn, p_ptt, addr, val);

	/* The iWARP affinity is set as the affinity of ppfid 0 */
	if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn))
		p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
933337517Sdavidcs
/* Program the RoCE engine affinity into the ROCE field of
 * NIG_REG_PPF_TO_ENGINE_SEL for every ppfid of the device. For
 * ECORE_BOTH_ENG, the QP-select register is also configured so traffic is
 * spread across engines. A no-op (returning success) on non-CMT devices.
 * Returns ECORE_AGAIN when no PTT window is available, ECORE_INVAL for a bad
 * affinity value.
 */
enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
						 enum ecore_eng eng)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 ppfid, abs_ppfid;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	/* Engine affinity is only meaningful on CMT devices */
	if (!ECORE_IS_CMT(p_dev))
		goto out;

	/* Map the enum to the HW encoding: 0/1 = single engine, 2 = both */
	switch (eng) {
	case ECORE_ENG0:
		eng_sel = 0;
		break;
	case ECORE_ENG1:
		eng_sel = 1;
		break;
	case ECORE_BOTH_ENG:
		eng_sel = 2;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
			 0xf /* QP bit 15 */);
		break;
	default:
		DP_NOTICE(p_dev, false,
			  "Invalid affinity value for RoCE [%d]\n", eng);
		rc = ECORE_INVAL;
		goto out;
	}

	/* Read-modify-write only the ROCE field for each ppfid */
	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
		if (rc != ECORE_SUCCESS)
			goto out;

		addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
		ecore_wr(p_hwfn, p_ptt, addr, val);
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
983337517Sdavidcs
/* In-memory image of a single E4 NIG/LLH filter's register fields, used by
 * ecore_llh_access_filter_e4() for both reads and writes.
 */
struct ecore_llh_filter_e4_details {
	u64 value;		/* filter value - occupies two register dwords */
	u32 mode;		/* 1 = protocol-based, 0 = MAC-based classification */
	u32 protocol_type;	/* protocol type bitmap (meaningful when mode=1) */
	u32 hdr_sel;		/* header select */
	u32 enable;		/* filter enable bit */
};
991337517Sdavidcs
/* Read (b_write_access == false) or write all fields of the E4 LLH filter at
 * [abs_ppfid, filter_idx], to/from *p_details. The 64-bit filter value is
 * transferred with DMAE on behalf of the ppfid's PF; the scalar fields use
 * the pretend-capable ecore_ppfid_rd/wr() accessors.
 * Write ordering: the enable bit is written first when disabling a filter and
 * last when enabling one, so a filter is never active with stale fields.
 */
static enum _ecore_status_t
ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
			   struct ecore_llh_filter_e4_details *p_details,
			   bool b_write_access)
{
	u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;
	u32 addr;

	/* The NIG/LLH registers that are accessed in this function have only 16
	 * rows which are exposed to a PF. I.e. only the 16 filters of its
	 * default ppfid
	 * Accessing filters of other ppfids requires pretending to other PFs,
	 * and thus the usage of the ecore_ppfid_rd/wr() functions.
	 */

	/* Filter enable - should be done first when removing a filter */
	if (b_write_access && !p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->enable);
	}

	/* Filter value - two consecutive dwords per filter, hence the DMAE
	 * transfer of size 2 with the PF pretend flags.
	 */
	addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
	OSAL_MEMSET(&params, 0, sizeof(params));

	if (b_write_access) {
		params.flags = ECORE_DMAE_FLAG_PF_DST;
		params.dst_pfid = pfid;
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (u64)(osal_uintptr_t)&p_details->value,
					 addr, 2 /* size_in_dwords */, &params);
	} else {
		params.flags = ECORE_DMAE_FLAG_PF_SRC |
			       ECORE_DMAE_FLAG_COMPLETION_DST;
		params.src_pfid = pfid;
		rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
					 (u64)(osal_uintptr_t)&p_details->value,
					 2 /* size_in_dwords */, &params);
	}

	if (rc != ECORE_SUCCESS)
		return rc;

	/* Filter mode */
	addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
	else
		p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
						 addr);

	/* Filter protocol type */
	addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->protocol_type);
	else
		p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt,
							  abs_ppfid, addr);

	/* Filter header select */
	addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->hdr_sel);
	else
		p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
						    addr);

	/* Filter enable - should be done last when adding a filter */
	if (!b_write_access || p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
		if (b_write_access)
			ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
				       p_details->enable);
		else
			p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt,
							   abs_ppfid, addr);
	}

	return ECORE_SUCCESS;
}
1078337517Sdavidcs
1079337517Sdavidcsstatic enum _ecore_status_t
1080337517Sdavidcsecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1081337517Sdavidcs			u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
1082337517Sdavidcs			u32 high, u32 low)
1083337517Sdavidcs{
1084337517Sdavidcs	struct ecore_llh_filter_e4_details filter_details;
1085337517Sdavidcs
1086337517Sdavidcs	filter_details.enable = 1;
1087337517Sdavidcs	filter_details.value = ((u64)high << 32) | low;
1088337517Sdavidcs	filter_details.hdr_sel = 0;
1089337517Sdavidcs	filter_details.protocol_type = filter_prot_type;
1090337517Sdavidcs	filter_details.mode = filter_prot_type ?
1091337517Sdavidcs			      1 : /* protocol-based classification */
1092337517Sdavidcs			      0;  /* MAC-address based classification */
1093337517Sdavidcs
1094337517Sdavidcs	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
1095337517Sdavidcs					  &filter_details,
1096337517Sdavidcs					  true /* write access */);
1097337517Sdavidcs}
1098337517Sdavidcs
1099337517Sdavidcsstatic enum _ecore_status_t
1100337517Sdavidcsecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
1101337517Sdavidcs			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
1102337517Sdavidcs{
1103337517Sdavidcs	struct ecore_llh_filter_e4_details filter_details;
1104337517Sdavidcs
1105337517Sdavidcs	OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
1106337517Sdavidcs
1107337517Sdavidcs	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
1108337517Sdavidcs					  &filter_details,
1109337517Sdavidcs					  true /* write access */);
1110337517Sdavidcs}
1111337517Sdavidcs
/* E5 counterpart of ecore_llh_add_filter_e4() - not implemented yet; always
 * returns ECORE_NOTIMPL.
 * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings.
 * Should be removed when the function is implemented.
 */
static enum _ecore_status_t
ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			struct ecore_ptt OSAL_UNUSED *p_ptt,
			u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx,
			u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high,
			u32 OSAL_UNUSED low)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
1126337517Sdavidcs
/* E5 counterpart of ecore_llh_remove_filter_e4() - not implemented yet;
 * always returns ECORE_NOTIMPL.
 * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings.
 * Should be removed when the function is implemented.
 */
static enum _ecore_status_t
ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			   struct ecore_ptt OSAL_UNUSED *p_ptt,
			   u8 OSAL_UNUSED abs_ppfid,
			   u8 OSAL_UNUSED filter_idx)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
1140337517Sdavidcs
1141337517Sdavidcsstatic enum _ecore_status_t
1142337517Sdavidcsecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1143337517Sdavidcs		     u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
1144337517Sdavidcs		     u32 low)
1145337517Sdavidcs{
1146337517Sdavidcs	if (ECORE_IS_E4(p_hwfn->p_dev))
1147337517Sdavidcs		return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
1148337517Sdavidcs					       filter_idx, filter_prot_type,
1149337517Sdavidcs					       high, low);
1150337517Sdavidcs	else /* E5 */
1151337517Sdavidcs		return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid,
1152337517Sdavidcs					       filter_idx, filter_prot_type,
1153337517Sdavidcs					       high, low);
1154337517Sdavidcs}
1155337517Sdavidcs
1156337517Sdavidcsstatic enum _ecore_status_t
1157337517Sdavidcsecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1158337517Sdavidcs			u8 abs_ppfid, u8 filter_idx)
1159337517Sdavidcs{
1160337517Sdavidcs	if (ECORE_IS_E4(p_hwfn->p_dev))
1161337517Sdavidcs		return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
1162337517Sdavidcs						  filter_idx);
1163337517Sdavidcs	else /* E5 */
1164337517Sdavidcs		return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid,
1165337517Sdavidcs						  filter_idx);
1166337517Sdavidcs}
1167337517Sdavidcs
/* Add (or take another reference on) a MAC classification filter for the
 * given ppfid. The shadow is updated first; the HW filter is programmed only
 * when this is the first reference (ref_cnt == 1). A no-op (returning
 * success) when MAC classification is disabled in mf_bits.
 * Returns ECORE_AGAIN when no PTT window is available.
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
					      u8 mac_addr[ETH_ALEN])
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	union ecore_llh_filter filter;
	u8 filter_idx, abs_ppfid;
	u32 high, low, ref_cnt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		goto out;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
					 ECORE_LLH_FILTER_TYPE_MAC,
					 &filter, &filter_idx, &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* NOTE(review): if this fails, the shadow reference taken above is
	 * not rolled back - verify whether that is intentional.
	 */
	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		/* Pack the MAC into the 64-bit filter value: bytes [0..1]
		 * into 'high', bytes [2..5] into 'low'.
		 */
		high = mac_addr[1] | (mac_addr[0] << 8);
		low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
		      (mac_addr[2] << 24);
		rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					  0, high, low);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		   mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
		   ref_cnt);

	goto out;

err:
	DP_NOTICE(p_dev, false,
		  "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n",
		  mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5], ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
1225337517Sdavidcs
/* Format a human-readable description of a protocol filter into 'str'
 * (used for log messages). Returns ECORE_INVAL for an unknown filter type,
 * in which case 'str' is left untouched.
 */
static enum _ecore_status_t
ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev,
				    enum ecore_llh_prot_filter_type_t type,
				    u16 source_port_or_eth_type, u16 dest_port,
				    u8 *str, osal_size_t str_len)
{
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port);
		break;
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
			      source_port_or_eth_type, dest_port);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
			      source_port_or_eth_type, dest_port);
		break;
	default:
		DP_NOTICE(p_dev, true,
			  "Non valid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
1267337517Sdavidcs
/* Translate a protocol filter into the high/low dwords of the 64-bit HW
 * filter value: ethertype goes into the high dword; port filters go into the
 * low dword (source port in bits [31:16], dest port in bits [15:0]).
 * Returns ECORE_INVAL for an unknown filter type; both outputs are zeroed
 * before the translation.
 */
static enum _ecore_status_t
ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
				  enum ecore_llh_prot_filter_type_t type,
				  u16 source_port_or_eth_type, u16 dest_port,
				  u32 *p_high, u32 *p_low)
{
	*p_high = 0;
	*p_low = 0;

	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		*p_high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		*p_low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		*p_low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		*p_low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_dev, true,
			  "Non valid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
1301337517Sdavidcs
1302337517Sdavidcsenum _ecore_status_t
1303337517Sdavidcsecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
1304337517Sdavidcs			      enum ecore_llh_prot_filter_type_t type,
1305337517Sdavidcs			      u16 source_port_or_eth_type, u16 dest_port)
1306337517Sdavidcs{
1307337517Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1308337517Sdavidcs	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
1309337517Sdavidcs	u8 filter_idx, abs_ppfid, str[32], type_bitmap;
1310337517Sdavidcs	union ecore_llh_filter filter;
1311337517Sdavidcs	u32 high, low, ref_cnt;
1312337517Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
1313337517Sdavidcs
1314337517Sdavidcs	if (p_ptt == OSAL_NULL)
1315337517Sdavidcs		return ECORE_AGAIN;
1316337517Sdavidcs
1317337517Sdavidcs	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
1318337517Sdavidcs		goto out;
1319337517Sdavidcs
1320337517Sdavidcs	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
1321337517Sdavidcs						 source_port_or_eth_type,
1322337517Sdavidcs						 dest_port, str, sizeof(str));
1323337517Sdavidcs	if (rc != ECORE_SUCCESS)
1324337517Sdavidcs		goto err;
1325337517Sdavidcs
1326337517Sdavidcs	OSAL_MEM_ZERO(&filter, sizeof(filter));
1327337517Sdavidcs	filter.protocol.type = type;
1328337517Sdavidcs	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
1329337517Sdavidcs	filter.protocol.dest_port = dest_port;
1330337517Sdavidcs	rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
1331337517Sdavidcs					 ECORE_LLH_FILTER_TYPE_PROTOCOL,
1332337517Sdavidcs					 &filter, &filter_idx, &ref_cnt);
1333337517Sdavidcs	if (rc != ECORE_SUCCESS)
1334337517Sdavidcs		goto err;
1335337517Sdavidcs
1336337517Sdavidcs	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
1337337517Sdavidcs	if (rc != ECORE_SUCCESS)
1338337517Sdavidcs		goto err;
1339337517Sdavidcs
1340337517Sdavidcs	/* Configure the LLH only in case of a new the filter */
1341337517Sdavidcs	if (ref_cnt == 1) {
1342337517Sdavidcs		rc = ecore_llh_protocol_filter_to_hilo(p_dev, type,
1343337517Sdavidcs						       source_port_or_eth_type,
1344337517Sdavidcs						       dest_port, &high, &low);
1345337517Sdavidcs		if (rc != ECORE_SUCCESS)
1346337517Sdavidcs			goto err;
1347337517Sdavidcs
1348337517Sdavidcs		type_bitmap = 0x1 << type;
1349337517Sdavidcs		rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
1350337517Sdavidcs					  type_bitmap, high, low);
1351337517Sdavidcs		if (rc != ECORE_SUCCESS)
1352337517Sdavidcs			goto err;
1353337517Sdavidcs	}
1354337517Sdavidcs
1355337517Sdavidcs	DP_VERBOSE(p_dev, ECORE_MSG_SP,
1356337517Sdavidcs		   "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
1357337517Sdavidcs		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);
1358337517Sdavidcs
1359337517Sdavidcs	goto out;
1360337517Sdavidcs
1361337517Sdavidcserr:
1362337517Sdavidcs	DP_NOTICE(p_hwfn, false,
1363337517Sdavidcs		  "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
1364337517Sdavidcs		  str, ppfid);
1365337517Sdavidcsout:
1366337517Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
1367337517Sdavidcs
1368337517Sdavidcs	return rc;
1369337517Sdavidcs}
1370337517Sdavidcs
/* Drop one reference from a MAC classification filter on the given ppfid.
 * The HW filter is removed only when the reference count drops to zero.
 * A no-op when MAC classification is disabled in mf_bits or when no PTT
 * window is available; failures are logged but not returned.
 */
void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 mac_addr[ETH_ALEN])
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	union ecore_llh_filter filter;
	u8 filter_idx, abs_ppfid;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 ref_cnt;

	if (p_ptt == OSAL_NULL)
		return;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		goto out;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
					    &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					     filter_idx);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		   mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
		   ref_cnt);

	goto out;

err:
	DP_NOTICE(p_dev, false,
		  "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n",
		  mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5], ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);
}
1422337517Sdavidcs
1423337517Sdavidcsvoid ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
1424337517Sdavidcs				      enum ecore_llh_prot_filter_type_t type,
1425337517Sdavidcs				      u16 source_port_or_eth_type,
1426337517Sdavidcs				      u16 dest_port)
1427337517Sdavidcs{
1428337517Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1429337517Sdavidcs	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
1430337517Sdavidcs	u8 filter_idx, abs_ppfid, str[32];
1431337517Sdavidcs	union ecore_llh_filter filter;
1432337517Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
1433337517Sdavidcs	u32 ref_cnt;
1434337517Sdavidcs
1435337517Sdavidcs	if (p_ptt == OSAL_NULL)
1436337517Sdavidcs		return;
1437337517Sdavidcs
1438337517Sdavidcs	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
1439337517Sdavidcs		goto out;
1440337517Sdavidcs
1441337517Sdavidcs	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
1442337517Sdavidcs						 source_port_or_eth_type,
1443337517Sdavidcs						 dest_port, str, sizeof(str));
1444337517Sdavidcs	if (rc != ECORE_SUCCESS)
1445337517Sdavidcs		goto err;
1446337517Sdavidcs
1447337517Sdavidcs	OSAL_MEM_ZERO(&filter, sizeof(filter));
1448337517Sdavidcs	filter.protocol.type = type;
1449337517Sdavidcs	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
1450337517Sdavidcs	filter.protocol.dest_port = dest_port;
1451337517Sdavidcs	rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
1452337517Sdavidcs					    &ref_cnt);
1453337517Sdavidcs	if (rc != ECORE_SUCCESS)
1454337517Sdavidcs		goto err;
1455337517Sdavidcs
1456337517Sdavidcs	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
1457337517Sdavidcs	if (rc != ECORE_SUCCESS)
1458337517Sdavidcs		goto err;
1459337517Sdavidcs
1460337517Sdavidcs	/* Remove from the LLH in case the filter is not in use */
1461337517Sdavidcs	if (!ref_cnt) {
1462337517Sdavidcs		rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
1463337517Sdavidcs					     filter_idx);
1464337517Sdavidcs		if (rc != ECORE_SUCCESS)
1465337517Sdavidcs			goto err;
1466337517Sdavidcs	}
1467337517Sdavidcs
1468337517Sdavidcs	DP_VERBOSE(p_dev, ECORE_MSG_SP,
1469337517Sdavidcs		   "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
1470337517Sdavidcs		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);
1471337517Sdavidcs
1472337517Sdavidcs	goto out;
1473337517Sdavidcs
1474337517Sdavidcserr:
1475337517Sdavidcs	DP_NOTICE(p_dev, false,
1476337517Sdavidcs		  "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
1477337517Sdavidcs		  str, ppfid);
1478337517Sdavidcsout:
1479337517Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
1480337517Sdavidcs}
1481337517Sdavidcs
1482337517Sdavidcsvoid ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
1483337517Sdavidcs{
1484337517Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1485337517Sdavidcs	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
1486337517Sdavidcs	u8 filter_idx, abs_ppfid;
1487337517Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
1488337517Sdavidcs
1489337517Sdavidcs	if (p_ptt == OSAL_NULL)
1490337517Sdavidcs		return;
1491337517Sdavidcs
1492337517Sdavidcs	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
1493337517Sdavidcs	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
1494337517Sdavidcs		goto out;
1495337517Sdavidcs
1496337517Sdavidcs	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
1497337517Sdavidcs	if (rc != ECORE_SUCCESS)
1498337517Sdavidcs		goto out;
1499337517Sdavidcs
1500337517Sdavidcs	rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid);
1501337517Sdavidcs	if (rc != ECORE_SUCCESS)
1502337517Sdavidcs		goto out;
1503337517Sdavidcs
1504337517Sdavidcs	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
1505337517Sdavidcs	     filter_idx++) {
1506337517Sdavidcs		if (ECORE_IS_E4(p_dev))
1507337517Sdavidcs			rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
1508337517Sdavidcs							abs_ppfid, filter_idx);
1509337517Sdavidcs		else /* E5 */
1510337517Sdavidcs			rc = ecore_llh_remove_filter_e5(p_hwfn, p_ptt,
1511337517Sdavidcs							abs_ppfid, filter_idx);
1512337517Sdavidcs		if (rc != ECORE_SUCCESS)
1513337517Sdavidcs			goto out;
1514337517Sdavidcs	}
1515337517Sdavidcsout:
1516337517Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
1517337517Sdavidcs}
1518337517Sdavidcs
/* Clears the LLH filters of every PPF ID on the device.
 * No-op unless MAC- or protocol-based LLH classification is enabled.
 */
void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
{
	u8 ppfid;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		return;

	/* Iterate over all PPF IDs tracked in the device's LLH info */
	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
		ecore_llh_clear_ppfid_filters(p_dev, ppfid);
}
1530337517Sdavidcs
1531337517Sdavidcsenum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
1532337517Sdavidcs					 struct ecore_ptt *p_ptt, u32 addr,
1533337517Sdavidcs					 u32 val)
1534337517Sdavidcs{
1535337517Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
1536337517Sdavidcs	u8 ppfid, abs_ppfid;
1537337517Sdavidcs	enum _ecore_status_t rc;
1538337517Sdavidcs
1539337517Sdavidcs	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
1540337517Sdavidcs		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
1541337517Sdavidcs		if (rc != ECORE_SUCCESS)
1542337517Sdavidcs			return rc;
1543337517Sdavidcs
1544337517Sdavidcs		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val);
1545337517Sdavidcs	}
1546337517Sdavidcs
1547337517Sdavidcs	return ECORE_SUCCESS;
1548337517Sdavidcs}
1549337517Sdavidcs
1550337517Sdavidcsstatic enum _ecore_status_t
1551337517Sdavidcsecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1552337517Sdavidcs			u8 ppfid)
1553337517Sdavidcs{
1554337517Sdavidcs	struct ecore_llh_filter_e4_details filter_details;
1555337517Sdavidcs	u8 abs_ppfid, filter_idx;
1556337517Sdavidcs	u32 addr;
1557337517Sdavidcs	enum _ecore_status_t rc;
1558337517Sdavidcs
1559337517Sdavidcs	rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
1560337517Sdavidcs	if (rc != ECORE_SUCCESS)
1561337517Sdavidcs		return rc;
1562337517Sdavidcs
1563337517Sdavidcs	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
1564337517Sdavidcs	DP_NOTICE(p_hwfn, false,
1565337517Sdavidcs		  "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n",
1566337517Sdavidcs		  p_hwfn->rel_pf_id, ppfid, abs_ppfid,
1567337517Sdavidcs		  ecore_rd(p_hwfn, p_ptt, addr));
1568337517Sdavidcs
1569337517Sdavidcs	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
1570337517Sdavidcs	     filter_idx++) {
1571337517Sdavidcs		OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
1572337517Sdavidcs		rc =  ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
1573337517Sdavidcs						 filter_idx, &filter_details,
1574337517Sdavidcs						 false /* read access */);
1575337517Sdavidcs		if (rc != ECORE_SUCCESS)
1576337517Sdavidcs			return rc;
1577337517Sdavidcs
1578337517Sdavidcs		DP_NOTICE(p_hwfn, false,
1579337517Sdavidcs			  "filter %2hhd: enable %d, value 0x%016llx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
1580337517Sdavidcs			  filter_idx, filter_details.enable,
1581337517Sdavidcs			  (unsigned long long)filter_details.value, filter_details.mode,
1582337517Sdavidcs			  filter_details.protocol_type, filter_details.hdr_sel);
1583337517Sdavidcs	}
1584337517Sdavidcs
1585337517Sdavidcs	return ECORE_SUCCESS;
1586337517Sdavidcs}
1587337517Sdavidcs
/* E5 variant of the per-PPFID LLH dump; not implemented yet, always returns
 * ECORE_NOTIMPL.
 */
static enum _ecore_status_t
ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			struct ecore_ptt OSAL_UNUSED *p_ptt,
			u8 OSAL_UNUSED ppfid)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
1597337517Sdavidcs
1598337517Sdavidcsenum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
1599337517Sdavidcs{
1600337517Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1601337517Sdavidcs	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
1602337517Sdavidcs	enum _ecore_status_t rc;
1603337517Sdavidcs
1604337517Sdavidcs	if (p_ptt == OSAL_NULL)
1605337517Sdavidcs		return ECORE_AGAIN;
1606337517Sdavidcs
1607337517Sdavidcs	if (ECORE_IS_E4(p_dev))
1608337517Sdavidcs		rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
1609337517Sdavidcs	else /* E5 */
1610337517Sdavidcs		rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid);
1611337517Sdavidcs
1612337517Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
1613337517Sdavidcs
1614337517Sdavidcs	return rc;
1615337517Sdavidcs}
1616337517Sdavidcs
1617337517Sdavidcsenum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
1618337517Sdavidcs{
1619337517Sdavidcs	u8 ppfid;
1620337517Sdavidcs	enum _ecore_status_t rc;
1621337517Sdavidcs
1622337517Sdavidcs	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
1623337517Sdavidcs		rc = ecore_llh_dump_ppfid(p_dev, ppfid);
1624337517Sdavidcs		if (rc != ECORE_SUCCESS)
1625337517Sdavidcs			return rc;
1626337517Sdavidcs	}
1627337517Sdavidcs
1628337517Sdavidcs	return ECORE_SUCCESS;
1629337517Sdavidcs}
1630337517Sdavidcs
1631337517Sdavidcs/******************************* NIG LLH - End ********************************/
1632337517Sdavidcs
1633316485Sdavidcs/* Configurable */
1634316485Sdavidcs#define ECORE_MIN_DPIS		(4)  /* The minimal number of DPIs required to
1635316485Sdavidcs				      * load the driver. The number was
1636316485Sdavidcs				      * arbitrarily set.
1637316485Sdavidcs				      */
1638316485Sdavidcs
1639316485Sdavidcs/* Derived */
1640320164Sdavidcs#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)
1641316485Sdavidcs
1642320164Sdavidcsstatic u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
1643320164Sdavidcs			     struct ecore_ptt *p_ptt,
1644320164Sdavidcs			     enum BAR_ID bar_id)
1645316485Sdavidcs{
1646316485Sdavidcs	u32 bar_reg = (bar_id == BAR_ID_0 ?
1647316485Sdavidcs		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
1648316485Sdavidcs	u32 val;
1649316485Sdavidcs
1650337517Sdavidcs	if (IS_VF(p_hwfn->p_dev))
1651337517Sdavidcs		return ecore_vf_hw_bar_size(p_hwfn, bar_id);
1652316485Sdavidcs
1653320164Sdavidcs	val = ecore_rd(p_hwfn, p_ptt, bar_reg);
1654316485Sdavidcs	if (val)
1655316485Sdavidcs		return 1 << (val + 15);
1656316485Sdavidcs
1657316485Sdavidcs	/* The above registers were updated in the past only in CMT mode. Since
1658316485Sdavidcs	 * they were found to be useful MFW started updating them from 8.7.7.0.
1659316485Sdavidcs	 * In older MFW versions they are set to 0 which means disabled.
1660316485Sdavidcs	 */
1661337517Sdavidcs	if (ECORE_IS_CMT(p_hwfn->p_dev)) {
1662320164Sdavidcs		DP_INFO(p_hwfn,
1663320164Sdavidcs			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
1664316485Sdavidcs		return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
1665316485Sdavidcs	} else {
1666320164Sdavidcs		DP_INFO(p_hwfn,
1667320164Sdavidcs			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
1668316485Sdavidcs		return 512 * 1024;
1669316485Sdavidcs	}
1670316485Sdavidcs}
1671316485Sdavidcs
1672316485Sdavidcsvoid ecore_init_dp(struct ecore_dev	*p_dev,
1673316485Sdavidcs		   u32			dp_module,
1674316485Sdavidcs		   u8			dp_level,
1675316485Sdavidcs		   void		 *dp_ctx)
1676316485Sdavidcs{
1677316485Sdavidcs	u32 i;
1678316485Sdavidcs
1679316485Sdavidcs	p_dev->dp_level = dp_level;
1680316485Sdavidcs	p_dev->dp_module = dp_module;
1681316485Sdavidcs	p_dev->dp_ctx = dp_ctx;
1682316485Sdavidcs	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
1683316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1684316485Sdavidcs
1685316485Sdavidcs		p_hwfn->dp_level = dp_level;
1686316485Sdavidcs		p_hwfn->dp_module = dp_module;
1687316485Sdavidcs		p_hwfn->dp_ctx = dp_ctx;
1688316485Sdavidcs	}
1689316485Sdavidcs}
1690316485Sdavidcs
1691337517Sdavidcsenum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
1692316485Sdavidcs{
1693316485Sdavidcs	u8 i;
1694316485Sdavidcs
1695316485Sdavidcs	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
1696316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1697316485Sdavidcs
1698316485Sdavidcs		p_hwfn->p_dev = p_dev;
1699316485Sdavidcs		p_hwfn->my_id = i;
1700316485Sdavidcs		p_hwfn->b_active = false;
1701316485Sdavidcs
1702320164Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
1703337517Sdavidcs		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
1704337517Sdavidcs			goto handle_err;
1705320164Sdavidcs#endif
1706337517Sdavidcs		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
1707316485Sdavidcs	}
1708316485Sdavidcs
1709316485Sdavidcs	/* hwfn 0 is always active */
1710316485Sdavidcs	p_dev->hwfns[0].b_active = true;
1711316485Sdavidcs
1712316485Sdavidcs	/* set the default cache alignment to 128 (may be overridden later) */
1713316485Sdavidcs	p_dev->cache_shift = 7;
1714337517Sdavidcs
1715337517Sdavidcs	p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE;
1716337517Sdavidcs
1717337517Sdavidcs	return ECORE_SUCCESS;
1718337517Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
1719337517Sdavidcshandle_err:
1720337517Sdavidcs	while (--i) {
1721337517Sdavidcs		struct ecore_hwfn *p_hwfn = OSAL_NULL;
1722337517Sdavidcs
1723337517Sdavidcs		p_hwfn = &p_dev->hwfns[i];
1724337517Sdavidcs		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
1725337517Sdavidcs	}
1726337517Sdavidcs	return ECORE_NOMEM;
1727337517Sdavidcs#endif
1728316485Sdavidcs}
1729316485Sdavidcs
1730316485Sdavidcsstatic void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
1731316485Sdavidcs{
1732316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1733316485Sdavidcs
1734316485Sdavidcs	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
1735316485Sdavidcs	qm_info->qm_pq_params = OSAL_NULL;
1736316485Sdavidcs	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
1737316485Sdavidcs	qm_info->qm_vport_params = OSAL_NULL;
1738316485Sdavidcs	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
1739316485Sdavidcs	qm_info->qm_port_params = OSAL_NULL;
1740316485Sdavidcs	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
1741316485Sdavidcs	qm_info->wfq_data = OSAL_NULL;
1742316485Sdavidcs}
1743316485Sdavidcs
/* Frees the resources allocated for all HW-functions of the device.
 * A VF releases only its L2 queue state; a PF additionally frees firmware
 * data, reset statistics, the LLH info and, per hwfn, the context manager,
 * QM info, SPQ/EQ/ConsQ, interrupts and protocol-specific state.
 */
void ecore_resc_free(struct ecore_dev *p_dev)
{
	int i;

	/* VFs own only their L2 queue resources */
	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_free(&p_dev->hwfns[i]);
		return;
	}

	OSAL_FREE(p_dev, p_dev->fw_data);
	p_dev->fw_data = OSAL_NULL;

	OSAL_FREE(p_dev, p_dev->reset_stats);
	p_dev->reset_stats = OSAL_NULL;

	ecore_llh_free(p_dev);

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_free(p_hwfn);
		ecore_qm_info_free(p_hwfn);
		ecore_spq_free(p_hwfn);
		ecore_eq_free(p_hwfn);
		ecore_consq_free(p_hwfn);
		ecore_int_free(p_hwfn);
#ifdef CONFIG_ECORE_LL2
		ecore_ll2_free(p_hwfn);
#endif
		/* Protocol-specific teardown depends on the PF personality */
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_free(p_hwfn);

		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_free(p_hwfn);
			ecore_ooo_free(p_hwfn);
		}

#ifdef CONFIG_ECORE_ROCE
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
			ecore_rdma_info_free(p_hwfn);
#endif
		ecore_iov_free(p_hwfn);
		ecore_l2_free(p_hwfn);
		ecore_dmae_info_free(p_hwfn);
		ecore_dcbx_info_free(p_hwfn);
		/* @@@TBD Flush work-queue ?*/

		/* destroy doorbell recovery mechanism */
		ecore_db_recovery_teardown(p_hwfn);
	}
}
1796316485Sdavidcs
1797316485Sdavidcs/******************** QM initialization *******************/
1798316485Sdavidcs/* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
1799316485Sdavidcs#define ACTIVE_TCS_BMAP 0x9f /* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
1800316485Sdavidcs#define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
1801316485Sdavidcs
1802316485Sdavidcs/* determines the physical queue flags for a given PF. */
1803316485Sdavidcsstatic u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
1804316485Sdavidcs{
1805316485Sdavidcs	u32 flags;
1806316485Sdavidcs
1807316485Sdavidcs	/* common flags */
1808316485Sdavidcs	flags = PQ_FLAGS_LB;
1809316485Sdavidcs
1810316485Sdavidcs	/* feature flags */
1811316485Sdavidcs	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
1812316485Sdavidcs		flags |= PQ_FLAGS_VFS;
1813316485Sdavidcs	if (IS_ECORE_DCQCN(p_hwfn))
1814316485Sdavidcs		flags |= PQ_FLAGS_RLS;
1815316485Sdavidcs
1816316485Sdavidcs	/* protocol flags */
1817316485Sdavidcs	switch (p_hwfn->hw_info.personality) {
1818316485Sdavidcs	case ECORE_PCI_ETH:
1819316485Sdavidcs		flags |= PQ_FLAGS_MCOS;
1820316485Sdavidcs		break;
1821316485Sdavidcs	case ECORE_PCI_FCOE:
1822316485Sdavidcs		flags |= PQ_FLAGS_OFLD;
1823316485Sdavidcs		break;
1824316485Sdavidcs	case ECORE_PCI_ISCSI:
1825316485Sdavidcs		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
1826316485Sdavidcs		break;
1827316485Sdavidcs	case ECORE_PCI_ETH_ROCE:
1828316485Sdavidcs		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
1829316485Sdavidcs		break;
1830316485Sdavidcs	case ECORE_PCI_ETH_IWARP:
1831316485Sdavidcs		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
1832316485Sdavidcs		break;
1833316485Sdavidcs	default:
1834316485Sdavidcs		DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
1835316485Sdavidcs		return 0;
1836316485Sdavidcs	}
1837316485Sdavidcs
1838316485Sdavidcs	return flags;
1839316485Sdavidcs}
1840316485Sdavidcs
1841316485Sdavidcs
1842316485Sdavidcs/* Getters for resource amounts necessary for qm initialization */
/* Returns the number of HW traffic classes configured for this PF. */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}
1847316485Sdavidcs
/* Returns the total number of VFs when SRIOV is enabled, otherwise 0. */
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
{
	return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
}
1852316485Sdavidcs
1853316485Sdavidcs#define NUM_DEFAULT_RLS 1
1854316485Sdavidcs
1855316485Sdavidcsu16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
1856316485Sdavidcs{
1857316485Sdavidcs	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
1858316485Sdavidcs
1859316485Sdavidcs	/* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */
1860316485Sdavidcs	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
1861316485Sdavidcs				     (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT),
1862316485Sdavidcs						     ROCE_DCQCN_RP_MAX_QPS));
1863316485Sdavidcs
1864316485Sdavidcs	/* make sure after we reserve the default and VF rls we'll have something left */
1865316485Sdavidcs	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
1866316485Sdavidcs		if (IS_ECORE_DCQCN(p_hwfn))
1867316485Sdavidcs			DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
1868316485Sdavidcs		return 0;
1869316485Sdavidcs	}
1870316485Sdavidcs
1871316485Sdavidcs	/* subtract rls necessary for VFs and one default one for the PF */
1872316485Sdavidcs	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
1873316485Sdavidcs
1874316485Sdavidcs	return num_pf_rls;
1875316485Sdavidcs}
1876316485Sdavidcs
/* Returns the number of vports this PF needs: one shared vport plus one per
 * PF rate limiter and one per VF, when those features are enabled.
 */
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	/* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
}
1885316485Sdavidcs
/* calc amount of PQs according to the requested flags */
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	/* each '!!' term contributes its per-feature count (or 1 for the
	 * single-PQ flags) only when the corresponding flag is set
	 */
	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) +
	       (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn);
}
1900316485Sdavidcs
1901316485Sdavidcs/* initialize the top level QM params */
1902316485Sdavidcsstatic void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
1903316485Sdavidcs{
1904316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1905316485Sdavidcs	bool four_port;
1906316485Sdavidcs
1907316485Sdavidcs	/* pq and vport bases for this PF */
1908316485Sdavidcs	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
1909316485Sdavidcs	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
1910316485Sdavidcs
1911316485Sdavidcs	/* rate limiting and weighted fair queueing are always enabled */
1912316485Sdavidcs	qm_info->vport_rl_en = 1;
1913316485Sdavidcs	qm_info->vport_wfq_en = 1;
1914316485Sdavidcs
1915316485Sdavidcs	/* TC config is different for AH 4 port */
1916320164Sdavidcs	four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
1917316485Sdavidcs
1918316485Sdavidcs	/* in AH 4 port we have fewer TCs per port */
1919316485Sdavidcs	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
1920316485Sdavidcs
1921316485Sdavidcs	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
1922316485Sdavidcs	if (!qm_info->ooo_tc)
1923316485Sdavidcs		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
1924316485Sdavidcs}
1925316485Sdavidcs
1926316485Sdavidcs/* initialize qm vport params */
1927316485Sdavidcsstatic void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
1928316485Sdavidcs{
1929316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1930316485Sdavidcs	u8 i;
1931316485Sdavidcs
1932316485Sdavidcs	/* all vports participate in weighted fair queueing */
1933316485Sdavidcs	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
1934316485Sdavidcs		qm_info->qm_vport_params[i].vport_wfq = 1;
1935316485Sdavidcs}
1936316485Sdavidcs
1937316485Sdavidcs/* initialize qm port params */
1938316485Sdavidcsstatic void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
1939316485Sdavidcs{
1940316485Sdavidcs	/* Initialize qm port parameters */
1941320164Sdavidcs	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
1942316485Sdavidcs
1943316485Sdavidcs	/* indicate how ooo and high pri traffic is dealt with */
1944316485Sdavidcs	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
1945316485Sdavidcs		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
1946316485Sdavidcs
1947316485Sdavidcs	for (i = 0; i < num_ports; i++) {
1948316485Sdavidcs		struct init_qm_port_params *p_qm_port =
1949316485Sdavidcs			&p_hwfn->qm_info.qm_port_params[i];
1950316485Sdavidcs
1951316485Sdavidcs		p_qm_port->active = 1;
1952316485Sdavidcs		p_qm_port->active_phys_tcs = active_phys_tcs;
1953320164Sdavidcs		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
1954316485Sdavidcs		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
1955316485Sdavidcs	}
1956316485Sdavidcs}
1957316485Sdavidcs
/* Reset the params which must be reset for qm init. QM init may be called as
 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 * params may be affected by the init but would simply recalculate to the same
 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
 * affected as these amounts stay the same.
 */
static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	/* zero the counters and base indices that QM init recomputes */
	qm_info->num_pqs = 0;
	qm_info->num_vports = 0;
	qm_info->num_pf_rls = 0;
	qm_info->num_vf_pqs = 0;
	qm_info->first_vf_pq = 0;
	qm_info->first_mcos_pq = 0;
	qm_info->first_rl_pq = 0;
}
1976316485Sdavidcs
/* Accounts for one more vport and warns (diagnostic only) when the running
 * total exceeds the amount budgeted by ecore_init_qm_get_num_vports().
 */
static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_vports++;

	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
}
1986316485Sdavidcs
1987316485Sdavidcs/* initialize a single pq and manage qm_info resources accounting.
1988316485Sdavidcs * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF)
1989316485Sdavidcs * and whether a new vport is allocated to the pq or not (i.e. vport will be shared)
1990316485Sdavidcs */
1991316485Sdavidcs
1992316485Sdavidcs/* flags for pq init */
1993316485Sdavidcs#define PQ_INIT_SHARE_VPORT	(1 << 0)
1994316485Sdavidcs#define PQ_INIT_PF_RL		(1 << 1)
1995316485Sdavidcs#define PQ_INIT_VF_RL		(1 << 2)
1996316485Sdavidcs
1997316485Sdavidcs/* defines for pq init */
1998316485Sdavidcs#define PQ_INIT_DEFAULT_WRR_GROUP	1
1999316485Sdavidcs#define PQ_INIT_DEFAULT_TC		0
2000316485Sdavidcs#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)
2001316485Sdavidcs
2002316485Sdavidcsstatic void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
2003316485Sdavidcs			     struct ecore_qm_info *qm_info,
2004316485Sdavidcs			     u8 tc, u32 pq_init_flags)
2005316485Sdavidcs{
2006316485Sdavidcs	u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);
2007316485Sdavidcs
2008316485Sdavidcs	if (pq_idx > max_pq)
2009316485Sdavidcs		DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
2010316485Sdavidcs
2011316485Sdavidcs	/* init pq params */
2012316485Sdavidcs	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
2013316485Sdavidcs	qm_info->qm_pq_params[pq_idx].tc_id = tc;
2014316485Sdavidcs	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
2015316485Sdavidcs	qm_info->qm_pq_params[pq_idx].rl_valid =
2016316485Sdavidcs		(pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
2017316485Sdavidcs
2018316485Sdavidcs	/* qm params accounting */
2019316485Sdavidcs	qm_info->num_pqs++;
2020316485Sdavidcs	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
2021316485Sdavidcs		qm_info->num_vports++;
2022316485Sdavidcs
2023316485Sdavidcs	if (pq_init_flags & PQ_INIT_PF_RL)
2024316485Sdavidcs		qm_info->num_pf_rls++;
2025316485Sdavidcs
2026316485Sdavidcs	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
2027316485Sdavidcs		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
2028316485Sdavidcs
2029316485Sdavidcs	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
2030316485Sdavidcs		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
2031316485Sdavidcs}
2032316485Sdavidcs
2033316485Sdavidcs/* get pq index according to PQ_FLAGS */
2034316485Sdavidcsstatic u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
2035316485Sdavidcs					     u32 pq_flags)
2036316485Sdavidcs{
2037316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2038316485Sdavidcs
2039316485Sdavidcs	/* Can't have multiple flags set here */
2040316485Sdavidcs	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
2041316485Sdavidcs		goto err;
2042316485Sdavidcs
2043316485Sdavidcs	switch (pq_flags) {
2044316485Sdavidcs	case PQ_FLAGS_RLS:
2045316485Sdavidcs		return &qm_info->first_rl_pq;
2046316485Sdavidcs	case PQ_FLAGS_MCOS:
2047316485Sdavidcs		return &qm_info->first_mcos_pq;
2048316485Sdavidcs	case PQ_FLAGS_LB:
2049316485Sdavidcs		return &qm_info->pure_lb_pq;
2050316485Sdavidcs	case PQ_FLAGS_OOO:
2051316485Sdavidcs		return &qm_info->ooo_pq;
2052316485Sdavidcs	case PQ_FLAGS_ACK:
2053316485Sdavidcs		return &qm_info->pure_ack_pq;
2054316485Sdavidcs	case PQ_FLAGS_OFLD:
2055316485Sdavidcs		return &qm_info->offload_pq;
2056316485Sdavidcs	case PQ_FLAGS_LLT:
2057316485Sdavidcs		return &qm_info->low_latency_pq;
2058316485Sdavidcs	case PQ_FLAGS_VFS:
2059316485Sdavidcs		return &qm_info->first_vf_pq;
2060316485Sdavidcs	default:
2061316485Sdavidcs		goto err;
2062316485Sdavidcs	}
2063316485Sdavidcs
2064316485Sdavidcserr:
2065316485Sdavidcs	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
2066316485Sdavidcs	return OSAL_NULL;
2067316485Sdavidcs}
2068316485Sdavidcs
2069316485Sdavidcs/* save pq index in qm info */
2070316485Sdavidcsstatic void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
2071316485Sdavidcs				  u32 pq_flags, u16 pq_val)
2072316485Sdavidcs{
2073316485Sdavidcs	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
2074316485Sdavidcs
2075316485Sdavidcs	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
2076316485Sdavidcs}
2077316485Sdavidcs
2078316485Sdavidcs/* get tx pq index, with the PQ TX base already set (ready for context init) */
2079316485Sdavidcsu16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
2080316485Sdavidcs{
2081316485Sdavidcs	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
2082316485Sdavidcs
2083316485Sdavidcs	return *base_pq_idx + CM_TX_PQ_BASE;
2084316485Sdavidcs}
2085316485Sdavidcs
2086316485Sdavidcsu16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
2087316485Sdavidcs{
2088316485Sdavidcs	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
2089316485Sdavidcs
2090316485Sdavidcs	if (tc > max_tc)
2091316485Sdavidcs		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
2092316485Sdavidcs
2093316485Sdavidcs	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
2094316485Sdavidcs}
2095316485Sdavidcs
2096316485Sdavidcsu16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
2097316485Sdavidcs{
2098316485Sdavidcs	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
2099316485Sdavidcs
2100316485Sdavidcs	if (vf > max_vf)
2101316485Sdavidcs		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
2102316485Sdavidcs
2103316485Sdavidcs	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
2104316485Sdavidcs}
2105316485Sdavidcs
2106316485Sdavidcsu16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
2107316485Sdavidcs{
2108316485Sdavidcs	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
2109316485Sdavidcs
2110316485Sdavidcs	if (rl > max_rl)
2111316485Sdavidcs		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
2112316485Sdavidcs
2113316485Sdavidcs	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
2114316485Sdavidcs}
2115316485Sdavidcs
2116316485Sdavidcs/* Functions for creating specific types of pqs */
2117316485Sdavidcsstatic void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
2118316485Sdavidcs{
2119316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2120316485Sdavidcs
2121316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
2122316485Sdavidcs		return;
2123316485Sdavidcs
2124316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
2125316485Sdavidcs	ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
2126316485Sdavidcs}
2127316485Sdavidcs
2128316485Sdavidcsstatic void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
2129316485Sdavidcs{
2130316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2131316485Sdavidcs
2132316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
2133316485Sdavidcs		return;
2134316485Sdavidcs
2135316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
2136316485Sdavidcs	ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
2137316485Sdavidcs}
2138316485Sdavidcs
2139316485Sdavidcsstatic void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
2140316485Sdavidcs{
2141316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2142316485Sdavidcs
2143316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
2144316485Sdavidcs		return;
2145316485Sdavidcs
2146316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
2147316485Sdavidcs	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2148316485Sdavidcs}
2149316485Sdavidcs
2150316485Sdavidcsstatic void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
2151316485Sdavidcs{
2152316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2153316485Sdavidcs
2154316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
2155316485Sdavidcs		return;
2156316485Sdavidcs
2157316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
2158316485Sdavidcs	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2159316485Sdavidcs}
2160316485Sdavidcs
2161316485Sdavidcsstatic void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
2162316485Sdavidcs{
2163316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2164316485Sdavidcs
2165316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
2166316485Sdavidcs		return;
2167316485Sdavidcs
2168316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
2169316485Sdavidcs	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2170316485Sdavidcs}
2171316485Sdavidcs
2172316485Sdavidcsstatic void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
2173316485Sdavidcs{
2174316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2175316485Sdavidcs	u8 tc_idx;
2176316485Sdavidcs
2177316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
2178316485Sdavidcs		return;
2179316485Sdavidcs
2180316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
2181316485Sdavidcs	for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
2182316485Sdavidcs		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
2183316485Sdavidcs}
2184316485Sdavidcs
2185316485Sdavidcsstatic void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
2186316485Sdavidcs{
2187316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2188316485Sdavidcs	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
2189316485Sdavidcs
2190316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
2191316485Sdavidcs		return;
2192316485Sdavidcs
2193316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
2194316485Sdavidcs	qm_info->num_vf_pqs = num_vfs;
2195316485Sdavidcs	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
2196316485Sdavidcs		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
2197316485Sdavidcs}
2198316485Sdavidcs
2199316485Sdavidcsstatic void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
2200316485Sdavidcs{
2201316485Sdavidcs	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
2202316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2203316485Sdavidcs
2204316485Sdavidcs	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
2205316485Sdavidcs		return;
2206316485Sdavidcs
2207316485Sdavidcs	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
2208316485Sdavidcs	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
2209316485Sdavidcs		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
2210316485Sdavidcs}
2211316485Sdavidcs
/* Build the per-pq parameter table. The call order below is significant:
 * it determines the pq indices within this PF's range, and the firmware
 * additionally requires the rate-limited pqs to occupy the first slots.
 * Do not reorder.
 */
static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	ecore_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	ecore_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	ecore_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	ecore_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	ecore_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	ecore_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	ecore_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports - all pqs above share vports; the VF pqs
	 * below each get their own vport
	 */
	ecore_init_qm_advance_vport(p_hwfn);

	/* pqs for vfs */
	ecore_init_qm_vf_pqs(p_hwfn);
}
2241316485Sdavidcs
2242316485Sdavidcs/* compare values of getters against resources amounts */
2243316485Sdavidcsstatic enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
2244316485Sdavidcs{
2245316485Sdavidcs	if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
2246316485Sdavidcs		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
2247316485Sdavidcs		return ECORE_INVAL;
2248316485Sdavidcs	}
2249316485Sdavidcs
2250316485Sdavidcs	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
2251316485Sdavidcs		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
2252316485Sdavidcs		return ECORE_INVAL;
2253316485Sdavidcs	}
2254316485Sdavidcs
2255316485Sdavidcs	return ECORE_SUCCESS;
2256316485Sdavidcs}
2257316485Sdavidcs
/*
 * Function for verbose printing of the qm initialization results.
 * Purely diagnostic (ECORE_MSG_HW verbosity); dumps the top-level QM
 * parameters followed by the full port, vport and pq tables.
 */
static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));

	/* port table */
	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved);
	}

	/* vport table - the per-TC first_tx_pq_id list is printed piecewise,
	 * one DP_VERBOSE per element
	 */
	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
	}

	/* pq table */
	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
			   qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid);
	}
}
2301316485Sdavidcs
/* (Re)compute the whole of p_hwfn->qm_info from the current configuration.
 * Assumes the qm_* parameter arrays were already allocated by
 * ecore_alloc_qm_data(); the steps below fill them in and must run in this
 * order (pq params depend on the top-level/vport values set first).
 */
static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
{
	/* reset params required for init run */
	ecore_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	ecore_init_qm_params(p_hwfn);

	/* init QM port params */
	ecore_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	ecore_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	ecore_init_qm_pq_params(p_hwfn);

	/* display all that init */
	ecore_dp_init_qm_params(p_hwfn);
}
2322316485Sdavidcs
/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 *
 * Returns ECORE_INVAL if either the stop or the release command is not
 * acknowledged, or the status of the init-tool run otherwise. The global
 * qm_lock serializes the stop/start commands against other flows issuing
 * QM commands.
 */
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	enum _ecore_status_t rc;

	/* initialize ecore's qm data structure */
	ecore_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	ecore_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	ecore_qm_init_pf(p_hwfn, p_ptt, false);

	/* activate init tool on runtime array */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			    p_hwfn->hw_info.hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* start PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}
2371316485Sdavidcs
2372316485Sdavidcsstatic enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
2373316485Sdavidcs{
2374316485Sdavidcs	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2375316485Sdavidcs	enum _ecore_status_t rc;
2376316485Sdavidcs
2377316485Sdavidcs	rc = ecore_init_qm_sanity(p_hwfn);
2378316485Sdavidcs	if (rc != ECORE_SUCCESS)
2379316485Sdavidcs		goto alloc_err;
2380316485Sdavidcs
2381316485Sdavidcs	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2382316485Sdavidcs					    sizeof(struct init_qm_pq_params) *
2383316485Sdavidcs					    ecore_init_qm_get_num_pqs(p_hwfn));
2384316485Sdavidcs	if (!qm_info->qm_pq_params)
2385316485Sdavidcs		goto alloc_err;
2386316485Sdavidcs
2387316485Sdavidcs	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2388316485Sdavidcs					       sizeof(struct init_qm_vport_params) *
2389316485Sdavidcs					       ecore_init_qm_get_num_vports(p_hwfn));
2390316485Sdavidcs	if (!qm_info->qm_vport_params)
2391316485Sdavidcs		goto alloc_err;
2392316485Sdavidcs
2393316485Sdavidcs	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2394316485Sdavidcs					      sizeof(struct init_qm_port_params) *
2395320164Sdavidcs					      p_hwfn->p_dev->num_ports_in_engine);
2396316485Sdavidcs	if (!qm_info->qm_port_params)
2397316485Sdavidcs		goto alloc_err;
2398316485Sdavidcs
2399316485Sdavidcs	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2400316485Sdavidcs					sizeof(struct ecore_wfq_data) *
2401316485Sdavidcs					ecore_init_qm_get_num_vports(p_hwfn));
2402316485Sdavidcs	if (!qm_info->wfq_data)
2403316485Sdavidcs		goto alloc_err;
2404316485Sdavidcs
2405316485Sdavidcs	return ECORE_SUCCESS;
2406316485Sdavidcs
2407316485Sdavidcsalloc_err:
2408316485Sdavidcs	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
2409316485Sdavidcs	ecore_qm_info_free(p_hwfn);
2410316485Sdavidcs	return ECORE_NOMEM;
2411316485Sdavidcs}
2412316485Sdavidcs/******************** End QM initialization ***************/
2413316485Sdavidcs
/* Allocate all per-device and per-hwfn software resources (context manager,
 * QM data, ILT tables, SPQ/EQ/ConsQ, protocol-specific structures, etc.).
 * For VFs only the L2 structures are allocated. On any failure all
 * resources allocated so far are released via ecore_resc_free().
 * The allocation order below is significant (e.g. SPQ must follow ILT,
 * and the EQ is sized from the SPQ chain); do not reorder.
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	/* VFs own almost no HW resources; only L2 structures are needed */
	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i) {
			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
			if (rc != ECORE_SUCCESS)
				return rc;
		}
		return rc;
	}

	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
				     sizeof(*p_dev->fw_data));
	if (!p_dev->fw_data)
		return ECORE_NOMEM;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u32 n_eqes, num_cons;

		/* initialize the doorbell recovery mechanism */
		rc = ecore_db_recovery_setup(p_hwfn);
		if (rc)
			goto alloc_err;

		/* First allocate the context manager structure */
		rc = ecore_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = ecore_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		ecore_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks = ecore_cxt_cfg_ilt_compute_excess(
					p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n",
				       line_count);

				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The tables sizes are determined by the computations above
		 */
		rc = ecore_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = ecore_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
							   RESERVED_PTT_DPC);

		rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = ecore_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
			u32 n_srq = ecore_cxt_get_total_srq_count(p_hwfn);

			/* Calculate the EQ size
			 * ---------------------
			 * Each ICID may generate up to one event at a time i.e.
			 * the event must be handled/cleared before a new one
			 * can be generated. We calculate the sum of events per
			 * protocol and create an EQ deep enough to handle the
			 * worst case:
			 * - Core - according to SPQ.
			 * - RoCE - per QP there are a couple of ICIDs, one
			 *	  responder and one requester, each can
			 *	  generate max 2 EQE (err+qp_destroyed) =>
			 *	  n_eqes_qp = 4 * n_qp.
			 *	  Each CQ can generate an EQE. There are 2 CQs
			 *	  per QP => n_eqes_cq = 2 * n_qp.
			 *	  Hence the RoCE total is 6 * n_qp or
			 *	  3 * num_cons.
			 *	  On top of that one eqe should be added for
			 *	  each XRC SRQ and SRQ.
			 * - iWARP - can generate three async per QP (error
			 *	  detected and qp in error) and an
			 *	  additional error per CQ. 4 * num_cons.
			 *	  On top of that one eqe should be added for
			 *	  each SRQ and XRC SRQ.
			 * - ENet - There can be up to two events per VF. One
			 *	  for VF-PF channel and another for VF FLR
			 *	  initial cleanup. The number of VFs is
			 *	  bounded by MAX_NUM_VFS_BB, and is much
			 *	  smaller than RoCE's so we avoid exact
			 *	  calculation.
			 */
			if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
				num_cons = ecore_cxt_get_proto_cid_count(
					p_hwfn, PROTOCOLID_ROCE, OSAL_NULL);
				num_cons *= 3;
			} else {
				num_cons = ecore_cxt_get_proto_cid_count(
						p_hwfn, PROTOCOLID_IWARP,
						OSAL_NULL);
				num_cons *= 4;
			}
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
		} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			num_cons = ecore_cxt_get_proto_cid_count(
					p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL);
			n_eqes += 2 * num_cons;
		}

		/* Cap the EQ depth; the chain element count must also fit in
		 * the u16 passed to ecore_eq_alloc() below.
		 */
		if (n_eqes > 0xFF00) {
			DP_ERR(p_hwfn, "EQs maxing out at 0xFF00 elements\n");
			n_eqes = 0xFF00;
		}

		rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = ecore_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = ecore_l2_alloc(p_hwfn);
		if (rc != ECORE_SUCCESS)
			goto alloc_err;

#ifdef CONFIG_ECORE_LL2
		if (p_hwfn->using_ll2) {
			rc = ecore_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
			rc = ecore_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			rc = ecore_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;

			rc = ecore_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#ifdef CONFIG_ECORE_ROCE
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
			rc = ecore_rdma_info_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		/* DMA info initialization */
		rc = ecore_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = ecore_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	rc = ecore_llh_alloc(p_dev);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate memory for the llh_info structure\n");
		goto alloc_err;
	}

	p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					 sizeof(*p_dev->reset_stats));
	if (!p_dev->reset_stats) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate reset statistics\n");
		goto alloc_no_mem;
	}

	return ECORE_SUCCESS;

alloc_no_mem:
	rc = ECORE_NOMEM;
alloc_err:
	ecore_resc_free(p_dev);
	return rc;
}
2652316485Sdavidcs
/* Second-stage initialization of the resources allocated by
 * ecore_resc_alloc(): wires up the per-hwfn structures and snapshots the
 * current MFW mailbox. For VFs only the L2 setup is performed.
 */
void ecore_resc_setup(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_setup(&p_dev->hwfns[i]);
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_setup(p_hwfn);
		ecore_spq_setup(p_hwfn);
		ecore_eq_setup(p_hwfn);
		ecore_consq_setup(p_hwfn);

		/* Read shadow of current MFW mailbox */
		ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
			    p_hwfn->mcp_info->mfw_mb_cur,
			    p_hwfn->mcp_info->mfw_mb_length);

		ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		ecore_l2_setup(p_hwfn);
		ecore_iov_setup(p_hwfn);
#ifdef CONFIG_ECORE_LL2
		if (p_hwfn->using_ll2)
			ecore_ll2_setup(p_hwfn);
#endif
		/* protocol-specific setup, keyed off the PF personality */
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_setup(p_hwfn);

		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_setup(p_hwfn);
			ecore_ooo_setup(p_hwfn);
		}
	}
}
2694316485Sdavidcs
/* FLR final-cleanup handshake polling parameters: up to 100 polls of the
 * ack location, 10 ms apart (so up to ~1 second total).
 */
#define FINAL_CLEANUP_POLL_CNT	(100)
#define FINAL_CLEANUP_POLL_TIME	(10)

/* Request the firmware's "final cleanup" for function @id (a PF, or a VF
 * when @is_vf is set) and poll the USTORM ack location until the firmware
 * acknowledges. Returns ECORE_SUCCESS on ack, ECORE_TIMEOUT otherwise.
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
	    CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
		return ECORE_SUCCESS;
	}
#endif

	/* address of the per-PF FLR final-ack word in USTORM RAM */
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	/* VF ids occupy a range offset by 0x10 from PF ids in the command */
	if (is_vf)
		id += 0x10;

	/* build the SDM aggregated-interrupt completion command */
	command |= X_FINAL_CLEANUP_AGG_INT <<
		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn, false,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);

	/* re-read after the loop: the ack may have landed on the final poll */
	if (REG_RD(p_hwfn, addr))
		rc = ECORE_SUCCESS;
	else
		DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards - clear the ack word for the next FLR cycle */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
2751316485Sdavidcs
2752316485Sdavidcsstatic enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
2753316485Sdavidcs{
2754316485Sdavidcs	int hw_mode = 0;
2755316485Sdavidcs
2756316485Sdavidcs	if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
2757316485Sdavidcs		hw_mode |= 1 << MODE_BB;
2758316485Sdavidcs	} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
2759316485Sdavidcs		hw_mode |= 1 << MODE_K2;
2760320164Sdavidcs	} else if (ECORE_IS_E5(p_hwfn->p_dev)) {
2761320164Sdavidcs		hw_mode |= 1 << MODE_E5;
2762316485Sdavidcs	} else {
2763316485Sdavidcs		DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
2764316485Sdavidcs			  p_hwfn->p_dev->type);
2765316485Sdavidcs		return ECORE_INVAL;
2766316485Sdavidcs	}
2767316485Sdavidcs
2768316485Sdavidcs	/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE*/
2769320164Sdavidcs	switch (p_hwfn->p_dev->num_ports_in_engine) {
2770316485Sdavidcs	case 1:
2771316485Sdavidcs		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
2772316485Sdavidcs		break;
2773316485Sdavidcs	case 2:
2774316485Sdavidcs		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
2775316485Sdavidcs		break;
2776316485Sdavidcs	case 4:
2777316485Sdavidcs		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
2778316485Sdavidcs		break;
2779316485Sdavidcs	default:
2780316485Sdavidcs		DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n",
2781320164Sdavidcs			  p_hwfn->p_dev->num_ports_in_engine);
2782316485Sdavidcs		return ECORE_INVAL;
2783316485Sdavidcs	}
2784316485Sdavidcs
2785337517Sdavidcs	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
2786337517Sdavidcs			  &p_hwfn->p_dev->mf_bits))
2787316485Sdavidcs		hw_mode |= 1 << MODE_MF_SD;
2788337517Sdavidcs	else
2789316485Sdavidcs		hw_mode |= 1 << MODE_MF_SI;
2790316485Sdavidcs
2791316485Sdavidcs#ifndef ASIC_ONLY
2792316485Sdavidcs	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2793316485Sdavidcs		if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2794316485Sdavidcs			hw_mode |= 1 << MODE_FPGA;
2795316485Sdavidcs		} else {
2796316485Sdavidcs			if (p_hwfn->p_dev->b_is_emul_full)
2797316485Sdavidcs				hw_mode |= 1 << MODE_EMUL_FULL;
2798316485Sdavidcs			else
2799316485Sdavidcs				hw_mode |= 1 << MODE_EMUL_REDUCED;
2800316485Sdavidcs		}
2801316485Sdavidcs	} else
2802316485Sdavidcs#endif
2803316485Sdavidcs	hw_mode |= 1 << MODE_ASIC;
2804316485Sdavidcs
2805337517Sdavidcs	if (ECORE_IS_CMT(p_hwfn->p_dev))
2806316485Sdavidcs		hw_mode |= 1 << MODE_100G;
2807316485Sdavidcs
2808316485Sdavidcs	p_hwfn->hw_info.hw_mode = hw_mode;
2809316485Sdavidcs
2810316485Sdavidcs	DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
2811316485Sdavidcs		   "Configuring function for hw_mode: 0x%08x\n",
2812316485Sdavidcs		   p_hwfn->hw_info.hw_mode);
2813316485Sdavidcs
2814316485Sdavidcs	return ECORE_SUCCESS;
2815316485Sdavidcs}
2816316485Sdavidcs
2817316485Sdavidcs#ifndef ASIC_ONLY
2818316485Sdavidcs/* MFW-replacement initializations for non-ASIC */
2819316485Sdavidcsstatic enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
2820316485Sdavidcs					       struct ecore_ptt *p_ptt)
2821316485Sdavidcs{
2822316485Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
2823316485Sdavidcs	u32 pl_hv = 1;
2824316485Sdavidcs	int i;
2825316485Sdavidcs
2826316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev)) {
2827316485Sdavidcs		if (ECORE_IS_AH(p_dev))
2828316485Sdavidcs			pl_hv |= 0x600;
2829316485Sdavidcs		else if (ECORE_IS_E5(p_dev))
2830316485Sdavidcs			ECORE_E5_MISSING_CODE;
2831316485Sdavidcs	}
2832316485Sdavidcs
2833316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
2834316485Sdavidcs
2835316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev) &&
2836316485Sdavidcs	    (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)))
2837316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
2838316485Sdavidcs			 0x3ffffff);
2839316485Sdavidcs
2840316485Sdavidcs	/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
2841316485Sdavidcs	/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
2842316485Sdavidcs	if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
2843316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
2844316485Sdavidcs
2845316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev)) {
2846316485Sdavidcs		if (ECORE_IS_AH(p_dev)) {
2847316485Sdavidcs			/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
2848316485Sdavidcs			ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
2849320164Sdavidcs				 (p_dev->num_ports_in_engine >> 1));
2850316485Sdavidcs
2851316485Sdavidcs			ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
2852320164Sdavidcs				 p_dev->num_ports_in_engine == 4 ? 0 : 3);
2853316485Sdavidcs		} else if (ECORE_IS_E5(p_dev)) {
2854316485Sdavidcs			ECORE_E5_MISSING_CODE;
2855316485Sdavidcs		}
2856316485Sdavidcs
2857320164Sdavidcs		/* Poll on RBC */
2858320164Sdavidcs		ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
2859320164Sdavidcs		for (i = 0; i < 100; i++) {
2860320164Sdavidcs			OSAL_UDELAY(50);
2861320164Sdavidcs			if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
2862320164Sdavidcs				break;
2863320164Sdavidcs		}
2864320164Sdavidcs		if (i == 100)
2865320164Sdavidcs			DP_NOTICE(p_hwfn, true,
2866320164Sdavidcs				  "RBC done failed to complete in PSWRQ2\n");
2867316485Sdavidcs	}
2868316485Sdavidcs
2869316485Sdavidcs	return ECORE_SUCCESS;
2870316485Sdavidcs}
2871316485Sdavidcs#endif
2872316485Sdavidcs
2873316485Sdavidcs/* Init run time data for all PFs and their VFs on an engine.
2874316485Sdavidcs * TBD - for VFs - Once we have parent PF info for each VF in
2875316485Sdavidcs * shmem available as CAU requires knowledge of parent PF for each VF.
2876316485Sdavidcs */
2877316485Sdavidcsstatic void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
2878316485Sdavidcs{
2879316485Sdavidcs	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
2880316485Sdavidcs	int i, igu_sb_id;
2881316485Sdavidcs
2882316485Sdavidcs	for_each_hwfn(p_dev, i) {
2883316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2884316485Sdavidcs		struct ecore_igu_info *p_igu_info;
2885316485Sdavidcs		struct ecore_igu_block *p_block;
2886316485Sdavidcs		struct cau_sb_entry sb_entry;
2887316485Sdavidcs
2888316485Sdavidcs		p_igu_info = p_hwfn->hw_info.p_igu_info;
2889316485Sdavidcs
2890316485Sdavidcs		for (igu_sb_id = 0;
2891316485Sdavidcs		     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
2892316485Sdavidcs		     igu_sb_id++) {
2893316485Sdavidcs			p_block = &p_igu_info->entry[igu_sb_id];
2894316485Sdavidcs
2895316485Sdavidcs			if (!p_block->is_pf)
2896316485Sdavidcs				continue;
2897316485Sdavidcs
2898316485Sdavidcs			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
2899316485Sdavidcs						p_block->function_id,
2900316485Sdavidcs						0, 0);
2901316485Sdavidcs			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
2902316485Sdavidcs					 sb_entry);
2903316485Sdavidcs		}
2904316485Sdavidcs	}
2905316485Sdavidcs}
2906316485Sdavidcs
2907316485Sdavidcsstatic void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
2908316485Sdavidcs				       struct ecore_ptt *p_ptt)
2909316485Sdavidcs{
2910316485Sdavidcs	u32 val, wr_mbs, cache_line_size;
2911316485Sdavidcs
2912316485Sdavidcs	val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
2913316485Sdavidcs	switch (val) {
2914316485Sdavidcs	case 0:
2915316485Sdavidcs		wr_mbs = 128;
2916316485Sdavidcs		break;
2917316485Sdavidcs	case 1:
2918316485Sdavidcs		wr_mbs = 256;
2919316485Sdavidcs		break;
2920316485Sdavidcs	case 2:
2921316485Sdavidcs		wr_mbs = 512;
2922316485Sdavidcs		break;
2923316485Sdavidcs	default:
2924316485Sdavidcs		DP_INFO(p_hwfn,
2925316485Sdavidcs			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
2926316485Sdavidcs			val);
2927316485Sdavidcs		return;
2928316485Sdavidcs	}
2929316485Sdavidcs
2930316485Sdavidcs	cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
2931316485Sdavidcs	switch (cache_line_size) {
2932316485Sdavidcs	case 32:
2933316485Sdavidcs		val = 0;
2934316485Sdavidcs		break;
2935316485Sdavidcs	case 64:
2936316485Sdavidcs		val = 1;
2937316485Sdavidcs		break;
2938316485Sdavidcs	case 128:
2939316485Sdavidcs		val = 2;
2940316485Sdavidcs		break;
2941316485Sdavidcs	case 256:
2942316485Sdavidcs		val = 3;
2943316485Sdavidcs		break;
2944316485Sdavidcs	default:
2945316485Sdavidcs		DP_INFO(p_hwfn,
2946316485Sdavidcs			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
2947316485Sdavidcs			cache_line_size);
2948316485Sdavidcs	}
2949316485Sdavidcs
2950316485Sdavidcs	if (OSAL_CACHE_LINE_SIZE > wr_mbs)
2951316485Sdavidcs		DP_INFO(p_hwfn,
2952316485Sdavidcs			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
2953316485Sdavidcs			OSAL_CACHE_LINE_SIZE, wr_mbs);
2954316485Sdavidcs
2955316485Sdavidcs	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
2956320164Sdavidcs	if (val > 0) {
2957320164Sdavidcs		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
2958320164Sdavidcs		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
2959320164Sdavidcs	}
2960316485Sdavidcs}
2961316485Sdavidcs
/* Engine-wide (PHASE_ENGINE) HW initialization: CAU runtime data, GTT
 * windows, QM common runtime init, the init-tool engine phase, and a set of
 * per-PF/per-VF register workarounds. Must complete before port/PF init.
 *
 * @param p_hwfn  - HW function performing the engine init
 * @param p_ptt   - PTT window for register access
 * @param hw_mode - hw_mode bitmask computed by ecore_calc_hw_mode()
 *
 * @return ECORE_SUCCESS, or the failure code of the chip/init-tool steps.
 */
static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int hw_mode)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	enum _ecore_status_t rc	= ECORE_SUCCESS;

	/* Fill the CAU runtime array before the init-tool run consumes it */
	ecore_init_cau_rt_data(p_dev);

	/* Program GTT windows */
	ecore_gtt_init(p_hwfn, p_ptt);

#ifndef ASIC_ONLY
	/* On emulation, do the initializations the MFW would otherwise do */
	if (CHIP_REV_IS_EMUL(p_dev)) {
		rc = ecore_hw_init_chip(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;
	}
#endif

	/* Enable PF rate limiting / WFQ only if MFW provided BW bounds */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	ecore_qm_common_rt_init(p_hwfn,
				p_dev->num_ports_in_engine,
				qm_info->max_phys_tcs_per_port,
				qm_info->pf_rl_en, qm_info->pf_wfq_en,
				qm_info->vport_rl_en, qm_info->vport_wfq_en,
				qm_info->qm_port_params);

	ecore_cxt_hw_init_common(p_hwfn);

	ecore_init_cache_line_size(p_hwfn, p_ptt);

	/* Run the init-tool engine phase with the computed hw_mode */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
			    hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* @@TBD MichalK - should add VALIDATE_VFID to init tool...
	 * need to decide with which value, maybe runtime
	 */
	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (ECORE_IS_BB(p_dev)) {
		/* Workaround clears ROCE search for all functions to prevent
		 * involving non initialized function in processing ROCE packet.
		 */
		num_pfs = NUM_OF_ENG_PFS(p_dev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	/* Workaround for avoiding CCFC execution error when getting packets
	 * with CRC errors, and allowing instead the invoking of the FW error
	 * handler.
	 * This is not done inside the init tool since it currently can't
	 * perform a pretending to VFs.
	 */
	max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
		ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}
3049316485Sdavidcs
3050316485Sdavidcs#ifndef ASIC_ONLY
3051316485Sdavidcs#define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4)
3052316485Sdavidcs#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5)
3053316485Sdavidcs
3054316485Sdavidcs#define PMEG_IF_BYTE_COUNT	8
3055316485Sdavidcs
3056316485Sdavidcsstatic void ecore_wr_nw_port(struct ecore_hwfn	*p_hwfn,
3057316485Sdavidcs			     struct ecore_ptt	*p_ptt,
3058316485Sdavidcs			     u32		addr,
3059316485Sdavidcs			     u64		data,
3060316485Sdavidcs			     u8			reg_type,
3061316485Sdavidcs			     u8			port)
3062316485Sdavidcs{
3063316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3064316485Sdavidcs		   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
3065316485Sdavidcs		   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
3066316485Sdavidcs		   (8 << PMEG_IF_BYTE_COUNT),
3067316485Sdavidcs		   (reg_type << 25) | (addr << 8) | port,
3068316485Sdavidcs		   (u32)((data >> 32) & 0xffffffff),
3069316485Sdavidcs		   (u32)(data & 0xffffffff));
3070316485Sdavidcs
3071316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
3072316485Sdavidcs		 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
3073316485Sdavidcs		  0xffff00fe) |
3074316485Sdavidcs		 (8 << PMEG_IF_BYTE_COUNT));
3075316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
3076316485Sdavidcs		 (reg_type << 25) | (addr << 8) | port);
3077316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
3078316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
3079316485Sdavidcs		 (data >> 32) & 0xffffffff);
3080316485Sdavidcs}
3081316485Sdavidcs
3082316485Sdavidcs#define XLPORT_MODE_REG	(0x20a)
3083316485Sdavidcs#define XLPORT_MAC_CONTROL (0x210)
3084316485Sdavidcs#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
3085316485Sdavidcs#define XLPORT_ENABLE_REG (0x20b)
3086316485Sdavidcs
3087316485Sdavidcs#define XLMAC_CTRL (0x600)
3088316485Sdavidcs#define XLMAC_MODE (0x601)
3089316485Sdavidcs#define XLMAC_RX_MAX_SIZE (0x608)
3090316485Sdavidcs#define XLMAC_TX_CTRL (0x604)
3091316485Sdavidcs#define XLMAC_PAUSE_CTRL (0x60d)
3092316485Sdavidcs#define XLMAC_PFC_CTRL (0x60e)
3093316485Sdavidcs
3094316485Sdavidcsstatic void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
3095316485Sdavidcs				    struct ecore_ptt *p_ptt)
3096316485Sdavidcs{
3097316485Sdavidcs	u8 loopback = 0, port = p_hwfn->port_id * 2;
3098316485Sdavidcs
3099316485Sdavidcs	DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
3100316485Sdavidcs
3101316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG,
3102316485Sdavidcs			 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */
3103316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
3104316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
3105316485Sdavidcs			 0x40, 0, port); /*XLMAC: SOFT RESET */
3106316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE,
3107316485Sdavidcs			 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */
3108316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE,
3109316485Sdavidcs			 0x3fff, 0, port); /* XLMAC: Max Size */
3110316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
3111316485Sdavidcs			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
3112316485Sdavidcs			 0, port);
3113316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL,
3114316485Sdavidcs			 0x7c000, 0, port);
3115316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
3116316485Sdavidcs			 0x30ffffc000ULL, 0, port);
3117316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
3118316485Sdavidcs			 0, port); /* XLMAC: TX_EN, RX_EN */
3119316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
3120316485Sdavidcs			 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
3121316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG,
3122316485Sdavidcs			 1, 0, port); /* Enabled Parallel PFC interface */
3123316485Sdavidcs	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG,
3124316485Sdavidcs			 0xf, 1, port); /* XLPORT port enable */
3125316485Sdavidcs}
3126316485Sdavidcs
3127316485Sdavidcsstatic void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
3128316485Sdavidcs				       struct ecore_ptt *p_ptt)
3129316485Sdavidcs{
3130316485Sdavidcs	u8 port = p_hwfn->port_id;
3131316485Sdavidcs	u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
3132316485Sdavidcs
3133316485Sdavidcs	DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
3134316485Sdavidcs
3135316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
3136316485Sdavidcs		 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
3137316485Sdavidcs		 (port <<
3138316485Sdavidcs		  CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
3139316485Sdavidcs		 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
3140316485Sdavidcs
3141316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
3142316485Sdavidcs		 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
3143316485Sdavidcs
3144316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
3145316485Sdavidcs		 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
3146316485Sdavidcs
3147316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
3148316485Sdavidcs		 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
3149316485Sdavidcs
3150316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
3151316485Sdavidcs		 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
3152316485Sdavidcs
3153316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
3154316485Sdavidcs		 (0xA <<
3155316485Sdavidcs		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
3156316485Sdavidcs		 (8 <<
3157316485Sdavidcs		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
3158316485Sdavidcs
3159316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
3160316485Sdavidcs		 0xa853);
3161316485Sdavidcs}
3162316485Sdavidcs
3163316485Sdavidcsstatic void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
3164316485Sdavidcs				 struct ecore_ptt *p_ptt)
3165316485Sdavidcs{
3166316485Sdavidcs	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev))
3167316485Sdavidcs		ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
3168316485Sdavidcs	else /* BB */
3169316485Sdavidcs		ecore_emul_link_init_bb(p_hwfn, p_ptt);
3170316485Sdavidcs
3171316485Sdavidcs	return;
3172316485Sdavidcs}
3173316485Sdavidcs
/* Non-ASIC BB link init: hard-reset and soft-reset the XMAC (with a 1 ms
 * settle delay between clear and set), then program port mode, max packet
 * size, Tx CRC append and Tx/Rx enables for the given port.
 * The register sequence and delays are order-sensitive.
 */
static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,  u8 port)
{
	/* Per-port register stride: port 1 registers live 0x800 above port 0 */
	int port_offset = port ? 0x800 : 0;
	u32 xmac_rxctrl	= 0;

	/* Reset of XMAC */
	/* FIXME: move to common start */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */

	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);

	/* Set the number of ports on the Warp Core to 10G */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);

	/* Soft reset of XMAC */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);

	/* FIXME: move to common end */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);

	/* Set Max packet size: initialize XMAC block register for port 0 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);

	/* CRC append for Tx packets: init XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);

	/* Enable TX and RX: initialize XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
		 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
	/* Read-modify-write: additionally accept variable preamble on Rx */
	xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
			       XMAC_REG_RX_CTRL_BB + port_offset);
	xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}
3218316485Sdavidcs#endif
3219316485Sdavidcs
3220316485Sdavidcsstatic enum _ecore_status_t
3221316485Sdavidcsecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
3222316485Sdavidcs		       struct ecore_ptt *p_ptt,
3223316485Sdavidcs		       u32 pwm_region_size,
3224316485Sdavidcs		       u32 n_cpus)
3225316485Sdavidcs{
3226337517Sdavidcs	u32 dpi_bit_shift, dpi_count, dpi_page_size;
3227316485Sdavidcs	u32 min_dpis;
3228337517Sdavidcs	u32 n_wids;
3229316485Sdavidcs
3230316485Sdavidcs	/* Calculate DPI size
3231316485Sdavidcs	 * ------------------
3232316485Sdavidcs	 * The PWM region contains Doorbell Pages. The first is reserverd for
3233316485Sdavidcs	 * the kernel for, e.g, L2. The others are free to be used by non-
3234316485Sdavidcs	 * trusted applications, typically from user space. Each page, called a
3235316485Sdavidcs	 * doorbell page is sectioned into windows that allow doorbells to be
3236316485Sdavidcs	 * issued in parallel by the kernel/application. The size of such a
3237316485Sdavidcs	 * window (a.k.a. WID) is 1kB.
3238316485Sdavidcs	 * Summary:
3239316485Sdavidcs	 *    1kB WID x N WIDS = DPI page size
3240316485Sdavidcs	 *    DPI page size x N DPIs = PWM region size
3241316485Sdavidcs	 * Notes:
3242316485Sdavidcs	 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE
3243316485Sdavidcs	 * in order to ensure that two applications won't share the same page.
3244316485Sdavidcs	 * It also must contain at least one WID per CPU to allow parallelism.
3245316485Sdavidcs	 * It also must be a power of 2, since it is stored as a bit shift.
3246316485Sdavidcs	 *
3247316485Sdavidcs	 * The DPI page size is stored in a register as 'dpi_bit_shift' so that
3248316485Sdavidcs	 * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
3249316485Sdavidcs	 * containing 4 WIDs.
3250316485Sdavidcs	 */
3251337517Sdavidcs	n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
3252337517Sdavidcs	dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
3253320164Sdavidcs	dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1);
3254316485Sdavidcs	dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
3255316485Sdavidcs	dpi_count = pwm_region_size / dpi_page_size;
3256316485Sdavidcs
3257316485Sdavidcs	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
3258316485Sdavidcs	min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
3259316485Sdavidcs
3260316485Sdavidcs	/* Update hwfn */
3261316485Sdavidcs	p_hwfn->dpi_size = dpi_page_size;
3262316485Sdavidcs	p_hwfn->dpi_count = dpi_count;
3263316485Sdavidcs
3264316485Sdavidcs	/* Update registers */
3265316485Sdavidcs	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
3266316485Sdavidcs
3267316485Sdavidcs	if (dpi_count < min_dpis)
3268316485Sdavidcs		return ECORE_NORESOURCES;
3269316485Sdavidcs
3270316485Sdavidcs	return ECORE_SUCCESS;
3271316485Sdavidcs}
3272316485Sdavidcs
/* RoCE EDPM (enhanced doorbell PM) policy, read from
 * pf_params.rdma_pf_params.roce_edpm_mode in ecore_hw_init_pf_doorbell_bar().
 */
enum ECORE_ROCE_EDPM_MODE {
	/* Try a WID per CPU; fall back to a single WID if the BAR is small */
	ECORE_ROCE_EDPM_MODE_ENABLE	= 0,
	/* Require a WID per CPU; an allocation failure propagates as-is */
	ECORE_ROCE_EDPM_MODE_FORCE_ON	= 1,
	/* EDPM off: a single WID is allocated */
	ECORE_ROCE_EDPM_MODE_DISABLE	= 2,
};
3278316485Sdavidcs
/* Partition the doorbell BAR into the normal (per-ICID) region and the PWM
 * region, size the DPIs according to the RoCE EDPM policy, and program the
 * DORQ registers accordingly.
 *
 * @return ECORE_SUCCESS, or ECORE_NORESOURCES when the BAR cannot host the
 *         normal region, a minimal PWM region, or enough DPIs.
 */
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	struct ecore_rdma_pf_params *p_rdma_pf_params;
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus = 1;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 cond;

	db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
	/* On a coupled (100G) device each hwfn owns half of the BAR */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		db_bar_size /= 2;

	/* Calculate doorbell regions
	 * -----------------------------------
	 * The doorbell BAR is made of two regions. The first is called normal
	 * region and the second is called PWM region. In the normal region
	 * each ICID has its own set of addresses so that writing to that
	 * specific address identifies the ICID. In the Process Window Mode
	 * region the ICID is given in the data written to the doorbell. The
	 * above per PF register denotes the offset in the doorbell BAR in which
	 * the PWM region begins.
	 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
	 * non-PWM connection. The calculation below computes the total non-PWM
	 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
	 * in units of 4,096 bytes.
	 */
	non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						     OSAL_NULL) +
		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						     OSAL_NULL);
	norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, OSAL_PAGE_SIZE);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->p_dev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return ECORE_NORESOURCES;
	}
	if (pwm_regsize < ECORE_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->p_dev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
		       norm_regsize);
		return ECORE_NORESOURCES;
	}

	p_rdma_pf_params = &p_hwfn->pf_params.rdma_pf_params;

	/* Calculate number of DPIs */
	/* iWARP never uses EDPM; force the disable policy */
	if (ECORE_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_pf_params->roce_edpm_mode =  ECORE_ROCE_EDPM_MODE_DISABLE;

	if (p_rdma_pf_params->roce_edpm_mode <= ECORE_ROCE_EDPM_MODE_DISABLE) {
		roce_edpm_mode = p_rdma_pf_params->roce_edpm_mode;
	} else {
		DP_ERR(p_hwfn->p_dev,
		       "roce edpm mode was configured to an illegal value of %u. Resetting it to 0-Enable EDPM if BAR size is adequate\n",
		       p_rdma_pf_params->roce_edpm_mode);
		roce_edpm_mode = 0;
	}

	if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
	    ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) {
		/* Either EDPM is mandatory, or we are attempting to allocate a
		 * WID per CPU.
		 */
		n_cpus = OSAL_NUM_CPUS();
		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	/* cond: EDPM disabled by config, or per-CPU sizing failed in the
	 * non-mandatory (ENABLE) mode — FORCE_ON failures propagate below.
	 */
	cond = ((rc != ECORE_SUCCESS) &&
		(roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
		(roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocated a WID per CPU.
		 */
		n_cpus = 1;
		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

#ifdef CONFIG_ECORE_ROCE
		/* If we entered this flow due to DCBX then the DPM register is
		 * already configured.
		 */
		if (cond)
			ecore_rdma_dpm_bar(p_hwfn, p_ptt);
#endif
	}

	p_hwfn->wid_count = (u16)n_cpus;

	/* Check return codes from above calls */
	if (rc != ECORE_SUCCESS) {
#ifndef LINUX_REMOVE
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via user configuration min_dpis or by disabling EDPM via user configuration roce_edpm_mode\n",
		       p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
		       ECORE_MIN_DPIS);
#else
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via the module parameter min_rdma_dpis or by disabling EDPM by setting the module parameter roce_edpm to 2\n",
		       p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
		       ECORE_MIN_DPIS);
#endif
		DP_ERR(p_hwfn,
		       "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
		       norm_regsize, pwm_regsize, p_hwfn->dpi_size,
		       p_hwfn->dpi_count,
		       ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		       "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);

		return ECORE_NORESOURCES;
	}

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
		norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count,
		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		"disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);

	/* Update hwfn */
	p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
						      * calculate the doorbell
						      * address
						      */

	/* Update registers */
	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return ECORE_SUCCESS;
}
3423316485Sdavidcs
/* Run the PORT init-tool phase for this hwfn's port and, on non-ASIC
 * platforms, perform the platform-specific link initialization (on real
 * ASIC the link setup is skipped here).
 *
 * @param p_hwfn - hw function whose port is being initialized
 * @param p_ptt - PTT window used for the register accesses
 * @param hw_mode - hw mode bitmap previously computed for this device
 *
 * @return ECORE_SUCCESS on success, an error code otherwise.
 */
static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       int hw_mode)
{
	enum _ecore_status_t rc	= ECORE_SUCCESS;

	/* In CMT the gate should be cleared by the 2nd hwfn */
	if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
		STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);

	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			    hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Disable pglue_b master-write padding */
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		if (ECORE_IS_AH(p_hwfn->p_dev))
			return ECORE_SUCCESS;
		else if (ECORE_IS_BB(p_hwfn->p_dev))
			ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
		else /* E5 */
			ECORE_E5_MISSING_CODE;
	} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (ECORE_IS_CMT(p_hwfn->p_dev)) {
			/* Activate OPTE in CMT */
			u32 val;

			val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
			val |= 0x10;
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
				 0x55555555);
		}

		ecore_emul_link_init(p_hwfn, p_ptt);
	} else {
		DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
	}
#endif

	return rc;
}
3480316485Sdavidcs
/* Run the PF init phase for this hw-function: runtime register setup
 * (VLAN/MAC classification, protocol search bits), the PF and QM_PF
 * init-tool phases, PCI relaxed-ordering configuration, doorbell BAR
 * and LLH setup, and - when requested via p_params->b_hw_start -
 * interrupt enablement plus the FUNCTION_START ramrod.
 *
 * @param p_hwfn - hw function being initialized
 * @param p_ptt - PTT window used for register access
 * @param hw_mode - hw mode bitmap for this device
 * @param p_params - ecore-client init parameters (tunnel config,
 *		     interrupt mode, relaxed-ordering policy, etc.)
 *
 * @return ECORE_SUCCESS on success, an error code otherwise.
 */
static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		 int hw_mode, struct ecore_hw_init_params *p_params)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	u32 prs_reg;
	enum _ecore_status_t rc	= ECORE_SUCCESS;
	u16 ctrl;
	int pos;

	if (p_hwfn->mcp_info) {
		struct ecore_mcp_function_info *p_info;

		/* Apply the minimum-bandwidth (WFQ) value provided by MFW */
		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}
	ecore_cxt_hw_init_pf(p_hwfn, p_ptt);

	ecore_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);

		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
			     1);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocl Configuration  - @@@TBD - should we set 0 otherwise?*/
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* perform debug configuration when chip is out of reset */
	OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);

	/* Sanity check before the PF init sequence that uses DMAE */
	rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	/* PCI relaxed ordering is generally beneficial for performance,
	 * but can hurt performance or lead to instability on some setups.
	 * If management FW is taking care of it go with that, otherwise
	 * disable to be on the safe side.
	 */
	pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
	if (!pos) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to find the PCI Express Capability structure in the PCI config space\n");
		return ECORE_IO;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);

	if (p_params->pci_rlx_odr_mode == ECORE_ENABLE_RLX_ODR) {
		ctrl |= PCI_EXP_DEVCTL_RELAX_EN;
		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
					   pos + PCI_EXP_DEVCTL, ctrl);
	} else if (p_params->pci_rlx_odr_mode == ECORE_DISABLE_RLX_ODR) {
		ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
					   pos + PCI_EXP_DEVCTL, ctrl);
	} else if (ecore_mcp_rlx_odr_supported(p_hwfn)) {
		DP_INFO(p_hwfn, "PCI relax ordering configured by MFW\n");
	} else {
		ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
					   pos + PCI_EXP_DEVCTL, ctrl);
	}

	rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Use the leading hwfn since in CMT only NIG #0 is operational */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt,
					  p_params->avoid_eng_affin);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	if (p_params->b_hw_start) {
		/* enable interrupts */
		rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* send function start command */
		rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
				       p_params->allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n");
			return rc;
		}
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);

		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
		{
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
					(1 << 2));
			ecore_wr(p_hwfn, p_ptt,
					PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
					0x100);
		}
		/* Dump the PRS search registers after the function start,
		 * as a debug aid for the storage personalities.
		 */
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH registers after start PFn\n");
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_TCP: %x\n", prs_reg);
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_UDP: %x\n", prs_reg);
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
		prs_reg = ecore_rd(p_hwfn, p_ptt,
				PRS_REG_SEARCH_TCP_FIRST_FRAG);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
				prs_reg);
		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
	}
	return ECORE_SUCCESS;
}
3645316485Sdavidcs
3646337517Sdavidcsenum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
3647337517Sdavidcs						  struct ecore_ptt *p_ptt,
3648337517Sdavidcs						  bool b_enable)
3649316485Sdavidcs{
3650337517Sdavidcs	u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
3651316485Sdavidcs
3652337517Sdavidcs	/* Configure the PF's internal FID_enable for master transactions */
3653316485Sdavidcs	ecore_wr(p_hwfn, p_ptt,
3654316485Sdavidcs		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
3655316485Sdavidcs
3656337517Sdavidcs	/* Wait until value is set - try for 1 second every 50us */
3657316485Sdavidcs	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
3658316485Sdavidcs		val = ecore_rd(p_hwfn, p_ptt,
3659316485Sdavidcs			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
3660316485Sdavidcs		if (val == set_val)
3661316485Sdavidcs			break;
3662316485Sdavidcs
3663316485Sdavidcs		OSAL_UDELAY(50);
3664316485Sdavidcs	}
3665316485Sdavidcs
3666316485Sdavidcs	if (val != set_val) {
3667316485Sdavidcs		DP_NOTICE(p_hwfn, true,
3668316485Sdavidcs			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
3669316485Sdavidcs		return ECORE_UNKNOWN_ERROR;
3670316485Sdavidcs	}
3671316485Sdavidcs
3672316485Sdavidcs	return ECORE_SUCCESS;
3673316485Sdavidcs}
3674316485Sdavidcs
3675316485Sdavidcsstatic void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
3676316485Sdavidcs			struct ecore_ptt *p_main_ptt)
3677316485Sdavidcs{
3678316485Sdavidcs	/* Read shadow of current MFW mailbox */
3679316485Sdavidcs	ecore_mcp_read_mb(p_hwfn, p_main_ptt);
3680316485Sdavidcs	OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
3681316485Sdavidcs		    p_hwfn->mcp_info->mfw_mb_cur,
3682316485Sdavidcs		    p_hwfn->mcp_info->mfw_mb_length);
3683316485Sdavidcs}
3684316485Sdavidcs
3685337517Sdavidcsstatic enum _ecore_status_t
3686337517Sdavidcsecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
3687337517Sdavidcs			   struct ecore_load_req_params *p_load_req,
3688337517Sdavidcs			   struct ecore_drv_load_params *p_drv_load)
3689337517Sdavidcs{
3690337517Sdavidcs	/* Make sure that if ecore-client didn't provide inputs, all the
3691337517Sdavidcs	 * expected defaults are indeed zero.
3692337517Sdavidcs	 */
3693337517Sdavidcs	OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
3694337517Sdavidcs	OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
3695337517Sdavidcs	OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
3696337517Sdavidcs
3697337517Sdavidcs	OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
3698337517Sdavidcs
3699337517Sdavidcs	if (p_drv_load == OSAL_NULL)
3700337517Sdavidcs		goto out;
3701337517Sdavidcs
3702337517Sdavidcs	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
3703337517Sdavidcs			       ECORE_DRV_ROLE_KDUMP :
3704337517Sdavidcs			       ECORE_DRV_ROLE_OS;
3705337517Sdavidcs	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
3706337517Sdavidcs	p_load_req->override_force_load = p_drv_load->override_force_load;
3707337517Sdavidcs
3708337517Sdavidcs	/* Old MFW versions don't support timeout values other than default and
3709337517Sdavidcs	 * none, so these values are replaced according to the fall-back action.
3710337517Sdavidcs	 */
3711337517Sdavidcs
3712337517Sdavidcs	if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
3713337517Sdavidcs	    p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
3714337517Sdavidcs	    (p_hwfn->mcp_info->capabilities &
3715337517Sdavidcs	     FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
3716337517Sdavidcs		p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
3717337517Sdavidcs		goto out;
3718337517Sdavidcs	}
3719337517Sdavidcs
3720337517Sdavidcs	switch (p_drv_load->mfw_timeout_fallback) {
3721337517Sdavidcs	case ECORE_TO_FALLBACK_TO_NONE:
3722337517Sdavidcs		p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
3723337517Sdavidcs		break;
3724337517Sdavidcs	case ECORE_TO_FALLBACK_TO_DEFAULT:
3725337517Sdavidcs		p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
3726337517Sdavidcs		break;
3727337517Sdavidcs	case ECORE_TO_FALLBACK_FAIL_LOAD:
3728337517Sdavidcs		DP_NOTICE(p_hwfn, false,
3729337517Sdavidcs			  "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
3730337517Sdavidcs			  p_drv_load->mfw_timeout_val,
3731337517Sdavidcs			  ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
3732337517Sdavidcs			  ECORE_LOAD_REQ_LOCK_TO_NONE);
3733337517Sdavidcs		return ECORE_ABORTED;
3734337517Sdavidcs	}
3735337517Sdavidcs
3736337517Sdavidcs	DP_INFO(p_hwfn,
3737337517Sdavidcs		"Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
3738337517Sdavidcs		p_drv_load->mfw_timeout_val,
3739337517Sdavidcs		(p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
3740337517Sdavidcs		"default" : "none",
3741337517Sdavidcs		p_load_req->timeout_val);
3742337517Sdavidcsout:
3743337517Sdavidcs	return ECORE_SUCCESS;
3744337517Sdavidcs}
3745337517Sdavidcs
3746316485Sdavidcsstatic enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
3747316485Sdavidcs				    struct ecore_hw_init_params *p_params)
3748316485Sdavidcs{
3749316485Sdavidcs	if (p_params->p_tunn) {
3750316485Sdavidcs		ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
3751316485Sdavidcs		ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
3752316485Sdavidcs	}
3753316485Sdavidcs
3754316485Sdavidcs	p_hwfn->b_int_enabled = 1;
3755316485Sdavidcs
3756316485Sdavidcs	return ECORE_SUCCESS;
3757316485Sdavidcs}
3758316485Sdavidcs
3759337517Sdavidcsstatic void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
3760337517Sdavidcs				   struct ecore_ptt *p_ptt)
3761316485Sdavidcs{
3762337517Sdavidcs	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
3763337517Sdavidcs		 1 << p_hwfn->abs_pf_id);
3764316485Sdavidcs}
3765316485Sdavidcs
/* Top-level HW init entry point. For every hw-function: VFs are started
 * via ecore_vf_start(); PFs negotiate a load with the MFW (LOAD_REQ),
 * run the init phases the returned load code requires
 * (ENGINE -> PORT -> FUNCTION, with intentional fallthrough), and
 * acknowledge with LOAD_DONE. Afterwards, the leading PF notifies the
 * MFW of driver/firmware state. On a PF init failure the MFW load lock
 * is released via cancel-load (or load-done as a fallback).
 *
 * @param p_dev - ecore device
 * @param p_params - ecore-client init parameters
 *
 * @return ECORE_SUCCESS on success, an error code otherwise.
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params)
{
	struct ecore_load_req_params load_req_params;
	u32 load_code, resp, param, drv_mb_param;
	bool b_default_mtu = true;
	struct ecore_hwfn *p_hwfn;
	enum _ecore_status_t rc = ECORE_SUCCESS, cancel_load;
	u16 ether_type;
	int i;

	if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
		DP_NOTICE(p_dev, false,
			  "MSI mode is not supported for CMT devices\n");
		return ECORE_INVAL;
	}

	if (IS_PF(p_dev)) {
		rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	for_each_hwfn(p_dev, i) {
		p_hwfn = &p_dev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		if (IS_VF(p_dev)) {
			ecore_vf_start(p_hwfn, p_params);
			continue;
		}

		rc = ecore_calc_hw_mode(p_hwfn);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Program the runtime tag-ethertype registers for 802.1Q or
		 * 802.1ad multi-function tagging modes.
		 */
		if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
						   &p_dev->mf_bits) ||
				     OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
						   &p_dev->mf_bits))) {
			if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
					  &p_dev->mf_bits))
				ether_type = ETH_P_8021Q;
			else
				ether_type = ETH_P_8021AD;
			STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
				     ether_type);
		}

		rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
						p_params->p_drv_load_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
					&load_req_params);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed sending a LOAD_REQ command\n");
			return rc;
		}

		load_code = load_req_params.load_code;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load request was sent. Load code: 0x%x\n",
			   load_code);

		ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);

		/* CQ75580:
		 * When coming back from hibernate state, the registers from
		 * which shadow is read initially are not initialized. It turns
		 * out that these registers get initialized during the call to
		 * ecore_mcp_load_req request. So we need to reread them here
		 * to get the proper shadow register value.
		 * Note: This is a workaround for the missing MFW
		 * initialization. It may be removed once the implementation
		 * is done.
		 */
		ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		/* Only relevant for recovery:
		 * Clear the indication after the LOAD_REQ command is responded
		 * by the MFW.
		 */
		p_dev->recov_in_prog = false;

		/* qm_lock is shared between hwfns; (de)allocate it only on
		 * the first/last reference.
		 */
		if (!qm_lock_ref_cnt) {
#ifdef CONFIG_ECORE_LOCK_ALLOC
			rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
			if (rc) {
				DP_ERR(p_hwfn, "qm_lock allocation failed\n");
				goto qm_lock_fail;
			}
#endif
			OSAL_SPIN_LOCK_INIT(&qm_lock);
		}
		++qm_lock_ref_cnt;

		/* Clean up chip from previous driver if such remains exist.
		 * This is not needed when the PF is the first one on the
		 * engine, since afterwards we are going to init the FW.
		 */
		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
			rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
						 p_hwfn->rel_pf_id, false);
			if (rc != ECORE_SUCCESS) {
				ecore_hw_err_notify(p_hwfn,
						    ECORE_HW_ERR_RAMROD_FAIL);
				goto load_err;
			}
		}

		/* Log and clear previous pglue_b errors if such exist */
		ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);

		/* Enable the PF's internal FID_enable in the PXP */
		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
						  true);
		if (rc != ECORE_SUCCESS)
			goto load_err;

		/* Clear the pglue_b was_error indication.
		 * In E4 it must be done after the BME and the internal
		 * FID_enable for the PF are set, since VDMs may cause the
		 * indication to be set again.
		 */
		ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);

		/* The load code dictates how many init phases this PF runs;
		 * each case intentionally falls through to the next one.
		 */
		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						  p_hwfn->hw_info.hw_mode);
			if (rc != ECORE_SUCCESS)
				break;
			/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc != ECORE_SUCCESS)
				break;
			/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode,
					      p_params);
			break;
		default:
			DP_NOTICE(p_hwfn, false,
				  "Unexpected load code [0x%08x]", load_code);
			rc = ECORE_NOTIMPL;
			break;
		}

		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);
			goto load_err;
		}

		rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false, "Sending load done failed, rc = %d\n", rc);
			if (rc == ECORE_NOMEM) {
				DP_NOTICE(p_hwfn, false,
					  "Sending load done was failed due to memory allocation failure\n");
				goto load_err;
			}
			return rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				   DRV_MSG_CODE_SET_DCBX,
				   1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
				   &param);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to send DCBX attention request\n");
			return rc;
		}

		p_hwfn->hw_init_done = true;
	}

	if (IS_PF(p_dev)) {
		/* Get pre-negotiated values for stag, bandwidth etc. */
		p_hwfn = ECORE_LEADING_HWFN(p_dev);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				   DRV_MSG_CODE_GET_OEM_UPDATES,
				   1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
				   &resp, &param);
		if (rc != ECORE_SUCCESS)
			DP_NOTICE(p_hwfn, false,
				  "Failed to send GET_OEM_UPDATES attention request\n");
	}

	if (IS_PF(p_dev)) {
		/* Report driver/firmware state to the MFW; failures here are
		 * logged but deliberately not fatal.
		 */
		p_hwfn = ECORE_LEADING_HWFN(p_dev);
		drv_mb_param = STORM_FW_VERSION;
		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				   DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
				   drv_mb_param, &resp, &param);
		if (rc != ECORE_SUCCESS)
			DP_INFO(p_hwfn, "Failed to update firmware version\n");

		if (!b_default_mtu) {
			rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
						      p_hwfn->hw_info.mtu);
			if (rc != ECORE_SUCCESS)
				DP_INFO(p_hwfn, "Failed to update default mtu\n");
		}

		rc = ecore_mcp_ov_update_driver_state(p_hwfn,
						      p_hwfn->p_main_ptt,
						      ECORE_OV_DRIVER_STATE_DISABLED);
		if (rc != ECORE_SUCCESS)
			DP_INFO(p_hwfn, "Failed to update driver state\n");

		rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
						 ECORE_OV_ESWITCH_VEB);
		if (rc != ECORE_SUCCESS)
			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
	}

	return rc;

load_err:
	--qm_lock_ref_cnt;
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (!qm_lock_ref_cnt)
		OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
qm_lock_fail:
#endif
	/* The MFW load lock should be released also when initialization fails.
	 * If supported, use a cancel_load request to update the MFW with the
	 * load failure.
	 */
	cancel_load = ecore_mcp_cancel_load_req(p_hwfn, p_hwfn->p_main_ptt);
	if (cancel_load == ECORE_NOTIMPL) {
		DP_INFO(p_hwfn,
			"Send a load done request instead of cancel load\n");
		ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
	}
	return rc;
}
4028316485Sdavidcs
4029316485Sdavidcs#define ECORE_HW_STOP_RETRY_LIMIT	(10)
/* Disable the PF's connection and task timers and poll (up to
 * ECORE_HW_STOP_RETRY_LIMIT iterations, 1ms apart) until the timers
 * block reports that its linear scans are done. The wait loop is
 * skipped while a recovery is in progress. If the scans never finish,
 * a notice is logged but the function still returns.
 *
 * @param p_dev - ecore device (read for the recovery indication)
 * @param p_hwfn - hw function whose timers are stopped
 * @param p_ptt - PTT window used for register access
 */
static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
				 struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	int i;

	/* close timers */
	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
	for (i = 0;
	     i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
	     i++) {
		/* Done once both scan-active registers read zero */
		if ((!ecore_rd(p_hwfn, p_ptt,
			       TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!ecore_rd(p_hwfn, p_ptt,
			       TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		OSAL_MSLEEP(1);
	}

	if (i < ECORE_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn, false,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
4062316485Sdavidcs
4063316485Sdavidcsvoid ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
4064316485Sdavidcs{
4065316485Sdavidcs	int j;
4066316485Sdavidcs
4067316485Sdavidcs	for_each_hwfn(p_dev, j) {
4068316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
4069316485Sdavidcs		struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
4070316485Sdavidcs
4071316485Sdavidcs		ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
4072316485Sdavidcs	}
4073316485Sdavidcs}
4074316485Sdavidcs
4075316485Sdavidcsstatic enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
4076316485Sdavidcs						 struct ecore_ptt *p_ptt,
4077316485Sdavidcs						 u32 addr, u32 expected_val)
4078316485Sdavidcs{
4079316485Sdavidcs	u32 val = ecore_rd(p_hwfn, p_ptt, addr);
4080316485Sdavidcs
4081316485Sdavidcs	if (val != expected_val) {
4082316485Sdavidcs		DP_NOTICE(p_hwfn, true,
4083316485Sdavidcs			  "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
4084316485Sdavidcs			  addr, val, expected_val);
4085316485Sdavidcs		return ECORE_UNKNOWN_ERROR;
4086316485Sdavidcs	}
4087316485Sdavidcs
4088316485Sdavidcs	return ECORE_SUCCESS;
4089316485Sdavidcs}
4090316485Sdavidcs
4091316485Sdavidcsenum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
4092316485Sdavidcs{
4093316485Sdavidcs	struct ecore_hwfn *p_hwfn;
4094316485Sdavidcs	struct ecore_ptt *p_ptt;
4095316485Sdavidcs	enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
4096316485Sdavidcs	int j;
4097316485Sdavidcs
4098316485Sdavidcs	for_each_hwfn(p_dev, j) {
4099316485Sdavidcs		p_hwfn = &p_dev->hwfns[j];
4100316485Sdavidcs		p_ptt = p_hwfn->p_main_ptt;
4101316485Sdavidcs
4102316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
4103316485Sdavidcs
4104316485Sdavidcs		if (IS_VF(p_dev)) {
4105316485Sdavidcs			ecore_vf_pf_int_cleanup(p_hwfn);
4106316485Sdavidcs			rc = ecore_vf_pf_reset(p_hwfn);
4107316485Sdavidcs			if (rc != ECORE_SUCCESS) {
4108316485Sdavidcs				DP_NOTICE(p_hwfn, true,
4109316485Sdavidcs					  "ecore_vf_pf_reset failed. rc = %d.\n",
4110316485Sdavidcs					  rc);
4111316485Sdavidcs				rc2 = ECORE_UNKNOWN_ERROR;
4112316485Sdavidcs			}
4113316485Sdavidcs			continue;
4114316485Sdavidcs		}
4115316485Sdavidcs
4116316485Sdavidcs		/* mark the hw as uninitialized... */
4117316485Sdavidcs		p_hwfn->hw_init_done = false;
4118316485Sdavidcs
4119316485Sdavidcs		/* Send unload command to MCP */
4120316485Sdavidcs		if (!p_dev->recov_in_prog) {
4121316485Sdavidcs			rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
4122316485Sdavidcs			if (rc != ECORE_SUCCESS) {
4123337517Sdavidcs				DP_NOTICE(p_hwfn, false,
4124316485Sdavidcs					  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
4125316485Sdavidcs					  rc);
4126316485Sdavidcs				rc2 = ECORE_UNKNOWN_ERROR;
4127316485Sdavidcs			}
4128316485Sdavidcs		}
4129316485Sdavidcs
4130316485Sdavidcs		OSAL_DPC_SYNC(p_hwfn);
4131316485Sdavidcs
4132316485Sdavidcs		/* After this point no MFW attentions are expected, e.g. prevent
4133316485Sdavidcs		 * race between pf stop and dcbx pf update.
4134316485Sdavidcs		 */
4135316485Sdavidcs
4136316485Sdavidcs		rc = ecore_sp_pf_stop(p_hwfn);
4137316485Sdavidcs		if (rc != ECORE_SUCCESS) {
4138337517Sdavidcs			DP_NOTICE(p_hwfn, false,
4139316485Sdavidcs				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
4140316485Sdavidcs				  rc);
4141316485Sdavidcs			rc2 = ECORE_UNKNOWN_ERROR;
4142316485Sdavidcs		}
4143316485Sdavidcs
4144316485Sdavidcs		/* perform debug action after PF stop was sent */
4145316485Sdavidcs		OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
4146316485Sdavidcs
4147316485Sdavidcs		/* close NIG to BRB gate */
4148316485Sdavidcs		ecore_wr(p_hwfn, p_ptt,
4149316485Sdavidcs			 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
4150316485Sdavidcs
4151316485Sdavidcs		/* close parser */
4152316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
4153316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
4154316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
4155316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
4156316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
4157316485Sdavidcs
4158316485Sdavidcs		/* @@@TBD - clean transmission queues (5.b) */
4159316485Sdavidcs		/* @@@TBD - clean BTB (5.c) */
4160316485Sdavidcs
4161316485Sdavidcs		ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
4162316485Sdavidcs
4163316485Sdavidcs		/* @@@TBD - verify DMAE requests are done (8) */
4164316485Sdavidcs
4165316485Sdavidcs		/* Disable Attention Generation */
4166316485Sdavidcs		ecore_int_igu_disable_int(p_hwfn, p_ptt);
4167316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
4168316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
4169316485Sdavidcs		ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
4170316485Sdavidcs		rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
4171316485Sdavidcs		if (rc != ECORE_SUCCESS) {
4172316485Sdavidcs			DP_NOTICE(p_hwfn, true,
4173316485Sdavidcs				  "Failed to return IGU CAM to default\n");
4174316485Sdavidcs			rc2 = ECORE_UNKNOWN_ERROR;
4175316485Sdavidcs		}
4176316485Sdavidcs
4177316485Sdavidcs		/* Need to wait 1ms to guarantee SBs are cleared */
4178316485Sdavidcs		OSAL_MSLEEP(1);
4179316485Sdavidcs
4180316485Sdavidcs		if (!p_dev->recov_in_prog) {
4181316485Sdavidcs			ecore_verify_reg_val(p_hwfn, p_ptt,
4182316485Sdavidcs					     QM_REG_USG_CNT_PF_TX, 0);
4183316485Sdavidcs			ecore_verify_reg_val(p_hwfn, p_ptt,
4184316485Sdavidcs					     QM_REG_USG_CNT_PF_OTHER, 0);
4185316485Sdavidcs			/* @@@TBD - assert on incorrect xCFC values (10.b) */
4186316485Sdavidcs		}
4187316485Sdavidcs
4188316485Sdavidcs		/* Disable PF in HW blocks */
4189316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
4190316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
4191316485Sdavidcs
4192337517Sdavidcs		if (IS_LEAD_HWFN(p_hwfn) &&
4193337517Sdavidcs		    OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
4194337517Sdavidcs		    !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
4195337517Sdavidcs			ecore_llh_remove_mac_filter(p_dev, 0,
4196337517Sdavidcs						    p_hwfn->hw_info.hw_mac_addr);
4197337517Sdavidcs
4198337517Sdavidcs		--qm_lock_ref_cnt;
4199337517Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
4200337517Sdavidcs		if (!qm_lock_ref_cnt)
4201337517Sdavidcs			OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
4202337517Sdavidcs#endif
4203337517Sdavidcs
4204316485Sdavidcs		if (!p_dev->recov_in_prog) {
4205337517Sdavidcs			rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
4206337517Sdavidcs			if (rc == ECORE_NOMEM) {
4207337517Sdavidcs				DP_NOTICE(p_hwfn, false,
4208337517Sdavidcs					 "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
4209337517Sdavidcs				rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
4210337517Sdavidcs			}
4211316485Sdavidcs			if (rc != ECORE_SUCCESS) {
4212337517Sdavidcs				DP_NOTICE(p_hwfn, false,
4213316485Sdavidcs					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
4214316485Sdavidcs					  rc);
4215316485Sdavidcs				rc2 = ECORE_UNKNOWN_ERROR;
4216316485Sdavidcs			}
4217316485Sdavidcs		}
4218316485Sdavidcs	} /* hwfn loop */
4219316485Sdavidcs
4220337517Sdavidcs	if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
4221316485Sdavidcs		p_hwfn = ECORE_LEADING_HWFN(p_dev);
4222316485Sdavidcs		p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
4223316485Sdavidcs
4224337517Sdavidcs		 /* Clear the PF's internal FID_enable in the PXP.
4225337517Sdavidcs		  * In CMT this should only be done for first hw-function, and
4226337517Sdavidcs		  * only after all transactions have stopped for all active
4227337517Sdavidcs		  * hw-functions.
4228337517Sdavidcs		  */
4229337517Sdavidcs		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
4230337517Sdavidcs						  false);
4231316485Sdavidcs		if (rc != ECORE_SUCCESS) {
4232316485Sdavidcs			DP_NOTICE(p_hwfn, true,
4233337517Sdavidcs				  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
4234316485Sdavidcs				  rc);
4235316485Sdavidcs			rc2 = ECORE_UNKNOWN_ERROR;
4236316485Sdavidcs		}
4237316485Sdavidcs	}
4238316485Sdavidcs
4239316485Sdavidcs	return rc2;
4240316485Sdavidcs}
4241316485Sdavidcs
4242320164Sdavidcsenum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
4243316485Sdavidcs{
4244316485Sdavidcs	int j;
4245316485Sdavidcs
4246316485Sdavidcs	for_each_hwfn(p_dev, j) {
4247316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
4248320164Sdavidcs		struct ecore_ptt *p_ptt;
4249316485Sdavidcs
4250316485Sdavidcs		if (IS_VF(p_dev)) {
4251316485Sdavidcs			ecore_vf_pf_int_cleanup(p_hwfn);
4252316485Sdavidcs			continue;
4253316485Sdavidcs		}
4254320164Sdavidcs		p_ptt = ecore_ptt_acquire(p_hwfn);
4255320164Sdavidcs		if (!p_ptt)
4256320164Sdavidcs			return ECORE_AGAIN;
4257316485Sdavidcs
4258316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n");
4259316485Sdavidcs
4260316485Sdavidcs		ecore_wr(p_hwfn, p_ptt,
4261316485Sdavidcs			 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
4262316485Sdavidcs
4263316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
4264316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
4265316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
4266316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
4267316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
4268316485Sdavidcs
4269316485Sdavidcs		/* @@@TBD - clean transmission queues (5.b) */
4270316485Sdavidcs		/* @@@TBD - clean BTB (5.c) */
4271316485Sdavidcs
4272316485Sdavidcs		/* @@@TBD - verify DMAE requests are done (8) */
4273316485Sdavidcs
4274316485Sdavidcs		ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
4275316485Sdavidcs		/* Need to wait 1ms to guarantee SBs are cleared */
4276316485Sdavidcs		OSAL_MSLEEP(1);
4277320164Sdavidcs		ecore_ptt_release(p_hwfn, p_ptt);
4278316485Sdavidcs	}
4279320164Sdavidcs
4280320164Sdavidcs	return ECORE_SUCCESS;
4281316485Sdavidcs}
4282316485Sdavidcs
4283320164Sdavidcsenum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
4284316485Sdavidcs{
4285320164Sdavidcs	struct ecore_ptt *p_ptt;
4286316485Sdavidcs
4287316485Sdavidcs	if (IS_VF(p_hwfn->p_dev))
4288320164Sdavidcs		return ECORE_SUCCESS;
4289316485Sdavidcs
4290320164Sdavidcs	p_ptt = ecore_ptt_acquire(p_hwfn);
4291320164Sdavidcs	if (!p_ptt)
4292320164Sdavidcs		return ECORE_AGAIN;
4293320164Sdavidcs
4294316485Sdavidcs	/* If roce info is allocated it means roce is initialized and should
4295316485Sdavidcs	 * be enabled in searcher.
4296316485Sdavidcs	 */
4297337517Sdavidcs	if (p_hwfn->p_rdma_info &&
4298337517Sdavidcs	    p_hwfn->p_rdma_info->active &&
4299337517Sdavidcs	    p_hwfn->b_rdma_enabled_in_prs)
4300337517Sdavidcs		ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
4301316485Sdavidcs
4302316485Sdavidcs	/* Re-open incoming traffic */
4303320164Sdavidcs	ecore_wr(p_hwfn, p_ptt,
4304316485Sdavidcs		 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
4305320164Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
4306320164Sdavidcs
4307320164Sdavidcs	return ECORE_SUCCESS;
4308316485Sdavidcs}
4309316485Sdavidcs
4310320164Sdavidcsenum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx,
4311320164Sdavidcs					u32 pattern_size, u32 crc)
4312316485Sdavidcs{
4313337517Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
4314337517Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
4315320164Sdavidcs	struct ecore_ptt *p_ptt;
4316316485Sdavidcs	u32 reg_len = 0;
4317316485Sdavidcs	u32 reg_crc = 0;
4318316485Sdavidcs
4319337517Sdavidcs	p_ptt = ecore_ptt_acquire(p_hwfn);
4320320164Sdavidcs	if (!p_ptt)
4321320164Sdavidcs		return ECORE_AGAIN;
4322320164Sdavidcs
4323316485Sdavidcs	/* Get length and CRC register offsets */
4324316485Sdavidcs	switch (reg_idx)
4325316485Sdavidcs	{
4326316485Sdavidcs	case 0:
4327316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB :
4328316485Sdavidcs				WOL_REG_ACPI_PAT_0_LEN_K2_E5;
4329316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB :
4330316485Sdavidcs				WOL_REG_ACPI_PAT_0_CRC_K2_E5;
4331316485Sdavidcs		break;
4332316485Sdavidcs	case 1:
4333316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB :
4334316485Sdavidcs				WOL_REG_ACPI_PAT_1_LEN_K2_E5;
4335316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB :
4336316485Sdavidcs				WOL_REG_ACPI_PAT_1_CRC_K2_E5;
4337316485Sdavidcs		break;
4338316485Sdavidcs	case 2:
4339316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB :
4340316485Sdavidcs				WOL_REG_ACPI_PAT_2_LEN_K2_E5;
4341316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB :
4342316485Sdavidcs				WOL_REG_ACPI_PAT_2_CRC_K2_E5;
4343316485Sdavidcs		break;
4344316485Sdavidcs	case 3:
4345316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB :
4346316485Sdavidcs				WOL_REG_ACPI_PAT_3_LEN_K2_E5;
4347316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB :
4348316485Sdavidcs				WOL_REG_ACPI_PAT_3_CRC_K2_E5;
4349316485Sdavidcs		break;
4350316485Sdavidcs	case 4:
4351316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB :
4352316485Sdavidcs				WOL_REG_ACPI_PAT_4_LEN_K2_E5;
4353316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB :
4354316485Sdavidcs				WOL_REG_ACPI_PAT_4_CRC_K2_E5;
4355316485Sdavidcs		break;
4356316485Sdavidcs	case 5:
4357316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB :
4358316485Sdavidcs				WOL_REG_ACPI_PAT_5_LEN_K2_E5;
4359316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB :
4360316485Sdavidcs				WOL_REG_ACPI_PAT_5_CRC_K2_E5;
4361316485Sdavidcs		break;
4362316485Sdavidcs	case 6:
4363316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB :
4364316485Sdavidcs				WOL_REG_ACPI_PAT_6_LEN_K2_E5;
4365316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB :
4366316485Sdavidcs				WOL_REG_ACPI_PAT_6_CRC_K2_E5;
4367316485Sdavidcs		break;
4368316485Sdavidcs	case 7:
4369316485Sdavidcs		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB :
4370316485Sdavidcs				WOL_REG_ACPI_PAT_7_LEN_K2_E5;
4371316485Sdavidcs		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB :
4372316485Sdavidcs				WOL_REG_ACPI_PAT_7_CRC_K2_E5;
4373316485Sdavidcs		break;
4374316485Sdavidcs	default:
4375320164Sdavidcs		rc = ECORE_UNKNOWN_ERROR;
4376320164Sdavidcs		goto out;
4377316485Sdavidcs	}
4378316485Sdavidcs
4379316485Sdavidcs	/* Allign pattern size to 4 */
4380316485Sdavidcs	while (pattern_size % 4)
4381316485Sdavidcs		pattern_size++;
4382337517Sdavidcs
4383337517Sdavidcs	/* Write pattern length and crc value */
4384337517Sdavidcs	if (ECORE_IS_BB(p_dev)) {
4385337517Sdavidcs		rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_len, pattern_size);
4386337517Sdavidcs		if (rc != ECORE_SUCCESS) {
4387337517Sdavidcs			DP_NOTICE(p_hwfn, false,
4388337517Sdavidcs				  "Failed to update the ACPI pattern length\n");
4389337517Sdavidcs			return rc;
4390337517Sdavidcs		}
4391337517Sdavidcs
4392337517Sdavidcs		rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_crc, crc);
4393337517Sdavidcs		if (rc != ECORE_SUCCESS) {
4394337517Sdavidcs			DP_NOTICE(p_hwfn, false,
4395337517Sdavidcs				  "Failed to update the ACPI pattern crc value\n");
4396337517Sdavidcs			return rc;
4397337517Sdavidcs		}
4398337517Sdavidcs	} else {
4399337517Sdavidcs		ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_len, pattern_size);
4400337517Sdavidcs		ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_crc, crc);
4401316485Sdavidcs	}
4402316485Sdavidcs
4403316485Sdavidcs	DP_INFO(p_dev,
4404316485Sdavidcs		"ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] "
4405316485Sdavidcs		"reg_len[0x%x=0x%x]\n",
4406316485Sdavidcs		reg_idx, reg_crc, crc, reg_len, pattern_size);
4407320164Sdavidcsout:
4408337517Sdavidcs	 ecore_ptt_release(p_hwfn, p_ptt);
4409316485Sdavidcs
4410320164Sdavidcs	return rc;
4411316485Sdavidcs}
4412316485Sdavidcs
4413320164Sdavidcsvoid ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
4414320164Sdavidcs			    struct ecore_ptt *p_ptt)
4415316485Sdavidcs{
4416316485Sdavidcs	const u32 wake_buffer_clear_offset =
4417320164Sdavidcs		ECORE_IS_BB(p_hwfn->p_dev) ?
4418316485Sdavidcs		NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5;
4419316485Sdavidcs
4420320164Sdavidcs	DP_INFO(p_hwfn->p_dev,
4421316485Sdavidcs		"ecore_wol_buffer_clear: reset "
4422316485Sdavidcs		"REG_WAKE_BUFFER_CLEAR offset=0x%08x\n",
4423316485Sdavidcs		wake_buffer_clear_offset);
4424316485Sdavidcs
4425337517Sdavidcs	if (ECORE_IS_BB(p_hwfn->p_dev)) {
4426337517Sdavidcs		ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
4427337517Sdavidcs		ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
4428337517Sdavidcs	} else {
4429337517Sdavidcs		ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
4430337517Sdavidcs		ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
4431337517Sdavidcs	}
4432316485Sdavidcs}
4433316485Sdavidcs
4434320164Sdavidcsenum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
4435320164Sdavidcs					 struct ecore_ptt *p_ptt,
4436320164Sdavidcs					 struct ecore_wake_info *wake_info)
4437316485Sdavidcs{
4438320164Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
4439316485Sdavidcs	u32 *buf = OSAL_NULL;
4440316485Sdavidcs	u32 i    = 0;
4441316485Sdavidcs	const u32 reg_wake_buffer_offest =
4442316485Sdavidcs		ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB :
4443316485Sdavidcs			WOL_REG_WAKE_BUFFER_K2_E5;
4444316485Sdavidcs
4445320164Sdavidcs	wake_info->wk_info    = ecore_rd(p_hwfn, p_ptt,
4446316485Sdavidcs				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB :
4447316485Sdavidcs				WOL_REG_WAKE_INFO_K2_E5);
4448320164Sdavidcs	wake_info->wk_details = ecore_rd(p_hwfn, p_ptt,
4449316485Sdavidcs				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB :
4450316485Sdavidcs				WOL_REG_WAKE_DETAILS_K2_E5);
4451320164Sdavidcs	wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt,
4452316485Sdavidcs				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB :
4453316485Sdavidcs				WOL_REG_WAKE_PKT_LEN_K2_E5);
4454316485Sdavidcs
4455316485Sdavidcs	DP_INFO(p_dev,
4456316485Sdavidcs		"ecore_get_wake_info: REG_WAKE_INFO=0x%08x "
4457316485Sdavidcs		"REG_WAKE_DETAILS=0x%08x "
4458316485Sdavidcs		"REG_WAKE_PKT_LEN=0x%08x\n",
4459316485Sdavidcs		wake_info->wk_info,
4460316485Sdavidcs		wake_info->wk_details,
4461316485Sdavidcs		wake_info->wk_pkt_len);
4462316485Sdavidcs
4463316485Sdavidcs	buf = (u32 *)wake_info->wk_buffer;
4464316485Sdavidcs
4465316485Sdavidcs	for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++)
4466316485Sdavidcs	{
4467316485Sdavidcs		if ((i*sizeof(u32)) >=  sizeof(wake_info->wk_buffer))
4468316485Sdavidcs		{
4469316485Sdavidcs			DP_INFO(p_dev,
4470316485Sdavidcs				"ecore_get_wake_info: i index to 0 high=%d\n",
4471316485Sdavidcs				 i);
4472316485Sdavidcs			break;
4473316485Sdavidcs		}
4474320164Sdavidcs		buf[i] = ecore_rd(p_hwfn, p_ptt,
4475316485Sdavidcs				  reg_wake_buffer_offest + (i * sizeof(u32)));
4476316485Sdavidcs		DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n",
4477316485Sdavidcs			i, buf[i]);
4478316485Sdavidcs	}
4479316485Sdavidcs
4480320164Sdavidcs	ecore_wol_buffer_clear(p_hwfn, p_ptt);
4481316485Sdavidcs
4482316485Sdavidcs	return ECORE_SUCCESS;
4483316485Sdavidcs}
4484316485Sdavidcs
4485316485Sdavidcs/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
4486316485Sdavidcsstatic void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
4487316485Sdavidcs{
4488316485Sdavidcs	ecore_ptt_pool_free(p_hwfn);
4489316485Sdavidcs	OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
4490316485Sdavidcs	p_hwfn->hw_info.p_igu_info = OSAL_NULL;
4491316485Sdavidcs}
4492316485Sdavidcs
4493316485Sdavidcs/* Setup bar access */
4494316485Sdavidcsstatic void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
4495316485Sdavidcs{
4496316485Sdavidcs	/* clear indirect access */
4497316485Sdavidcs	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
4498316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4499316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
4500316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4501316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
4502316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4503316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
4504316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4505316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
4506316485Sdavidcs	} else {
4507316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4508316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
4509316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4510316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
4511316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4512316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
4513316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4514316485Sdavidcs			 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
4515316485Sdavidcs	}
4516316485Sdavidcs
4517337517Sdavidcs	/* Clean previous pglue_b errors if such exist */
4518337517Sdavidcs	ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
4519316485Sdavidcs
4520316485Sdavidcs	/* enable internal target-read */
4521316485Sdavidcs	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4522316485Sdavidcs		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
4523316485Sdavidcs}
4524316485Sdavidcs
4525316485Sdavidcsstatic void get_function_id(struct ecore_hwfn *p_hwfn)
4526316485Sdavidcs{
4527316485Sdavidcs	/* ME Register */
4528316485Sdavidcs	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
4529316485Sdavidcs						  PXP_PF_ME_OPAQUE_ADDR);
4530316485Sdavidcs
4531316485Sdavidcs	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
4532316485Sdavidcs
4533316485Sdavidcs	/* Bits 16-19 from the ME registers are the pf_num */
4534316485Sdavidcs	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
4535316485Sdavidcs	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
4536316485Sdavidcs				      PXP_CONCRETE_FID_PFID);
4537316485Sdavidcs	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
4538316485Sdavidcs				    PXP_CONCRETE_FID_PORT);
4539316485Sdavidcs
4540316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
4541316485Sdavidcs		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
4542316485Sdavidcs		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
4543316485Sdavidcs}
4544316485Sdavidcs
4545316485Sdavidcsvoid ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
4546316485Sdavidcs{
4547316485Sdavidcs	u32 *feat_num = p_hwfn->hw_info.feat_num;
4548316485Sdavidcs	struct ecore_sb_cnt_info sb_cnt;
4549316485Sdavidcs	u32 non_l2_sbs = 0;
4550316485Sdavidcs
4551316485Sdavidcs	OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
4552316485Sdavidcs	ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
4553316485Sdavidcs
4554316485Sdavidcs#ifdef CONFIG_ECORE_ROCE
4555316485Sdavidcs	/* Roce CNQ require each: 1 status block. 1 CNQ, we divide the
4556316485Sdavidcs	 * status blocks equally between L2 / RoCE but with consideration as
4557316485Sdavidcs	 * to how many l2 queues / cnqs we have
4558316485Sdavidcs	 */
4559316485Sdavidcs	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
4560337517Sdavidcs#ifndef __EXTRACT__LINUX__THROW__
4561316485Sdavidcs		u32 max_cnqs;
4562337517Sdavidcs#endif
4563316485Sdavidcs
4564316485Sdavidcs		feat_num[ECORE_RDMA_CNQ] =
4565316485Sdavidcs			OSAL_MIN_T(u32,
4566316485Sdavidcs				   sb_cnt.cnt / 2,
4567316485Sdavidcs				   RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM));
4568316485Sdavidcs
4569337517Sdavidcs#ifndef __EXTRACT__LINUX__THROW__
4570316485Sdavidcs		/* Upper layer might require less */
4571316485Sdavidcs		max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs;
4572316485Sdavidcs		if (max_cnqs) {
4573316485Sdavidcs			if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE)
4574316485Sdavidcs				max_cnqs = 0;
4575316485Sdavidcs			feat_num[ECORE_RDMA_CNQ] =
4576316485Sdavidcs				OSAL_MIN_T(u32,
4577316485Sdavidcs					   feat_num[ECORE_RDMA_CNQ],
4578316485Sdavidcs					   max_cnqs);
4579316485Sdavidcs		}
4580337517Sdavidcs#endif
4581316485Sdavidcs
4582316485Sdavidcs		non_l2_sbs = feat_num[ECORE_RDMA_CNQ];
4583316485Sdavidcs	}
4584316485Sdavidcs#endif
4585316485Sdavidcs
4586316485Sdavidcs	/* L2 Queues require each: 1 status block. 1 L2 queue */
4587316485Sdavidcs	if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
4588316485Sdavidcs		/* Start by allocating VF queues, then PF's */
4589316485Sdavidcs		feat_num[ECORE_VF_L2_QUE] =
4590316485Sdavidcs			OSAL_MIN_T(u32,
4591316485Sdavidcs				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
4592316485Sdavidcs				   sb_cnt.iov_cnt);
4593316485Sdavidcs		feat_num[ECORE_PF_L2_QUE] =
4594316485Sdavidcs			OSAL_MIN_T(u32,
4595316485Sdavidcs				   sb_cnt.cnt - non_l2_sbs,
4596316485Sdavidcs				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
4597316485Sdavidcs				   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
4598316485Sdavidcs	}
4599316485Sdavidcs
4600316485Sdavidcs	if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
4601316485Sdavidcs		feat_num[ECORE_FCOE_CQ] =
4602316485Sdavidcs			OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
4603316485Sdavidcs							     ECORE_CMDQS_CQS));
4604316485Sdavidcs
4605316485Sdavidcs	if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
4606316485Sdavidcs		feat_num[ECORE_ISCSI_CQ] =
4607316485Sdavidcs			OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
4608316485Sdavidcs							     ECORE_CMDQS_CQS));
4609316485Sdavidcs
4610316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
4611316485Sdavidcs		   "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
4612316485Sdavidcs		   (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
4613316485Sdavidcs		   (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
4614316485Sdavidcs		   (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
4615316485Sdavidcs		   (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
4616316485Sdavidcs		   (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
4617316485Sdavidcs		   (int)sb_cnt.cnt);
4618316485Sdavidcs}
4619316485Sdavidcs
4620316485Sdavidcsconst char *ecore_hw_get_resc_name(enum ecore_resources res_id)
4621316485Sdavidcs{
4622316485Sdavidcs	switch (res_id) {
4623316485Sdavidcs	case ECORE_L2_QUEUE:
4624316485Sdavidcs		return "L2_QUEUE";
4625316485Sdavidcs	case ECORE_VPORT:
4626316485Sdavidcs		return "VPORT";
4627316485Sdavidcs	case ECORE_RSS_ENG:
4628316485Sdavidcs		return "RSS_ENG";
4629316485Sdavidcs	case ECORE_PQ:
4630316485Sdavidcs		return "PQ";
4631316485Sdavidcs	case ECORE_RL:
4632316485Sdavidcs		return "RL";
4633316485Sdavidcs	case ECORE_MAC:
4634316485Sdavidcs		return "MAC";
4635316485Sdavidcs	case ECORE_VLAN:
4636316485Sdavidcs		return "VLAN";
4637316485Sdavidcs	case ECORE_RDMA_CNQ_RAM:
4638316485Sdavidcs		return "RDMA_CNQ_RAM";
4639316485Sdavidcs	case ECORE_ILT:
4640316485Sdavidcs		return "ILT";
4641316485Sdavidcs	case ECORE_LL2_QUEUE:
4642316485Sdavidcs		return "LL2_QUEUE";
4643316485Sdavidcs	case ECORE_CMDQS_CQS:
4644316485Sdavidcs		return "CMDQS_CQS";
4645316485Sdavidcs	case ECORE_RDMA_STATS_QUEUE:
4646316485Sdavidcs		return "RDMA_STATS_QUEUE";
4647316485Sdavidcs	case ECORE_BDQ:
4648316485Sdavidcs		return "BDQ";
4649316485Sdavidcs	case ECORE_SB:
4650316485Sdavidcs		return "SB";
4651316485Sdavidcs	default:
4652316485Sdavidcs		return "UNKNOWN_RESOURCE";
4653316485Sdavidcs	}
4654316485Sdavidcs}
4655316485Sdavidcs
4656316485Sdavidcsstatic enum _ecore_status_t
4657316485Sdavidcs__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
4658320164Sdavidcs			      struct ecore_ptt *p_ptt,
4659320164Sdavidcs			      enum ecore_resources res_id,
4660320164Sdavidcs			      u32 resc_max_val,
4661316485Sdavidcs			      u32 *p_mcp_resp)
4662316485Sdavidcs{
4663316485Sdavidcs	enum _ecore_status_t rc;
4664316485Sdavidcs
4665320164Sdavidcs	rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
4666316485Sdavidcs					resc_max_val, p_mcp_resp);
4667316485Sdavidcs	if (rc != ECORE_SUCCESS) {
4668337517Sdavidcs		DP_NOTICE(p_hwfn, false,
4669316485Sdavidcs			  "MFW response failure for a max value setting of resource %d [%s]\n",
4670316485Sdavidcs			  res_id, ecore_hw_get_resc_name(res_id));
4671316485Sdavidcs		return rc;
4672316485Sdavidcs	}
4673316485Sdavidcs
4674316485Sdavidcs	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
4675316485Sdavidcs		DP_INFO(p_hwfn,
4676316485Sdavidcs			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
4677316485Sdavidcs			res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
4678316485Sdavidcs
4679316485Sdavidcs	return ECORE_SUCCESS;
4680316485Sdavidcs}
4681316485Sdavidcs
4682316485Sdavidcsstatic enum _ecore_status_t
4683320164Sdavidcsecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
4684320164Sdavidcs			    struct ecore_ptt *p_ptt)
4685316485Sdavidcs{
4686316485Sdavidcs	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
4687316485Sdavidcs	u32 resc_max_val, mcp_resp;
4688316485Sdavidcs	u8 res_id;
4689316485Sdavidcs	enum _ecore_status_t rc;
4690316485Sdavidcs
4691316485Sdavidcs	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
4692316485Sdavidcs		switch (res_id) {
4693316485Sdavidcs		case ECORE_LL2_QUEUE:
4694316485Sdavidcs			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
4695316485Sdavidcs			break;
4696316485Sdavidcs		case ECORE_RDMA_CNQ_RAM:
4697316485Sdavidcs			/* No need for a case for ECORE_CMDQS_CQS since
4698316485Sdavidcs			 * CNQ/CMDQS are the same resource.
4699316485Sdavidcs			 */
4700337517Sdavidcs			resc_max_val = NUM_OF_GLOBAL_QUEUES;
4701316485Sdavidcs			break;
4702316485Sdavidcs		case ECORE_RDMA_STATS_QUEUE:
4703316485Sdavidcs			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
4704316485Sdavidcs					    : RDMA_NUM_STATISTIC_COUNTERS_BB;
4705316485Sdavidcs			break;
4706316485Sdavidcs		case ECORE_BDQ:
4707316485Sdavidcs			resc_max_val = BDQ_NUM_RESOURCES;
4708316485Sdavidcs			break;
4709316485Sdavidcs		default:
4710316485Sdavidcs			continue;
4711316485Sdavidcs		}
4712316485Sdavidcs
4713320164Sdavidcs		rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
4714316485Sdavidcs						   resc_max_val, &mcp_resp);
4715316485Sdavidcs		if (rc != ECORE_SUCCESS)
4716316485Sdavidcs			return rc;
4717316485Sdavidcs
4718316485Sdavidcs		/* There's no point to continue to the next resource if the
4719316485Sdavidcs		 * command is not supported by the MFW.
4720316485Sdavidcs		 * We do continue if the command is supported but the resource
4721316485Sdavidcs		 * is unknown to the MFW. Such a resource will be later
4722316485Sdavidcs		 * configured with the default allocation values.
4723316485Sdavidcs		 */
4724316485Sdavidcs		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
4725316485Sdavidcs			return ECORE_NOTIMPL;
4726316485Sdavidcs	}
4727316485Sdavidcs
4728316485Sdavidcs	return ECORE_SUCCESS;
4729316485Sdavidcs}
4730316485Sdavidcs
4731316485Sdavidcsstatic
4732316485Sdavidcsenum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
4733316485Sdavidcs					    enum ecore_resources res_id,
4734316485Sdavidcs					    u32 *p_resc_num, u32 *p_resc_start)
4735316485Sdavidcs{
4736316485Sdavidcs	u8 num_funcs = p_hwfn->num_funcs_on_engine;
4737316485Sdavidcs	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
4738316485Sdavidcs
4739316485Sdavidcs	switch (res_id) {
4740316485Sdavidcs	case ECORE_L2_QUEUE:
4741316485Sdavidcs		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
4742316485Sdavidcs				      MAX_NUM_L2_QUEUES_BB) / num_funcs;
4743316485Sdavidcs		break;
4744316485Sdavidcs	case ECORE_VPORT:
4745316485Sdavidcs		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
4746316485Sdavidcs				      MAX_NUM_VPORTS_BB) / num_funcs;
4747316485Sdavidcs		break;
4748316485Sdavidcs	case ECORE_RSS_ENG:
4749316485Sdavidcs		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
4750316485Sdavidcs				      ETH_RSS_ENGINE_NUM_BB) / num_funcs;
4751316485Sdavidcs		break;
4752316485Sdavidcs	case ECORE_PQ:
4753316485Sdavidcs		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
4754316485Sdavidcs				      MAX_QM_TX_QUEUES_BB) / num_funcs;
4755316485Sdavidcs		*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
4756316485Sdavidcs		break;
4757316485Sdavidcs	case ECORE_RL:
4758316485Sdavidcs		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
4759316485Sdavidcs		break;
4760316485Sdavidcs	case ECORE_MAC:
4761316485Sdavidcs	case ECORE_VLAN:
4762316485Sdavidcs		/* Each VFC resource can accommodate both a MAC and a VLAN */
4763316485Sdavidcs		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
4764316485Sdavidcs		break;
4765316485Sdavidcs	case ECORE_ILT:
4766316485Sdavidcs		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
4767316485Sdavidcs				      PXP_NUM_ILT_RECORDS_BB) / num_funcs;
4768316485Sdavidcs		break;
4769316485Sdavidcs	case ECORE_LL2_QUEUE:
4770316485Sdavidcs		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
4771316485Sdavidcs		break;
4772316485Sdavidcs	case ECORE_RDMA_CNQ_RAM:
4773316485Sdavidcs	case ECORE_CMDQS_CQS:
4774316485Sdavidcs		/* CNQ/CMDQS are the same resource */
4775337517Sdavidcs		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
4776316485Sdavidcs		break;
4777316485Sdavidcs	case ECORE_RDMA_STATS_QUEUE:
4778316485Sdavidcs		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
4779316485Sdavidcs				      RDMA_NUM_STATISTIC_COUNTERS_BB) /
4780316485Sdavidcs			      num_funcs;
4781316485Sdavidcs		break;
4782316485Sdavidcs	case ECORE_BDQ:
4783316485Sdavidcs		if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI &&
4784316485Sdavidcs		    p_hwfn->hw_info.personality != ECORE_PCI_FCOE)
4785316485Sdavidcs			*p_resc_num = 0;
4786316485Sdavidcs		else
4787316485Sdavidcs			*p_resc_num = 1;
4788316485Sdavidcs		break;
4789316485Sdavidcs	case ECORE_SB:
4790316485Sdavidcs		/* Since we want its value to reflect whether MFW supports
4791316485Sdavidcs		 * the new scheme, have a default of 0.
4792316485Sdavidcs		 */
4793316485Sdavidcs		*p_resc_num = 0;
4794316485Sdavidcs		break;
4795316485Sdavidcs	default:
4796316485Sdavidcs		return ECORE_INVAL;
4797316485Sdavidcs	}
4798316485Sdavidcs
4799316485Sdavidcs	switch (res_id) {
4800316485Sdavidcs	case ECORE_BDQ:
4801316485Sdavidcs		if (!*p_resc_num)
4802316485Sdavidcs			*p_resc_start = 0;
4803320164Sdavidcs		else if (p_hwfn->p_dev->num_ports_in_engine == 4)
4804316485Sdavidcs			*p_resc_start = p_hwfn->port_id;
4805316485Sdavidcs		else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
4806316485Sdavidcs			*p_resc_start = p_hwfn->port_id;
4807316485Sdavidcs		else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
4808316485Sdavidcs			*p_resc_start = p_hwfn->port_id + 2;
4809316485Sdavidcs		break;
4810316485Sdavidcs	default:
4811316485Sdavidcs		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
4812316485Sdavidcs		break;
4813316485Sdavidcs	}
4814316485Sdavidcs
4815316485Sdavidcs	return ECORE_SUCCESS;
4816316485Sdavidcs}
4817316485Sdavidcs
/* Set the hwfn's allocation info (number + start offset) for one resource.
 *
 * The driver defaults are always computed first; the values reported by the
 * MFW take precedence when it answers successfully, unless drv_resc_alloc
 * requests that the driver defaults be applied even then.
 *
 * @param p_hwfn	 hw function context
 * @param res_id	 resource to configure (ECORE_L2_QUEUE, ECORE_PQ, ...)
 * @param drv_resc_alloc apply the driver defaults over a valid MFW answer
 * @return ECORE_SUCCESS, or the error from the default/MFW queries
 */
static enum _ecore_status_t
__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
			 bool drv_resc_alloc)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0;
	u32 mcp_resp, *p_resc_num, *p_resc_start;
	enum _ecore_status_t rc;

	/* Outputs are written directly into the hwfn's resc arrays */
	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	/* Driver defaults - used as a fallback and for the comparison below */
	rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				    &dflt_resc_start);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
			res_id, ecore_hw_get_resc_name(res_id));
		return rc;
	}

#ifndef ASIC_ONLY
	/* Emulation/FPGA: no MFW to query - take the defaults as-is */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
		goto out;
	}
#endif

	rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				     &mcp_resp, p_resc_num, p_resc_start);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, ecore_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		DP_INFO(p_hwfn,
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
			res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
			dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
		goto out;
	}

	/* ECORE_SB is excluded from the mismatch report since its default is
	 * deliberately 0 (it only reflects MFW support of the new scheme).
	 */
	if ((*p_resc_num != dflt_resc_num ||
	     *p_resc_start != dflt_resc_start) &&
	    res_id != ECORE_SB) {
		DP_INFO(p_hwfn,
			"MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
			res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
			*p_resc_start, dflt_resc_num, dflt_resc_start,
			drv_resc_alloc ? " - Applying default values" : "");
		if (drv_resc_alloc) {
			*p_resc_num = dflt_resc_num;
			*p_resc_start = dflt_resc_start;
		}
	}
out:
	/* PQs have to divide by 8 [that's the HW granularity].
	 * Reduce number so it would fit.
	 */
	if ((res_id == ECORE_PQ) &&
	    ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num, (*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return ECORE_SUCCESS;
}
4899316485Sdavidcs
4900316485Sdavidcsstatic enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
4901316485Sdavidcs						   bool drv_resc_alloc)
4902316485Sdavidcs{
4903316485Sdavidcs	enum _ecore_status_t rc;
4904316485Sdavidcs	u8 res_id;
4905316485Sdavidcs
4906316485Sdavidcs	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
4907316485Sdavidcs		rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
4908316485Sdavidcs		if (rc != ECORE_SUCCESS)
4909316485Sdavidcs			return rc;
4910316485Sdavidcs	}
4911316485Sdavidcs
4912316485Sdavidcs	return ECORE_SUCCESS;
4913316485Sdavidcs}
4914316485Sdavidcs
4915337517Sdavidcsstatic enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
4916337517Sdavidcs						      struct ecore_ptt *p_ptt)
4917337517Sdavidcs{
4918337517Sdavidcs	u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn);
4919337517Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
4920337517Sdavidcs	enum _ecore_status_t rc;
4921337517Sdavidcs
4922337517Sdavidcs	rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
4923337517Sdavidcs	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
4924337517Sdavidcs		return rc;
4925337517Sdavidcs	else if (rc == ECORE_NOTIMPL)
4926337517Sdavidcs		p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
4927337517Sdavidcs
4928337517Sdavidcs	if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
4929337517Sdavidcs		DP_INFO(p_hwfn,
4930337517Sdavidcs			"Fix the PPFID bitmap to inculde the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
4931337517Sdavidcs			native_ppfid_idx, p_dev->ppfid_bitmap);
4932337517Sdavidcs		p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
4933337517Sdavidcs	}
4934337517Sdavidcs
4935337517Sdavidcs	return ECORE_SUCCESS;
4936337517Sdavidcs}
4937337517Sdavidcs
/* Learn this hwfn's full resource allocation:
 *  - under the MFW resource lock (when supported), set the global max
 *    values of the soft resources,
 *  - query the MFW for the per-PF number/start of each resource,
 *  - learn the PPFID bitmap (lead hwfn only),
 *  - sanity-check the ILT range, reset the IGU CAM and derive features.
 */
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      bool drv_resc_alloc)
{
	struct ecore_resc_unlock_params resc_unlock_params;
	struct ecore_resc_lock_params resc_lock_params;
	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
	u8 res_id;
	enum _ecore_status_t rc;
#ifndef ASIC_ONLY
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	/* For AH, an equal share of the ILT lines between the maximal number of
	 * PFs is not enough for RoCE. This would be solved by the future
	 * resource allocation scheme, but isn't currently present for
	 * FPGA/emulation. For now we keep a number that is sufficient for RoCE
	 * to work - the BB number of ILT lines divided by its max PFs number.
	 */
	u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
#endif

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 * Old drivers that don't acquire the lock can run in parallel, and
	 * their allocation values won't be affected by the updated max values.
	 */

	ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
					 ECORE_RESC_LOCK_RESC_ALLOC, false);

	/* Changes on top of the default values to accommodate parallel attempts
	 * of several PFs.
	 * [10 x 10 msec by default ==> 20 x 50 msec]
	 */
	resc_lock_params.retry_num *= 2;
	resc_lock_params.retry_interval *= 5;

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
		return rc;
	} else if (rc == ECORE_NOTIMPL) {
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return ECORE_BUSY;
	} else {
		/* Lock granted - set the soft-resource max values; if the set
		 * command itself is unsupported, release the lock and proceed.
		 */
		rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
			DP_NOTICE(p_hwfn, false,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == ECORE_NOTIMPL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
						   &resc_unlock_params);
			if (rc != ECORE_SUCCESS)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	/* Query the per-resource allocation (under the lock when held) */
	rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
	if (rc != ECORE_SUCCESS)
		goto unlock_and_exit;

	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
					   &resc_unlock_params);
		if (rc != ECORE_SUCCESS)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* PPFID bitmap */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* Reduced build contains less PQs */
		if (!(p_hwfn->p_dev->b_is_emul_full)) {
			resc_num[ECORE_PQ] = 32;
			resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
					       p_hwfn->enabled_func_idx;
		}

		/* For AH emulation, since we have a possible maximal number of
		 * 16 enabled PFs, in case there are not enough ILT lines -
		 * allocate only first PF as RoCE and have all the other ETH
		 * only with less ILT lines.
		 */
		if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
			resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
							 resc_num[ECORE_ILT],
							 roce_min_ilt_lines);
	}

	/* Correct the common ILT calculation if PF0 has more */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
	    p_hwfn->p_dev->b_is_emul_full &&
	    p_hwfn->rel_pf_id &&
	    resc_num[ECORE_ILT] < roce_min_ilt_lines)
		resc_start[ECORE_ILT] += roce_min_ilt_lines -
					 resc_num[ECORE_ILT];
#endif

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, ECORE_ILT),
			  RESC_END(p_hwfn, ECORE_ILT) - 1);
		return ECORE_INVAL;
	}

	/* This will also learn the number of SBs from MFW */
	if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
		return ECORE_INVAL;

	ecore_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
		   "The numbers for each resource are:\n");
	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
			   ecore_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return ECORE_SUCCESS;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		ecore_mcp_resc_unlock(p_hwfn, p_ptt,
				      &resc_unlock_params);
	return rc;
}
5086316485Sdavidcs
/* Read the NVM configuration from the MCP scratchpad shared memory and
 * populate the hwfn/dev state accordingly: network port mode, DCBX mode,
 * default link and flow-control settings, EEE capabilities, multi-function
 * mode bits and the device capability flags.
 *
 * @return ECORE_INVAL if shmem is uninitialized; otherwise the status of
 *	   filling the shmem function info (relaxed to success when
 *	   p_params->b_relaxed_probe is set).
 */
static enum _ecore_status_t
ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      struct ecore_hw_prepare_params *p_params)
{
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	struct ecore_mcp_link_capabilities *p_caps;
	struct ecore_mcp_link_params *link;
	enum _ecore_status_t rc;
	u32 dcbx_mode;  /* __LINUX__THROW__ */

	/* Read global nvm_cfg address */
	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
		if (p_params->b_relaxed_probe)
			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
		return ECORE_INVAL;
	}

	/* Read nvm_cfg1  (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Global section: port mode */
	addr = MCP_REG_SCRATCH  + nvm_cfg1_offset +
		   OFFSETOF(struct nvm_cfg1, glob) +
		   OFFSETOF(struct nvm_cfg1_glob, core_cfg);

	core_cfg = ecore_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

#ifndef __EXTRACT__LINUX__THROW__
	/* Read DCBX configuration */
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	dcbx_mode = ecore_rd(p_hwfn, p_ptt,
			     port_cfg_addr +
			     OFFSETOF(struct nvm_cfg1_port, generic_cont0));
	dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
		>> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
	switch (dcbx_mode) {
	case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
		break;
	case NVM_CFG1_PORT_DCBX_MODE_CEE:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
		break;
	case NVM_CFG1_PORT_DCBX_MODE_IEEE:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
		break;
	default:
		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
	}
#endif

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	p_caps = &p_hwfn->mcp_info->link_capabilities;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = ecore_rd(p_hwfn, p_ptt,
			     port_cfg_addr +
			     OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;
	p_caps->speed_capabilities = link->speed.advertised_speeds;

	link_temp = ecore_rd(p_hwfn, p_ptt,
				 port_cfg_addr +
				 OFFSETOF(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
		link->speed.forced_speed = 20000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	p_caps->default_speed = link->speed.forced_speed; /* __LINUX__THROW__ */
	p_caps->default_speed_autoneg = link->speed.autoneg;

	/* Flow-control defaults come from the same link_settings word */
	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	/* EEE defaults - only when the MFW advertises EEE support */
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
				     OFFSETOF(struct nvm_cfg1_port, ext_phy));
		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
		p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
		link->eee.enable = true;
		switch (link_temp) {
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
			p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
			link->eee.enable = false;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
			p_caps->eee_lpi_timer =
				EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
			break;
		}

		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
		link->eee.tx_lpi_enable = link->eee.enable;
		link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
	} else {
		p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg,
		   p_caps->default_eee, p_caps->eee_lpi_timer);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		   OFFSETOF(struct nvm_cfg1, glob) +
		   OFFSETOF(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	/* NOTE(review): no default case - an unrecognized mf_mode leaves
	 * mf_bits untouched (presumably zero from device allocation) -
	 * TODO confirm.
	 */
	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
		break;
	case NVM_CFG1_GLOB_MF_MODE_UFP:
		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
					 1 << ECORE_MF_LLH_PROTO_CLSS |
					 1 << ECORE_MF_UFP_SPECIFIC |
					 1 << ECORE_MF_8021Q_TAGGING;
		break;
	case NVM_CFG1_GLOB_MF_MODE_BD:
		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
					 1 << ECORE_MF_LLH_PROTO_CLSS |
					 1 << ECORE_MF_8021AD_TAGGING;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
					 1 << ECORE_MF_LLH_PROTO_CLSS |
					 1 << ECORE_MF_LL2_NON_UNICAST |
					 1 << ECORE_MF_INTER_PF_SWITCH |
					 1 << ECORE_MF_DISABLE_ARFS;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
					 1 << ECORE_MF_LLH_PROTO_CLSS |
					 1 << ECORE_MF_LL2_NON_UNICAST;
		if (ECORE_IS_BB(p_hwfn->p_dev))
			p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
		p_hwfn->p_dev->mf_bits);

	/* aRFS is not supported when two hwfns share a port (CMT) */
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);

#ifndef __EXTRACT__LINUX__THROW__
	/* It's funny since we have another switch, but it's easier
	 * to throw this away in linux this way. Long term, it might be
	 * better to have have getters for needed ECORE_MF_* fields,
	 * convert client code and eliminate this.
	 */
	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
		break;
	case NVM_CFG1_GLOB_MF_MODE_UFP:
		p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
		break;
	}
#endif

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		   OFFSETOF(struct nvm_cfg1, glob) +
		   OFFSETOF(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
				 &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
				 &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
				 &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
				 &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
		OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
				 &p_hwfn->hw_info.device_capabilities);

	rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
		rc = ECORE_SUCCESS;
		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
	}

	return rc;
}
5378316485Sdavidcs
5379316485Sdavidcsstatic void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
5380316485Sdavidcs				struct ecore_ptt *p_ptt)
5381316485Sdavidcs{
5382316485Sdavidcs	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
5383316485Sdavidcs	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
5384316485Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5385316485Sdavidcs
5386316485Sdavidcs	num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
5387316485Sdavidcs
5388316485Sdavidcs	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
5389316485Sdavidcs	 * in the other bits are selected.
5390316485Sdavidcs	 * Bits 1-15 are for functions 1-15, respectively, and their value is
5391316485Sdavidcs	 * '0' only for enabled functions (function 0 always exists and
5392316485Sdavidcs	 * enabled).
5393316485Sdavidcs	 * In case of CMT in BB, only the "even" functions are enabled, and thus
5394316485Sdavidcs	 * the number of functions for both hwfns is learnt from the same bits.
5395316485Sdavidcs	 */
5396337517Sdavidcs	reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
5397316485Sdavidcs
5398316485Sdavidcs	if (reg_function_hide & 0x1) {
5399316485Sdavidcs		if (ECORE_IS_BB(p_dev)) {
5400337517Sdavidcs			if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
5401316485Sdavidcs				num_funcs = 0;
5402316485Sdavidcs				eng_mask = 0xaaaa;
5403316485Sdavidcs			} else {
5404316485Sdavidcs				num_funcs = 1;
5405316485Sdavidcs				eng_mask = 0x5554;
5406316485Sdavidcs			}
5407316485Sdavidcs		} else {
5408316485Sdavidcs			num_funcs = 1;
5409316485Sdavidcs			eng_mask = 0xfffe;
5410316485Sdavidcs		}
5411316485Sdavidcs
5412316485Sdavidcs		/* Get the number of the enabled functions on the engine */
5413316485Sdavidcs		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
5414316485Sdavidcs		while (tmp) {
5415316485Sdavidcs			if (tmp & 0x1)
5416316485Sdavidcs				num_funcs++;
5417316485Sdavidcs			tmp >>= 0x1;
5418316485Sdavidcs		}
5419316485Sdavidcs
5420316485Sdavidcs		/* Get the PF index within the enabled functions */
5421316485Sdavidcs		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
5422316485Sdavidcs		tmp = reg_function_hide & eng_mask & low_pfs_mask;
5423316485Sdavidcs		while (tmp) {
5424316485Sdavidcs			if (tmp & 0x1)
5425316485Sdavidcs				enabled_func_idx--;
5426316485Sdavidcs			tmp >>= 0x1;
5427316485Sdavidcs		}
5428316485Sdavidcs	}
5429316485Sdavidcs
5430316485Sdavidcs	p_hwfn->num_funcs_on_engine = num_funcs;
5431316485Sdavidcs	p_hwfn->enabled_func_idx = enabled_func_idx;
5432316485Sdavidcs
5433316485Sdavidcs#ifndef ASIC_ONLY
5434316485Sdavidcs	if (CHIP_REV_IS_FPGA(p_dev)) {
5435316485Sdavidcs		DP_NOTICE(p_hwfn, false,
5436316485Sdavidcs			  "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
5437316485Sdavidcs		p_hwfn->num_funcs_on_engine = 4;
5438316485Sdavidcs	}
5439316485Sdavidcs#endif
5440316485Sdavidcs
5441316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
5442316485Sdavidcs		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
5443316485Sdavidcs		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
5444316485Sdavidcs		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
5445316485Sdavidcs}
5446316485Sdavidcs
5447316485Sdavidcsstatic void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
5448316485Sdavidcs				      struct ecore_ptt *p_ptt)
5449316485Sdavidcs{
5450337517Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5451316485Sdavidcs	u32 port_mode;
5452316485Sdavidcs
5453316485Sdavidcs#ifndef ASIC_ONLY
5454316485Sdavidcs	/* Read the port mode */
5455337517Sdavidcs	if (CHIP_REV_IS_FPGA(p_dev))
5456316485Sdavidcs		port_mode = 4;
5457337517Sdavidcs	else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
5458316485Sdavidcs		/* In CMT on emulation, assume 1 port */
5459316485Sdavidcs		port_mode = 1;
5460316485Sdavidcs	else
5461316485Sdavidcs#endif
5462316485Sdavidcs	port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
5463316485Sdavidcs
5464316485Sdavidcs	if (port_mode < 3) {
5465337517Sdavidcs		p_dev->num_ports_in_engine = 1;
5466316485Sdavidcs	} else if (port_mode <= 5) {
5467337517Sdavidcs		p_dev->num_ports_in_engine = 2;
5468316485Sdavidcs	} else {
5469316485Sdavidcs		DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
5470337517Sdavidcs			  p_dev->num_ports_in_engine);
5471316485Sdavidcs
5472320164Sdavidcs		/* Default num_ports_in_engine to something */
5473337517Sdavidcs		p_dev->num_ports_in_engine = 1;
5474316485Sdavidcs	}
5475316485Sdavidcs}
5476316485Sdavidcs
5477316485Sdavidcsstatic void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
5478316485Sdavidcs					 struct ecore_ptt *p_ptt)
5479316485Sdavidcs{
5480337517Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5481316485Sdavidcs	u32 port;
5482316485Sdavidcs	int i;
5483316485Sdavidcs
5484337517Sdavidcs	p_dev->num_ports_in_engine = 0;
5485316485Sdavidcs
5486316485Sdavidcs#ifndef ASIC_ONLY
5487337517Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev)) {
5488320164Sdavidcs		port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
5489316485Sdavidcs		switch ((port & 0xf000) >> 12) {
5490316485Sdavidcs		case 1:
5491337517Sdavidcs			p_dev->num_ports_in_engine = 1;
5492316485Sdavidcs			break;
5493316485Sdavidcs		case 3:
5494337517Sdavidcs			p_dev->num_ports_in_engine = 2;
5495316485Sdavidcs			break;
5496316485Sdavidcs		case 0xf:
5497337517Sdavidcs			p_dev->num_ports_in_engine = 4;
5498316485Sdavidcs			break;
5499316485Sdavidcs		default:
5500316485Sdavidcs			DP_NOTICE(p_hwfn, false,
5501316485Sdavidcs				  "Unknown port mode in ECO_RESERVED %08x\n",
5502316485Sdavidcs				  port);
5503316485Sdavidcs		}
5504316485Sdavidcs	} else
5505316485Sdavidcs#endif
5506316485Sdavidcs	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
5507316485Sdavidcs		port = ecore_rd(p_hwfn, p_ptt,
5508316485Sdavidcs				CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4));
5509316485Sdavidcs		if (port & 1)
5510337517Sdavidcs			p_dev->num_ports_in_engine++;
5511316485Sdavidcs	}
5512320164Sdavidcs
5513337517Sdavidcs	if (!p_dev->num_ports_in_engine) {
5514320164Sdavidcs		DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
5515320164Sdavidcs
5516320164Sdavidcs		/* Default num_ports_in_engine to something */
5517337517Sdavidcs		p_dev->num_ports_in_engine = 1;
5518320164Sdavidcs	}
5519316485Sdavidcs}
5520316485Sdavidcs
5521316485Sdavidcsstatic void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
5522316485Sdavidcs				   struct ecore_ptt *p_ptt)
5523316485Sdavidcs{
5524337517Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5525337517Sdavidcs
5526337517Sdavidcs	/* Determine the number of ports per engine */
5527337517Sdavidcs	if (ECORE_IS_BB(p_dev))
5528316485Sdavidcs		ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
5529316485Sdavidcs	else
5530316485Sdavidcs		ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
5531337517Sdavidcs
5532337517Sdavidcs	/* Get the total number of ports of the device */
5533337517Sdavidcs	if (ECORE_IS_CMT(p_dev)) {
5534337517Sdavidcs		/* In CMT there is always only one port */
5535337517Sdavidcs		p_dev->num_ports = 1;
5536337517Sdavidcs#ifndef ASIC_ONLY
5537337517Sdavidcs	} else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
5538337517Sdavidcs		p_dev->num_ports = p_dev->num_ports_in_engine *
5539337517Sdavidcs				   ecore_device_num_engines(p_dev);
5540337517Sdavidcs#endif
5541337517Sdavidcs	} else {
5542337517Sdavidcs		u32 addr, global_offsize, global_addr;
5543337517Sdavidcs
5544337517Sdavidcs		addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
5545337517Sdavidcs					    PUBLIC_GLOBAL);
5546337517Sdavidcs		global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
5547337517Sdavidcs		global_addr = SECTION_ADDR(global_offsize, 0);
5548337517Sdavidcs		addr = global_addr + OFFSETOF(struct public_global, max_ports);
5549337517Sdavidcs		p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
5550337517Sdavidcs	}
5551316485Sdavidcs}
5552316485Sdavidcs
5553337517Sdavidcsstatic void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
5554337517Sdavidcs				   struct ecore_ptt *p_ptt)
5555337517Sdavidcs{
5556337517Sdavidcs	struct ecore_mcp_link_capabilities *p_caps;
5557337517Sdavidcs	u32 eee_status;
5558337517Sdavidcs
5559337517Sdavidcs	p_caps = &p_hwfn->mcp_info->link_capabilities;
5560337517Sdavidcs	if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
5561337517Sdavidcs		return;
5562337517Sdavidcs
5563337517Sdavidcs	p_caps->eee_speed_caps = 0;
5564337517Sdavidcs	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
5565337517Sdavidcs			      OFFSETOF(struct public_port, eee_status));
5566337517Sdavidcs	eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
5567337517Sdavidcs			EEE_SUPPORTED_SPEED_OFFSET;
5568337517Sdavidcs	if (eee_status & EEE_1G_SUPPORTED)
5569337517Sdavidcs		p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
5570337517Sdavidcs	if (eee_status & EEE_10G_ADV)
5571337517Sdavidcs		p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
5572337517Sdavidcs}
5573337517Sdavidcs
5574316485Sdavidcsstatic enum _ecore_status_t
5575316485Sdavidcsecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
5576316485Sdavidcs		  enum ecore_pci_personality personality,
5577316485Sdavidcs		  struct ecore_hw_prepare_params *p_params)
5578316485Sdavidcs{
5579316485Sdavidcs	bool drv_resc_alloc = p_params->drv_resc_alloc;
5580316485Sdavidcs	enum _ecore_status_t rc;
5581316485Sdavidcs
5582316485Sdavidcs	/* Since all information is common, only first hwfns should do this */
5583316485Sdavidcs	if (IS_LEAD_HWFN(p_hwfn)) {
5584316485Sdavidcs		rc = ecore_iov_hw_info(p_hwfn);
5585316485Sdavidcs		if (rc != ECORE_SUCCESS) {
5586316485Sdavidcs			if (p_params->b_relaxed_probe)
5587316485Sdavidcs				p_params->p_relaxed_res =
5588316485Sdavidcs						ECORE_HW_PREPARE_BAD_IOV;
5589316485Sdavidcs			else
5590316485Sdavidcs				return rc;
5591316485Sdavidcs		}
5592316485Sdavidcs	}
5593316485Sdavidcs
5594337517Sdavidcs	if (IS_LEAD_HWFN(p_hwfn))
5595337517Sdavidcs		ecore_hw_info_port_num(p_hwfn, p_ptt);
5596316485Sdavidcs
5597316485Sdavidcs	ecore_mcp_get_capabilities(p_hwfn, p_ptt);
5598316485Sdavidcs
5599316485Sdavidcs#ifndef ASIC_ONLY
5600316485Sdavidcs	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
5601316485Sdavidcs#endif
5602316485Sdavidcs	rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
5603316485Sdavidcs	if (rc != ECORE_SUCCESS)
5604316485Sdavidcs		return rc;
5605316485Sdavidcs#ifndef ASIC_ONLY
5606316485Sdavidcs	}
5607316485Sdavidcs#endif
5608316485Sdavidcs
5609316485Sdavidcs	rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
5610316485Sdavidcs	if (rc != ECORE_SUCCESS) {
5611316485Sdavidcs		if (p_params->b_relaxed_probe)
5612316485Sdavidcs			p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
5613316485Sdavidcs		else
5614316485Sdavidcs			return rc;
5615316485Sdavidcs	}
5616316485Sdavidcs
5617316485Sdavidcs#ifndef ASIC_ONLY
5618316485Sdavidcs	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
5619316485Sdavidcs#endif
5620316485Sdavidcs	OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
5621316485Sdavidcs		    p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
5622316485Sdavidcs#ifndef ASIC_ONLY
5623316485Sdavidcs	} else {
5624316485Sdavidcs		static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};
5625316485Sdavidcs
5626316485Sdavidcs		OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
5627316485Sdavidcs		p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
5628316485Sdavidcs	}
5629316485Sdavidcs#endif
5630316485Sdavidcs
5631316485Sdavidcs	if (ecore_mcp_is_init(p_hwfn)) {
5632316485Sdavidcs		if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
5633316485Sdavidcs			p_hwfn->hw_info.ovlan =
5634316485Sdavidcs				p_hwfn->mcp_info->func_info.ovlan;
5635316485Sdavidcs
5636316485Sdavidcs		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
5637337517Sdavidcs
5638337517Sdavidcs		ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
5639337517Sdavidcs
5640337517Sdavidcs		ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
5641316485Sdavidcs	}
5642316485Sdavidcs
5643316485Sdavidcs	if (personality != ECORE_PCI_DEFAULT) {
5644316485Sdavidcs		p_hwfn->hw_info.personality = personality;
5645316485Sdavidcs	} else if (ecore_mcp_is_init(p_hwfn)) {
5646316485Sdavidcs		enum ecore_pci_personality protocol;
5647316485Sdavidcs
5648316485Sdavidcs		protocol = p_hwfn->mcp_info->func_info.protocol;
5649316485Sdavidcs		p_hwfn->hw_info.personality = protocol;
5650316485Sdavidcs	}
5651316485Sdavidcs
5652316485Sdavidcs#ifndef ASIC_ONLY
5653316485Sdavidcs	/* To overcome ILT lack for emulation, until at least until we'll have
5654316485Sdavidcs	 * a definite answer from system about it, allow only PF0 to be RoCE.
5655316485Sdavidcs	 */
5656316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
5657316485Sdavidcs		if (!p_hwfn->rel_pf_id)
5658316485Sdavidcs			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
5659316485Sdavidcs		else
5660316485Sdavidcs			p_hwfn->hw_info.personality = ECORE_PCI_ETH;
5661316485Sdavidcs	}
5662316485Sdavidcs#endif
5663316485Sdavidcs
5664316485Sdavidcs	/* although in BB some constellations may support more than 4 tcs,
5665316485Sdavidcs	 * that can result in performance penalty in some cases. 4
5666316485Sdavidcs	 * represents a good tradeoff between performance and flexibility.
5667316485Sdavidcs	 */
5668316485Sdavidcs	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
5669316485Sdavidcs
5670316485Sdavidcs	/* start out with a single active tc. This can be increased either
5671316485Sdavidcs	 * by dcbx negotiation or by upper layer driver
5672316485Sdavidcs	 */
5673316485Sdavidcs	p_hwfn->hw_info.num_active_tc = 1;
5674316485Sdavidcs
5675316485Sdavidcs	ecore_get_num_funcs(p_hwfn, p_ptt);
5676316485Sdavidcs
5677316485Sdavidcs	if (ecore_mcp_is_init(p_hwfn))
5678316485Sdavidcs		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
5679316485Sdavidcs
5680316485Sdavidcs	/* In case of forcing the driver's default resource allocation, calling
5681316485Sdavidcs	 * ecore_hw_get_resc() should come after initializing the personality
5682316485Sdavidcs	 * and after getting the number of functions, since the calculation of
5683316485Sdavidcs	 * the resources/features depends on them.
5684316485Sdavidcs	 * This order is not harmful if not forcing.
5685316485Sdavidcs	 */
5686320164Sdavidcs	rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
5687316485Sdavidcs	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
5688316485Sdavidcs		rc = ECORE_SUCCESS;
5689316485Sdavidcs		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
5690316485Sdavidcs	}
5691316485Sdavidcs
5692316485Sdavidcs	return rc;
5693316485Sdavidcs}
5694316485Sdavidcs
5695337517Sdavidcs#define ECORE_MAX_DEVICE_NAME_LEN	(8)
5696337517Sdavidcs
5697337517Sdavidcsvoid ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
5698337517Sdavidcs{
5699337517Sdavidcs	u8 n;
5700337517Sdavidcs
5701337517Sdavidcs	n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
5702337517Sdavidcs	OSAL_SNPRINTF(name, n, "%s %c%d", ECORE_IS_BB(p_dev) ? "BB" : "AH",
5703337517Sdavidcs		      'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
5704337517Sdavidcs}
5705337517Sdavidcs
5706320164Sdavidcsstatic enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
5707320164Sdavidcs					       struct ecore_ptt *p_ptt)
5708316485Sdavidcs{
5709320164Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5710316485Sdavidcs	u16 device_id_mask;
5711316485Sdavidcs	u32 tmp;
5712316485Sdavidcs
5713316485Sdavidcs	/* Read Vendor Id / Device Id */
5714316485Sdavidcs	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
5715316485Sdavidcs				  &p_dev->vendor_id);
5716316485Sdavidcs	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
5717316485Sdavidcs				  &p_dev->device_id);
5718316485Sdavidcs
5719316485Sdavidcs	/* Determine type */
5720316485Sdavidcs	device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
5721316485Sdavidcs	switch (device_id_mask) {
5722316485Sdavidcs	case ECORE_DEV_ID_MASK_BB:
5723316485Sdavidcs		p_dev->type = ECORE_DEV_TYPE_BB;
5724316485Sdavidcs		break;
5725316485Sdavidcs	case ECORE_DEV_ID_MASK_AH:
5726316485Sdavidcs		p_dev->type = ECORE_DEV_TYPE_AH;
5727316485Sdavidcs		break;
5728320164Sdavidcs	case ECORE_DEV_ID_MASK_E5:
5729320164Sdavidcs		p_dev->type = ECORE_DEV_TYPE_E5;
5730320164Sdavidcs		break;
5731316485Sdavidcs	default:
5732316485Sdavidcs		DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
5733316485Sdavidcs			  p_dev->device_id);
5734316485Sdavidcs		return ECORE_ABORTED;
5735316485Sdavidcs	}
5736316485Sdavidcs
5737337517Sdavidcs	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
5738337517Sdavidcs	p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
5739337517Sdavidcs	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
5740337517Sdavidcs	p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
5741316485Sdavidcs
5742316485Sdavidcs	/* Learn number of HW-functions */
5743320164Sdavidcs	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
5744316485Sdavidcs
5745316485Sdavidcs	if (tmp & (1 << p_hwfn->rel_pf_id)) {
5746316485Sdavidcs		DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
5747316485Sdavidcs		p_dev->num_hwfns = 2;
5748316485Sdavidcs	} else {
5749316485Sdavidcs		p_dev->num_hwfns = 1;
5750316485Sdavidcs	}
5751316485Sdavidcs
5752316485Sdavidcs#ifndef ASIC_ONLY
5753316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev)) {
5754316485Sdavidcs		/* For some reason we have problems with this register
5755316485Sdavidcs		 * in B0 emulation; Simply assume no CMT
5756316485Sdavidcs		 */
5757316485Sdavidcs		DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n");
5758316485Sdavidcs		p_dev->num_hwfns = 1;
5759316485Sdavidcs	}
5760316485Sdavidcs#endif
5761316485Sdavidcs
5762337517Sdavidcs	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
5763337517Sdavidcs	p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
5764337517Sdavidcs	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
5765337517Sdavidcs	p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
5766337517Sdavidcs
5767316485Sdavidcs	DP_INFO(p_dev->hwfns,
5768337517Sdavidcs		"Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
5769316485Sdavidcs		ECORE_IS_BB(p_dev) ? "BB" : "AH",
5770316485Sdavidcs		'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
5771316485Sdavidcs		p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
5772316485Sdavidcs		p_dev->chip_metal);
5773316485Sdavidcs
5774337517Sdavidcs	if (ECORE_IS_BB_A0(p_dev)) {
5775316485Sdavidcs		DP_NOTICE(p_dev->hwfns, false,
5776316485Sdavidcs			  "The chip type/rev (BB A0) is not supported!\n");
5777316485Sdavidcs		return ECORE_ABORTED;
5778316485Sdavidcs	}
5779316485Sdavidcs
5780316485Sdavidcs#ifndef ASIC_ONLY
5781316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
5782320164Sdavidcs		ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
5783316485Sdavidcs
5784316485Sdavidcs	if (CHIP_REV_IS_EMUL(p_dev)) {
5785320164Sdavidcs		tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
5786316485Sdavidcs		if (tmp & (1 << 29)) {
5787316485Sdavidcs			DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n");
5788316485Sdavidcs			p_dev->b_is_emul_full = true;
5789316485Sdavidcs		} else {
5790316485Sdavidcs			DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n");
5791316485Sdavidcs		}
5792316485Sdavidcs	}
5793316485Sdavidcs#endif
5794316485Sdavidcs
5795316485Sdavidcs	return ECORE_SUCCESS;
5796316485Sdavidcs}
5797316485Sdavidcs
5798337517Sdavidcs#ifndef LINUX_REMOVE
5799316485Sdavidcsvoid ecore_hw_hibernate_prepare(struct ecore_dev *p_dev)
5800316485Sdavidcs{
5801316485Sdavidcs	int j;
5802316485Sdavidcs
5803316485Sdavidcs	if (IS_VF(p_dev))
5804316485Sdavidcs		return;
5805316485Sdavidcs
5806316485Sdavidcs	for_each_hwfn(p_dev, j) {
5807316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
5808316485Sdavidcs
5809316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n");
5810316485Sdavidcs
5811316485Sdavidcs		p_hwfn->hw_init_done = false;
5812316485Sdavidcs
5813316485Sdavidcs		ecore_ptt_invalidate(p_hwfn);
5814316485Sdavidcs	}
5815316485Sdavidcs}
5816316485Sdavidcs
5817316485Sdavidcsvoid ecore_hw_hibernate_resume(struct ecore_dev *p_dev)
5818316485Sdavidcs{
5819316485Sdavidcs	int j = 0;
5820316485Sdavidcs
5821316485Sdavidcs	if (IS_VF(p_dev))
5822316485Sdavidcs		return;
5823316485Sdavidcs
5824316485Sdavidcs	for_each_hwfn(p_dev, j) {
5825316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
5826316485Sdavidcs		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
5827316485Sdavidcs
5828316485Sdavidcs		ecore_hw_hwfn_prepare(p_hwfn);
5829316485Sdavidcs
5830316485Sdavidcs		if (!p_ptt)
5831337517Sdavidcs			DP_NOTICE(p_hwfn, false, "ptt acquire failed\n");
5832316485Sdavidcs		else {
5833316485Sdavidcs			ecore_load_mcp_offsets(p_hwfn, p_ptt);
5834316485Sdavidcs			ecore_ptt_release(p_hwfn, p_ptt);
5835316485Sdavidcs		}
5836316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n");
5837316485Sdavidcs	}
5838316485Sdavidcs}
5839316485Sdavidcs
5840337517Sdavidcs#endif
5841337517Sdavidcs
5842337517Sdavidcsstatic enum _ecore_status_t
5843337517Sdavidcsecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
5844337517Sdavidcs			void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
5845337517Sdavidcs			struct ecore_hw_prepare_params *p_params)
5846316485Sdavidcs{
5847316485Sdavidcs	struct ecore_mdump_retain_data mdump_retain;
5848316485Sdavidcs	struct ecore_dev *p_dev = p_hwfn->p_dev;
5849316485Sdavidcs	struct ecore_mdump_info mdump_info;
5850316485Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
5851316485Sdavidcs
5852316485Sdavidcs	/* Split PCI bars evenly between hwfns */
5853316485Sdavidcs	p_hwfn->regview = p_regview;
5854316485Sdavidcs	p_hwfn->doorbells = p_doorbells;
5855337517Sdavidcs	p_hwfn->db_phys_addr = db_phys_addr;
5856316485Sdavidcs
5857337517Sdavidcs#ifndef LINUX_REMOVE
5858337517Sdavidcs       p_hwfn->reg_offset = (u8 *)p_hwfn->regview - (u8 *)p_hwfn->p_dev->regview;
5859337517Sdavidcs       p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - (u8 *)p_hwfn->p_dev->doorbells;
5860337517Sdavidcs#endif
5861337517Sdavidcs
5862316485Sdavidcs	if (IS_VF(p_dev))
5863316485Sdavidcs		return ecore_vf_hw_prepare(p_hwfn);
5864316485Sdavidcs
5865316485Sdavidcs	/* Validate that chip access is feasible */
5866316485Sdavidcs	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
5867316485Sdavidcs		DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n");
5868316485Sdavidcs		if (p_params->b_relaxed_probe)
5869316485Sdavidcs			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
5870316485Sdavidcs		return ECORE_INVAL;
5871316485Sdavidcs	}
5872316485Sdavidcs
5873316485Sdavidcs	get_function_id(p_hwfn);
5874316485Sdavidcs
5875316485Sdavidcs	/* Allocate PTT pool */
5876316485Sdavidcs	rc = ecore_ptt_pool_alloc(p_hwfn);
5877316485Sdavidcs	if (rc) {
5878337517Sdavidcs		DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
5879316485Sdavidcs		if (p_params->b_relaxed_probe)
5880316485Sdavidcs			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5881316485Sdavidcs		goto err0;
5882316485Sdavidcs	}
5883316485Sdavidcs
5884316485Sdavidcs	/* Allocate the main PTT */
5885316485Sdavidcs	p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
5886316485Sdavidcs
5887316485Sdavidcs	/* First hwfn learns basic information, e.g., number of hwfns */
5888316485Sdavidcs	if (!p_hwfn->my_id) {
5889320164Sdavidcs		rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
5890316485Sdavidcs		if (rc != ECORE_SUCCESS) {
5891316485Sdavidcs			if (p_params->b_relaxed_probe)
5892316485Sdavidcs				p_params->p_relaxed_res =
5893316485Sdavidcs					ECORE_HW_PREPARE_FAILED_DEV;
5894316485Sdavidcs			goto err1;
5895316485Sdavidcs		}
5896316485Sdavidcs	}
5897316485Sdavidcs
5898316485Sdavidcs	ecore_hw_hwfn_prepare(p_hwfn);
5899316485Sdavidcs
5900316485Sdavidcs	/* Initialize MCP structure */
5901316485Sdavidcs	rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
5902316485Sdavidcs	if (rc) {
5903337517Sdavidcs		DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
5904316485Sdavidcs		if (p_params->b_relaxed_probe)
5905316485Sdavidcs			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5906316485Sdavidcs		goto err1;
5907316485Sdavidcs	}
5908316485Sdavidcs
5909316485Sdavidcs	/* Read the device configuration information from the HW and SHMEM */
5910316485Sdavidcs	rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
5911316485Sdavidcs			       p_params->personality, p_params);
5912316485Sdavidcs	if (rc) {
5913337517Sdavidcs		DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
5914316485Sdavidcs		goto err2;
5915316485Sdavidcs	}
5916316485Sdavidcs
5917316485Sdavidcs	/* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
5918316485Sdavidcs	 * called, since among others it sets the ports number in an engine.
5919316485Sdavidcs	 */
5920337517Sdavidcs	if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
5921316485Sdavidcs	    !p_dev->recov_in_prog) {
5922316485Sdavidcs		rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
5923316485Sdavidcs		if (rc != ECORE_SUCCESS)
5924316485Sdavidcs			DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
5925316485Sdavidcs	}
5926316485Sdavidcs
5927316485Sdavidcs	/* Check if mdump logs/data are present and update the epoch value */
5928337517Sdavidcs	if (IS_LEAD_HWFN(p_hwfn)) {
5929337517Sdavidcs#ifndef ASIC_ONLY
5930337517Sdavidcs		if (!CHIP_REV_IS_EMUL(p_dev)) {
5931337517Sdavidcs#endif
5932316485Sdavidcs		rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
5933316485Sdavidcs					      &mdump_info);
5934316485Sdavidcs		if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
5935316485Sdavidcs			DP_NOTICE(p_hwfn, false,
5936316485Sdavidcs				  "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
5937316485Sdavidcs
5938316485Sdavidcs		rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
5939316485Sdavidcs						&mdump_retain);
5940316485Sdavidcs		if (rc == ECORE_SUCCESS && mdump_retain.valid)
5941316485Sdavidcs			DP_NOTICE(p_hwfn, false,
5942316485Sdavidcs				  "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
5943316485Sdavidcs				  mdump_retain.epoch, mdump_retain.pf,
5944316485Sdavidcs				  mdump_retain.status);
5945316485Sdavidcs
5946316485Sdavidcs		ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
5947316485Sdavidcs					   p_params->epoch);
5948337517Sdavidcs#ifndef ASIC_ONLY
5949337517Sdavidcs		}
5950337517Sdavidcs#endif
5951316485Sdavidcs	}
5952316485Sdavidcs
5953316485Sdavidcs	/* Allocate the init RT array and initialize the init-ops engine */
5954316485Sdavidcs	rc = ecore_init_alloc(p_hwfn);
5955316485Sdavidcs	if (rc) {
5956337517Sdavidcs		DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
5957316485Sdavidcs		if (p_params->b_relaxed_probe)
5958316485Sdavidcs			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5959316485Sdavidcs		goto err2;
5960316485Sdavidcs	}
5961316485Sdavidcs
5962316485Sdavidcs#ifndef ASIC_ONLY
5963316485Sdavidcs	if (CHIP_REV_IS_FPGA(p_dev)) {
5964316485Sdavidcs		DP_NOTICE(p_hwfn, false,
5965316485Sdavidcs			  "FPGA: workaround; Prevent DMAE parities\n");
5966316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
5967316485Sdavidcs			 7);
5968316485Sdavidcs
5969316485Sdavidcs		DP_NOTICE(p_hwfn, false,
5970316485Sdavidcs			  "FPGA: workaround: Set VF bar0 size\n");
5971316485Sdavidcs		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
5972316485Sdavidcs			 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
5973316485Sdavidcs	}
5974316485Sdavidcs#endif
5975316485Sdavidcs
5976316485Sdavidcs	return rc;
5977316485Sdavidcserr2:
5978316485Sdavidcs	if (IS_LEAD_HWFN(p_hwfn))
5979316485Sdavidcs		ecore_iov_free_hw_info(p_dev);
5980316485Sdavidcs	ecore_mcp_free(p_hwfn);
5981316485Sdavidcserr1:
5982316485Sdavidcs	ecore_hw_hwfn_free(p_hwfn);
5983316485Sdavidcserr0:
5984316485Sdavidcs	return rc;
5985316485Sdavidcs}
5986316485Sdavidcs
5987316485Sdavidcsenum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
5988316485Sdavidcs				      struct ecore_hw_prepare_params *p_params)
5989316485Sdavidcs{
5990316485Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
5991316485Sdavidcs	enum _ecore_status_t rc;
5992316485Sdavidcs
5993316485Sdavidcs	p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
5994316485Sdavidcs	p_dev->allow_mdump = p_params->allow_mdump;
5995316485Sdavidcs
5996316485Sdavidcs	if (p_params->b_relaxed_probe)
5997316485Sdavidcs		p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
5998316485Sdavidcs
5999316485Sdavidcs	/* Store the precompiled init data ptrs */
6000316485Sdavidcs	if (IS_PF(p_dev))
6001316485Sdavidcs		ecore_init_iro_array(p_dev);
6002316485Sdavidcs
6003316485Sdavidcs	/* Initialize the first hwfn - will learn number of hwfns */
6004337517Sdavidcs	rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
6005337517Sdavidcs				     p_dev->doorbells, p_dev->db_phys_addr,
6006337517Sdavidcs				     p_params);
6007316485Sdavidcs	if (rc != ECORE_SUCCESS)
6008316485Sdavidcs		return rc;
6009316485Sdavidcs
6010316485Sdavidcs	p_params->personality = p_hwfn->hw_info.personality;
6011316485Sdavidcs
6012316485Sdavidcs	/* initilalize 2nd hwfn if necessary */
6013337517Sdavidcs	if (ECORE_IS_CMT(p_dev)) {
6014316485Sdavidcs		void OSAL_IOMEM *p_regview, *p_doorbell;
6015316485Sdavidcs		u8 OSAL_IOMEM *addr;
6016337517Sdavidcs		u64 db_phys_addr;
6017337517Sdavidcs		u32 offset;
6018316485Sdavidcs
6019316485Sdavidcs		/* adjust bar offset for second engine */
6020337517Sdavidcs		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
6021337517Sdavidcs					   BAR_ID_0) / 2;
6022337517Sdavidcs		addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
6023316485Sdavidcs		p_regview = (void OSAL_IOMEM *)addr;
6024316485Sdavidcs
6025337517Sdavidcs		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
6026337517Sdavidcs					   BAR_ID_1) / 2;
6027337517Sdavidcs		addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
6028316485Sdavidcs		p_doorbell = (void OSAL_IOMEM *)addr;
6029337517Sdavidcs		db_phys_addr = p_dev->db_phys_addr + offset;
6030316485Sdavidcs
6031316485Sdavidcs		/* prepare second hw function */
6032316485Sdavidcs		rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
6033337517Sdavidcs					     p_doorbell, db_phys_addr,
6034337517Sdavidcs					     p_params);
6035316485Sdavidcs
6036316485Sdavidcs		/* in case of error, need to free the previously
6037316485Sdavidcs		 * initiliazed hwfn 0.
6038316485Sdavidcs		 */
6039316485Sdavidcs		if (rc != ECORE_SUCCESS) {
6040316485Sdavidcs			if (p_params->b_relaxed_probe)
6041316485Sdavidcs				p_params->p_relaxed_res =
6042316485Sdavidcs						ECORE_HW_PREPARE_FAILED_ENG2;
6043316485Sdavidcs
6044316485Sdavidcs			if (IS_PF(p_dev)) {
6045316485Sdavidcs				ecore_init_free(p_hwfn);
6046316485Sdavidcs				ecore_mcp_free(p_hwfn);
6047316485Sdavidcs				ecore_hw_hwfn_free(p_hwfn);
6048316485Sdavidcs			} else {
6049337517Sdavidcs				DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
6050316485Sdavidcs			}
6051316485Sdavidcs			return rc;
6052316485Sdavidcs		}
6053316485Sdavidcs	}
6054316485Sdavidcs
6055316485Sdavidcs	return rc;
6056316485Sdavidcs}
6057316485Sdavidcs
6058316485Sdavidcsvoid ecore_hw_remove(struct ecore_dev *p_dev)
6059316485Sdavidcs{
6060316485Sdavidcs	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
6061316485Sdavidcs	int i;
6062316485Sdavidcs
6063316485Sdavidcs	if (IS_PF(p_dev))
6064316485Sdavidcs		ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
6065316485Sdavidcs						 ECORE_OV_DRIVER_STATE_NOT_LOADED);
6066316485Sdavidcs
6067316485Sdavidcs	for_each_hwfn(p_dev, i) {
6068316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6069316485Sdavidcs
6070316485Sdavidcs		if (IS_VF(p_dev)) {
6071316485Sdavidcs			ecore_vf_pf_release(p_hwfn);
6072316485Sdavidcs			continue;
6073316485Sdavidcs		}
6074316485Sdavidcs
6075316485Sdavidcs		ecore_init_free(p_hwfn);
6076316485Sdavidcs		ecore_hw_hwfn_free(p_hwfn);
6077316485Sdavidcs		ecore_mcp_free(p_hwfn);
6078316485Sdavidcs
6079320164Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC
6080337517Sdavidcs		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
6081320164Sdavidcs#endif
6082316485Sdavidcs	}
6083316485Sdavidcs
6084316485Sdavidcs	ecore_iov_free_hw_info(p_dev);
6085316485Sdavidcs}
6086316485Sdavidcs
6087316485Sdavidcsstatic void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
6088316485Sdavidcs				      struct ecore_chain *p_chain)
6089316485Sdavidcs{
6090316485Sdavidcs	void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
6091316485Sdavidcs	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
6092316485Sdavidcs	struct ecore_chain_next *p_next;
6093316485Sdavidcs	u32 size, i;
6094316485Sdavidcs
6095316485Sdavidcs	if (!p_virt)
6096316485Sdavidcs		return;
6097316485Sdavidcs
6098316485Sdavidcs	size = p_chain->elem_size * p_chain->usable_per_page;
6099316485Sdavidcs
6100316485Sdavidcs	for (i = 0; i < p_chain->page_cnt; i++) {
6101316485Sdavidcs		if (!p_virt)
6102316485Sdavidcs			break;
6103316485Sdavidcs
6104316485Sdavidcs		p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
6105316485Sdavidcs		p_virt_next = p_next->next_virt;
6106316485Sdavidcs		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
6107316485Sdavidcs
6108316485Sdavidcs		OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
6109316485Sdavidcs				       ECORE_CHAIN_PAGE_SIZE);
6110316485Sdavidcs
6111316485Sdavidcs		p_virt = p_virt_next;
6112316485Sdavidcs		p_phys = p_phys_next;
6113316485Sdavidcs	}
6114316485Sdavidcs}
6115316485Sdavidcs
6116316485Sdavidcsstatic void ecore_chain_free_single(struct ecore_dev *p_dev,
6117316485Sdavidcs				    struct ecore_chain *p_chain)
6118316485Sdavidcs{
6119316485Sdavidcs	if (!p_chain->p_virt_addr)
6120316485Sdavidcs		return;
6121316485Sdavidcs
6122316485Sdavidcs	OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
6123316485Sdavidcs			       p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
6124316485Sdavidcs}
6125316485Sdavidcs
6126316485Sdavidcsstatic void ecore_chain_free_pbl(struct ecore_dev *p_dev,
6127316485Sdavidcs				 struct ecore_chain *p_chain)
6128316485Sdavidcs{
6129316485Sdavidcs	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
6130316485Sdavidcs	u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
6131316485Sdavidcs	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
6132316485Sdavidcs
6133316485Sdavidcs	if (!pp_virt_addr_tbl)
6134316485Sdavidcs		return;
6135316485Sdavidcs
6136316485Sdavidcs	if (!p_pbl_virt)
6137316485Sdavidcs		goto out;
6138316485Sdavidcs
6139316485Sdavidcs	for (i = 0; i < page_cnt; i++) {
6140316485Sdavidcs		if (!pp_virt_addr_tbl[i])
6141316485Sdavidcs			break;
6142316485Sdavidcs
6143316485Sdavidcs		OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
6144316485Sdavidcs				       *(dma_addr_t *)p_pbl_virt,
6145316485Sdavidcs				       ECORE_CHAIN_PAGE_SIZE);
6146316485Sdavidcs
6147316485Sdavidcs		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
6148316485Sdavidcs	}
6149316485Sdavidcs
6150316485Sdavidcs	pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
6151316485Sdavidcs
6152316485Sdavidcs	if (!p_chain->b_external_pbl) {
6153316485Sdavidcs		OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
6154316485Sdavidcs				       p_chain->pbl_sp.p_phys_table, pbl_size);
6155316485Sdavidcs	}
6156316485Sdavidcsout:
6157316485Sdavidcs	OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
6158316485Sdavidcs	p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
6159316485Sdavidcs}
6160316485Sdavidcs
/* Release all memory owned by @p_chain, dispatching to the freeing
 * routine that matches the chain's page arrangement mode.
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain)
{
	switch (p_chain->mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		ecore_chain_free_next_ptr(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		ecore_chain_free_single(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_PBL:
		ecore_chain_free_pbl(p_dev, p_chain);
		break;
	}
}
6176316485Sdavidcs
6177316485Sdavidcsstatic enum _ecore_status_t
6178316485Sdavidcsecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
6179316485Sdavidcs			       enum ecore_chain_cnt_type cnt_type,
6180316485Sdavidcs			       osal_size_t elem_size, u32 page_cnt)
6181316485Sdavidcs{
6182316485Sdavidcs	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
6183316485Sdavidcs
6184316485Sdavidcs	/* The actual chain size can be larger than the maximal possible value
6185316485Sdavidcs	 * after rounding up the requested elements number to pages, and after
6186316485Sdavidcs	 * taking into acount the unusuable elements (next-ptr elements).
6187316485Sdavidcs	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
6188316485Sdavidcs	 * size/capacity fields are of a u32 type.
6189316485Sdavidcs	 */
6190316485Sdavidcs	if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
6191316485Sdavidcs	     chain_size > ((u32)ECORE_U16_MAX + 1)) ||
6192316485Sdavidcs	    (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
6193316485Sdavidcs	     chain_size > ECORE_U32_MAX)) {
6194316485Sdavidcs		DP_NOTICE(p_dev, true,
6195316485Sdavidcs			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
6196316485Sdavidcs			  (unsigned long long)chain_size);
6197316485Sdavidcs		return ECORE_INVAL;
6198316485Sdavidcs	}
6199316485Sdavidcs
6200316485Sdavidcs	return ECORE_SUCCESS;
6201316485Sdavidcs}
6202316485Sdavidcs
6203316485Sdavidcsstatic enum _ecore_status_t
6204316485Sdavidcsecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
6205316485Sdavidcs{
6206316485Sdavidcs	void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
6207316485Sdavidcs	dma_addr_t p_phys = 0;
6208316485Sdavidcs	u32 i;
6209316485Sdavidcs
6210316485Sdavidcs	for (i = 0; i < p_chain->page_cnt; i++) {
6211316485Sdavidcs		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
6212316485Sdavidcs						 ECORE_CHAIN_PAGE_SIZE);
6213316485Sdavidcs		if (!p_virt) {
6214337517Sdavidcs			DP_NOTICE(p_dev, false,
6215316485Sdavidcs				  "Failed to allocate chain memory\n");
6216316485Sdavidcs			return ECORE_NOMEM;
6217316485Sdavidcs		}
6218316485Sdavidcs
6219316485Sdavidcs		if (i == 0) {
6220316485Sdavidcs			ecore_chain_init_mem(p_chain, p_virt, p_phys);
6221316485Sdavidcs			ecore_chain_reset(p_chain);
6222316485Sdavidcs		} else {
6223316485Sdavidcs			ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
6224316485Sdavidcs						       p_virt, p_phys);
6225316485Sdavidcs		}
6226316485Sdavidcs
6227316485Sdavidcs		p_virt_prev = p_virt;
6228316485Sdavidcs	}
6229316485Sdavidcs	/* Last page's next element should point to the beginning of the
6230316485Sdavidcs	 * chain.
6231316485Sdavidcs	 */
6232316485Sdavidcs	ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
6233316485Sdavidcs				       p_chain->p_virt_addr,
6234316485Sdavidcs				       p_chain->p_phys_addr);
6235316485Sdavidcs
6236316485Sdavidcs	return ECORE_SUCCESS;
6237316485Sdavidcs}
6238316485Sdavidcs
6239316485Sdavidcsstatic enum _ecore_status_t
6240316485Sdavidcsecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
6241316485Sdavidcs{
6242316485Sdavidcs	dma_addr_t p_phys = 0;
6243316485Sdavidcs	void *p_virt = OSAL_NULL;
6244316485Sdavidcs
6245316485Sdavidcs	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
6246316485Sdavidcs	if (!p_virt) {
6247337517Sdavidcs		DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
6248316485Sdavidcs		return ECORE_NOMEM;
6249316485Sdavidcs	}
6250316485Sdavidcs
6251316485Sdavidcs	ecore_chain_init_mem(p_chain, p_virt, p_phys);
6252316485Sdavidcs	ecore_chain_reset(p_chain);
6253316485Sdavidcs
6254316485Sdavidcs	return ECORE_SUCCESS;
6255316485Sdavidcs}
6256316485Sdavidcs
/* Allocate a PBL-mode chain: a shadow table of page virtual addresses,
 * a physical Page Base List (PBL) table (unless one is provided via
 * @ext_pbl), and the chain pages themselves.
 * On any failure a partial state is left behind; the caller is expected
 * to release it via ecore_chain_free().
 */
static enum _ecore_status_t
ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain,
		      struct ecore_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	void **pp_virt_addr_tbl = OSAL_NULL;
	u8 *p_pbl_virt = OSAL_NULL;
	void *p_virt = OSAL_NULL;

	/* Shadow table holding the virtual address of each chain page */
	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
	if (!pp_virt_addr_tbl) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate memory for the chain virtual addresses table\n");
		return ECORE_NOMEM;
	}

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * ecore_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;

	if (ext_pbl == OSAL_NULL) {
		p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
	} else {
		/* Caller owns the PBL table; flag it so it is not freed */
		p_pbl_virt = ext_pbl->p_pbl_virt;
		p_pbl_phys = ext_pbl->p_pbl_phys;
		p_chain->b_external_pbl = true;
	}

	ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
				 pp_virt_addr_tbl);
	if (!p_pbl_virt) {
		DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
		return ECORE_NOMEM;
	}

	for (i = 0; i < page_cnt; i++) {
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
						 ECORE_CHAIN_PAGE_SIZE);
		if (!p_virt) {
			DP_NOTICE(p_dev, false,
				  "Failed to allocate chain memory\n");
			return ECORE_NOMEM;
		}

		if (i == 0) {
			ecore_chain_init_mem(p_chain, p_virt, p_phys);
			ecore_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
	}

	return ECORE_SUCCESS;
}
6323316485Sdavidcs
6324316485Sdavidcsenum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
6325316485Sdavidcs				       enum ecore_chain_use_mode intended_use,
6326316485Sdavidcs				       enum ecore_chain_mode mode,
6327316485Sdavidcs				       enum ecore_chain_cnt_type cnt_type,
6328316485Sdavidcs				       u32 num_elems, osal_size_t elem_size,
6329316485Sdavidcs				       struct ecore_chain *p_chain,
6330316485Sdavidcs				       struct ecore_chain_ext_pbl *ext_pbl)
6331316485Sdavidcs{
6332316485Sdavidcs	u32 page_cnt;
6333316485Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
6334316485Sdavidcs
6335316485Sdavidcs	if (mode == ECORE_CHAIN_MODE_SINGLE)
6336316485Sdavidcs		page_cnt = 1;
6337316485Sdavidcs	else
6338316485Sdavidcs		page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
6339316485Sdavidcs
6340316485Sdavidcs	rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
6341316485Sdavidcs					    page_cnt);
6342316485Sdavidcs	if (rc) {
6343337517Sdavidcs		DP_NOTICE(p_dev, false,
6344316485Sdavidcs			  "Cannot allocate a chain with the given arguments:\n"
6345316485Sdavidcs			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
6346316485Sdavidcs			  intended_use, mode, cnt_type, num_elems, elem_size);
6347316485Sdavidcs		return rc;
6348316485Sdavidcs	}
6349316485Sdavidcs
6350316485Sdavidcs	ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
6351316485Sdavidcs				mode, cnt_type, p_dev->dp_ctx);
6352316485Sdavidcs
6353316485Sdavidcs	switch (mode) {
6354316485Sdavidcs	case ECORE_CHAIN_MODE_NEXT_PTR:
6355316485Sdavidcs		rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
6356316485Sdavidcs		break;
6357316485Sdavidcs	case ECORE_CHAIN_MODE_SINGLE:
6358316485Sdavidcs		rc = ecore_chain_alloc_single(p_dev, p_chain);
6359316485Sdavidcs		break;
6360316485Sdavidcs	case ECORE_CHAIN_MODE_PBL:
6361316485Sdavidcs		rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
6362316485Sdavidcs		break;
6363316485Sdavidcs	}
6364316485Sdavidcs	if (rc)
6365316485Sdavidcs		goto nomem;
6366316485Sdavidcs
6367316485Sdavidcs	return ECORE_SUCCESS;
6368316485Sdavidcs
6369316485Sdavidcsnomem:
6370316485Sdavidcs	ecore_chain_free(p_dev, p_chain);
6371316485Sdavidcs	return rc;
6372316485Sdavidcs}
6373316485Sdavidcs
6374316485Sdavidcsenum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
6375316485Sdavidcs				       u16 src_id, u16 *dst_id)
6376316485Sdavidcs{
6377316485Sdavidcs	if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
6378316485Sdavidcs		u16 min, max;
6379316485Sdavidcs
6380316485Sdavidcs		min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
6381316485Sdavidcs		max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
6382316485Sdavidcs		DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
6383316485Sdavidcs			  src_id, min, max);
6384316485Sdavidcs
6385316485Sdavidcs		return ECORE_INVAL;
6386316485Sdavidcs	}
6387316485Sdavidcs
6388316485Sdavidcs	*dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
6389316485Sdavidcs
6390316485Sdavidcs	return ECORE_SUCCESS;
6391316485Sdavidcs}
6392316485Sdavidcs
6393316485Sdavidcsenum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
6394316485Sdavidcs				    u8 src_id, u8 *dst_id)
6395316485Sdavidcs{
6396316485Sdavidcs	if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
6397316485Sdavidcs		u8 min, max;
6398316485Sdavidcs
6399316485Sdavidcs		min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
6400316485Sdavidcs		max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
6401316485Sdavidcs		DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n",
6402316485Sdavidcs			  src_id, min, max);
6403316485Sdavidcs
6404316485Sdavidcs		return ECORE_INVAL;
6405316485Sdavidcs	}
6406316485Sdavidcs
6407316485Sdavidcs	*dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
6408316485Sdavidcs
6409316485Sdavidcs	return ECORE_SUCCESS;
6410316485Sdavidcs}
6411316485Sdavidcs
6412316485Sdavidcsenum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
6413316485Sdavidcs				      u8 src_id, u8 *dst_id)
6414316485Sdavidcs{
6415316485Sdavidcs	if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
6416316485Sdavidcs		u8 min, max;
6417316485Sdavidcs
6418316485Sdavidcs		min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
6419316485Sdavidcs		max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
6420316485Sdavidcs		DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
6421316485Sdavidcs			  src_id, min, max);
6422316485Sdavidcs
6423316485Sdavidcs		return ECORE_INVAL;
6424316485Sdavidcs	}
6425316485Sdavidcs
6426316485Sdavidcs	*dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
6427316485Sdavidcs
6428316485Sdavidcs	return ECORE_SUCCESS;
6429316485Sdavidcs}
6430316485Sdavidcs
6431316485Sdavidcsenum _ecore_status_t
6432316485Sdavidcsecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
6433316485Sdavidcs				  struct ecore_ptt *p_ptt)
6434316485Sdavidcs{
6435337517Sdavidcs	if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
6436316485Sdavidcs		ecore_wr(p_hwfn, p_ptt,
6437316485Sdavidcs			 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
6438316485Sdavidcs			 1 << p_hwfn->abs_pf_id / 2);
6439316485Sdavidcs		ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
6440316485Sdavidcs		return ECORE_SUCCESS;
6441316485Sdavidcs	} else {
6442316485Sdavidcs		DP_NOTICE(p_hwfn, false,
6443316485Sdavidcs			  "This function can't be set as default\n");
6444316485Sdavidcs		return ECORE_INVAL;
6445316485Sdavidcs	}
6446316485Sdavidcs}
6447316485Sdavidcs
6448316485Sdavidcsstatic enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
6449316485Sdavidcs					       struct ecore_ptt *p_ptt,
6450316485Sdavidcs					       u32 hw_addr, void *p_eth_qzone,
6451316485Sdavidcs					       osal_size_t eth_qzone_size,
6452316485Sdavidcs					       u8 timeset)
6453316485Sdavidcs{
6454316485Sdavidcs	struct coalescing_timeset *p_coal_timeset;
6455316485Sdavidcs
6456316485Sdavidcs	if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
6457316485Sdavidcs		DP_NOTICE(p_hwfn, true,
6458316485Sdavidcs			  "Coalescing configuration not enabled\n");
6459316485Sdavidcs		return ECORE_INVAL;
6460316485Sdavidcs	}
6461316485Sdavidcs
6462316485Sdavidcs	p_coal_timeset = p_eth_qzone;
6463316485Sdavidcs	OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
6464316485Sdavidcs	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
6465316485Sdavidcs	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
6466316485Sdavidcs	ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
6467316485Sdavidcs
6468316485Sdavidcs	return ECORE_SUCCESS;
6469316485Sdavidcs}
6470316485Sdavidcs
6471316485Sdavidcsenum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
6472316485Sdavidcs					      u16 rx_coal, u16 tx_coal,
6473316485Sdavidcs					      void *p_handle)
6474316485Sdavidcs{
6475316485Sdavidcs	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
6476316485Sdavidcs	enum _ecore_status_t rc = ECORE_SUCCESS;
6477316485Sdavidcs	struct ecore_ptt *p_ptt;
6478316485Sdavidcs
6479316485Sdavidcs	/* TODO - Configuring a single queue's coalescing but
6480316485Sdavidcs	 * claiming all queues are abiding same configuration
6481316485Sdavidcs	 * for PF and VF both.
6482316485Sdavidcs	 */
6483316485Sdavidcs
6484316485Sdavidcs#ifdef CONFIG_ECORE_SRIOV
6485316485Sdavidcs	if (IS_VF(p_hwfn->p_dev))
6486316485Sdavidcs		return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
6487316485Sdavidcs						tx_coal, p_cid);
6488316485Sdavidcs#endif /* #ifdef CONFIG_ECORE_SRIOV */
6489316485Sdavidcs
6490316485Sdavidcs	p_ptt = ecore_ptt_acquire(p_hwfn);
6491316485Sdavidcs	if (!p_ptt)
6492316485Sdavidcs		return ECORE_AGAIN;
6493316485Sdavidcs
6494316485Sdavidcs	if (rx_coal) {
6495316485Sdavidcs		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
6496316485Sdavidcs		if (rc)
6497316485Sdavidcs			goto out;
6498316485Sdavidcs		p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
6499316485Sdavidcs	}
6500316485Sdavidcs
6501316485Sdavidcs	if (tx_coal) {
6502316485Sdavidcs		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
6503316485Sdavidcs		if (rc)
6504316485Sdavidcs			goto out;
6505316485Sdavidcs		p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
6506316485Sdavidcs	}
6507316485Sdavidcsout:
6508316485Sdavidcs	ecore_ptt_release(p_hwfn, p_ptt);
6509316485Sdavidcs
6510316485Sdavidcs	return rc;
6511316485Sdavidcs}
6512316485Sdavidcs
6513316485Sdavidcsenum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
6514316485Sdavidcs					    struct ecore_ptt *p_ptt,
6515316485Sdavidcs					    u16 coalesce,
6516316485Sdavidcs					    struct ecore_queue_cid *p_cid)
6517316485Sdavidcs{
6518316485Sdavidcs	struct ustorm_eth_queue_zone eth_qzone;
6519316485Sdavidcs	u8 timeset, timer_res;
6520316485Sdavidcs	u32 address;
6521316485Sdavidcs	enum _ecore_status_t rc;
6522316485Sdavidcs
6523316485Sdavidcs	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
6524316485Sdavidcs	if (coalesce <= 0x7F)
6525316485Sdavidcs		timer_res = 0;
6526316485Sdavidcs	else if (coalesce <= 0xFF)
6527316485Sdavidcs		timer_res = 1;
6528316485Sdavidcs	else if (coalesce <= 0x1FF)
6529316485Sdavidcs		timer_res = 2;
6530316485Sdavidcs	else {
6531316485Sdavidcs		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
6532316485Sdavidcs		return ECORE_INVAL;
6533316485Sdavidcs	}
6534316485Sdavidcs	timeset = (u8)(coalesce >> timer_res);
6535316485Sdavidcs
6536316485Sdavidcs	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
6537316485Sdavidcs				     p_cid->sb_igu_id, false);
6538316485Sdavidcs	if (rc != ECORE_SUCCESS)
6539316485Sdavidcs		goto out;
6540316485Sdavidcs
6541316485Sdavidcs	address = BAR0_MAP_REG_USDM_RAM +
6542316485Sdavidcs		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
6543316485Sdavidcs
6544316485Sdavidcs	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
6545316485Sdavidcs				sizeof(struct ustorm_eth_queue_zone), timeset);
6546316485Sdavidcs	if (rc != ECORE_SUCCESS)
6547316485Sdavidcs		goto out;
6548316485Sdavidcs
6549316485Sdavidcsout:
6550316485Sdavidcs	return rc;
6551316485Sdavidcs}
6552316485Sdavidcs
6553316485Sdavidcsenum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
6554316485Sdavidcs					    struct ecore_ptt *p_ptt,
6555316485Sdavidcs					    u16 coalesce,
6556316485Sdavidcs					    struct ecore_queue_cid *p_cid)
6557316485Sdavidcs{
6558316485Sdavidcs	struct xstorm_eth_queue_zone eth_qzone;
6559316485Sdavidcs	u8 timeset, timer_res;
6560316485Sdavidcs	u32 address;
6561316485Sdavidcs	enum _ecore_status_t rc;
6562316485Sdavidcs
6563316485Sdavidcs	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
6564316485Sdavidcs	if (coalesce <= 0x7F)
6565316485Sdavidcs		timer_res = 0;
6566316485Sdavidcs	else if (coalesce <= 0xFF)
6567316485Sdavidcs		timer_res = 1;
6568316485Sdavidcs	else if (coalesce <= 0x1FF)
6569316485Sdavidcs		timer_res = 2;
6570316485Sdavidcs	else {
6571316485Sdavidcs		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
6572316485Sdavidcs		return ECORE_INVAL;
6573316485Sdavidcs	}
6574316485Sdavidcs	timeset = (u8)(coalesce >> timer_res);
6575316485Sdavidcs
6576316485Sdavidcs	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
6577316485Sdavidcs				     p_cid->sb_igu_id, true);
6578316485Sdavidcs	if (rc != ECORE_SUCCESS)
6579316485Sdavidcs		goto out;
6580316485Sdavidcs
6581316485Sdavidcs	address = BAR0_MAP_REG_XSDM_RAM +
6582316485Sdavidcs		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
6583316485Sdavidcs
6584316485Sdavidcs	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
6585316485Sdavidcs				sizeof(struct xstorm_eth_queue_zone), timeset);
6586316485Sdavidcsout:
6587316485Sdavidcs	return rc;
6588316485Sdavidcs}
6589316485Sdavidcs
6590316485Sdavidcs/* Calculate final WFQ values for all vports and configure it.
6591316485Sdavidcs * After this configuration each vport must have
6592316485Sdavidcs * approx min rate =  vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
6593316485Sdavidcs */
6594316485Sdavidcsstatic void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
6595316485Sdavidcs					       struct ecore_ptt *p_ptt,
6596316485Sdavidcs					       u32 min_pf_rate)
6597316485Sdavidcs{
6598316485Sdavidcs	struct init_qm_vport_params *vport_params;
6599316485Sdavidcs	int i;
6600316485Sdavidcs
6601316485Sdavidcs	vport_params = p_hwfn->qm_info.qm_vport_params;
6602316485Sdavidcs
6603316485Sdavidcs	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6604316485Sdavidcs		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
6605316485Sdavidcs
6606316485Sdavidcs		vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
6607316485Sdavidcs					    min_pf_rate;
6608316485Sdavidcs		ecore_init_vport_wfq(p_hwfn, p_ptt,
6609316485Sdavidcs				     vport_params[i].first_tx_pq_id,
6610316485Sdavidcs				     vport_params[i].vport_wfq);
6611316485Sdavidcs	}
6612316485Sdavidcs}
6613316485Sdavidcs
6614320164Sdavidcsstatic void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
6615316485Sdavidcs
6616316485Sdavidcs{
6617316485Sdavidcs	int i;
6618316485Sdavidcs
6619316485Sdavidcs	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
6620316485Sdavidcs		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
6621316485Sdavidcs}
6622316485Sdavidcs
6623316485Sdavidcsstatic void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
6624320164Sdavidcs					     struct ecore_ptt *p_ptt)
6625316485Sdavidcs{
6626316485Sdavidcs	struct init_qm_vport_params *vport_params;
6627316485Sdavidcs	int i;
6628316485Sdavidcs
6629316485Sdavidcs	vport_params = p_hwfn->qm_info.qm_vport_params;
6630316485Sdavidcs
6631316485Sdavidcs	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6632320164Sdavidcs		ecore_init_wfq_default_param(p_hwfn);
6633316485Sdavidcs		ecore_init_vport_wfq(p_hwfn, p_ptt,
6634316485Sdavidcs				     vport_params[i].first_tx_pq_id,
6635316485Sdavidcs				     vport_params[i].vport_wfq);
6636316485Sdavidcs	}
6637316485Sdavidcs}
6638316485Sdavidcs
6639316485Sdavidcs/* This function performs several validations for WFQ
6640316485Sdavidcs * configuration and required min rate for a given vport
6641316485Sdavidcs * 1. req_rate must be greater than one percent of min_pf_rate.
6642316485Sdavidcs * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
6643316485Sdavidcs *    rates to get less than one percent of min_pf_rate.
6644316485Sdavidcs * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
6645316485Sdavidcs */
6646316485Sdavidcsstatic enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
6647316485Sdavidcs						 u16 vport_id, u32 req_rate,
6648316485Sdavidcs						 u32 min_pf_rate)
6649316485Sdavidcs{
6650316485Sdavidcs	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
6651316485Sdavidcs	int non_requested_count = 0, req_count = 0, i, num_vports;
6652316485Sdavidcs
6653316485Sdavidcs	num_vports = p_hwfn->qm_info.num_vports;
6654316485Sdavidcs
6655316485Sdavidcs	/* Accounting for the vports which are configured for WFQ explicitly */
6656316485Sdavidcs	for (i = 0; i < num_vports; i++) {
6657316485Sdavidcs		u32 tmp_speed;
6658316485Sdavidcs
6659316485Sdavidcs		if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
6660316485Sdavidcs			req_count++;
6661316485Sdavidcs			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
6662316485Sdavidcs			total_req_min_rate += tmp_speed;
6663316485Sdavidcs		}
6664316485Sdavidcs	}
6665316485Sdavidcs
6666316485Sdavidcs	/* Include current vport data as well */
6667316485Sdavidcs	req_count++;
6668316485Sdavidcs	total_req_min_rate += req_rate;
6669316485Sdavidcs	non_requested_count = num_vports - req_count;
6670316485Sdavidcs
6671316485Sdavidcs	/* validate possible error cases */
6672316485Sdavidcs	if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
6673316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6674316485Sdavidcs			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
6675316485Sdavidcs			   vport_id, req_rate, min_pf_rate);
6676316485Sdavidcs		return ECORE_INVAL;
6677316485Sdavidcs	}
6678316485Sdavidcs
6679316485Sdavidcs	/* TBD - for number of vports greater than 100 */
6680316485Sdavidcs	if (num_vports > ECORE_WFQ_UNIT) {
6681316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6682316485Sdavidcs			   "Number of vports is greater than %d\n",
6683316485Sdavidcs			   ECORE_WFQ_UNIT);
6684316485Sdavidcs		return ECORE_INVAL;
6685316485Sdavidcs	}
6686316485Sdavidcs
6687316485Sdavidcs	if (total_req_min_rate > min_pf_rate) {
6688316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6689316485Sdavidcs			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
6690316485Sdavidcs			   total_req_min_rate, min_pf_rate);
6691316485Sdavidcs		return ECORE_INVAL;
6692316485Sdavidcs	}
6693316485Sdavidcs
6694316485Sdavidcs	/* Data left for non requested vports */
6695316485Sdavidcs	total_left_rate = min_pf_rate - total_req_min_rate;
6696316485Sdavidcs	left_rate_per_vp = total_left_rate / non_requested_count;
6697316485Sdavidcs
6698316485Sdavidcs	/* validate if non requested get < 1% of min bw */
6699316485Sdavidcs	if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
6700316485Sdavidcs		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6701316485Sdavidcs			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
6702316485Sdavidcs			   left_rate_per_vp, min_pf_rate);
6703316485Sdavidcs		return ECORE_INVAL;
6704316485Sdavidcs	}
6705316485Sdavidcs
6706316485Sdavidcs	/* now req_rate for given vport passes all scenarios.
6707316485Sdavidcs	 * assign final wfq rates to all vports.
6708316485Sdavidcs	 */
6709316485Sdavidcs	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
6710316485Sdavidcs	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
6711316485Sdavidcs
6712316485Sdavidcs	for (i = 0; i < num_vports; i++) {
6713316485Sdavidcs		if (p_hwfn->qm_info.wfq_data[i].configured)
6714316485Sdavidcs			continue;
6715316485Sdavidcs
6716316485Sdavidcs		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
6717316485Sdavidcs	}
6718316485Sdavidcs
6719316485Sdavidcs	return ECORE_SUCCESS;
6720316485Sdavidcs}
6721316485Sdavidcs
6722316485Sdavidcsstatic int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
6723316485Sdavidcs				       struct ecore_ptt *p_ptt,
6724316485Sdavidcs				       u16 vp_id, u32 rate)
6725316485Sdavidcs{
6726316485Sdavidcs	struct ecore_mcp_link_state *p_link;
6727316485Sdavidcs	int rc = ECORE_SUCCESS;
6728316485Sdavidcs
6729316485Sdavidcs	p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
6730316485Sdavidcs
6731316485Sdavidcs	if (!p_link->min_pf_rate) {
6732316485Sdavidcs		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
6733316485Sdavidcs		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
6734316485Sdavidcs		return rc;
6735316485Sdavidcs	}
6736316485Sdavidcs
6737316485Sdavidcs	rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
6738316485Sdavidcs
6739316485Sdavidcs	if (rc == ECORE_SUCCESS)
6740316485Sdavidcs		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
6741316485Sdavidcs						   p_link->min_pf_rate);
6742316485Sdavidcs	else
6743316485Sdavidcs		DP_NOTICE(p_hwfn, false,
6744316485Sdavidcs			  "Validation failed while configuring min rate\n");
6745316485Sdavidcs
6746316485Sdavidcs	return rc;
6747316485Sdavidcs}
6748316485Sdavidcs
6749316485Sdavidcsstatic int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
6750316485Sdavidcs						   struct ecore_ptt *p_ptt,
6751316485Sdavidcs						   u32 min_pf_rate)
6752316485Sdavidcs{
6753316485Sdavidcs	bool use_wfq = false;
6754316485Sdavidcs	int rc = ECORE_SUCCESS;
6755316485Sdavidcs	u16 i;
6756316485Sdavidcs
6757316485Sdavidcs	/* Validate all pre configured vports for wfq */
6758316485Sdavidcs	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6759316485Sdavidcs		u32 rate;
6760316485Sdavidcs
6761316485Sdavidcs		if (!p_hwfn->qm_info.wfq_data[i].configured)
6762316485Sdavidcs			continue;
6763316485Sdavidcs
6764316485Sdavidcs		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
6765316485Sdavidcs		use_wfq = true;
6766316485Sdavidcs
6767316485Sdavidcs		rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
6768316485Sdavidcs		if (rc != ECORE_SUCCESS) {
6769316485Sdavidcs			DP_NOTICE(p_hwfn, false,
6770316485Sdavidcs				  "WFQ validation failed while configuring min rate\n");
6771316485Sdavidcs			break;
6772316485Sdavidcs		}
6773316485Sdavidcs	}
6774316485Sdavidcs
6775316485Sdavidcs	if (rc == ECORE_SUCCESS && use_wfq)
6776316485Sdavidcs		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
6777316485Sdavidcs	else
6778320164Sdavidcs		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
6779316485Sdavidcs
6780316485Sdavidcs	return rc;
6781316485Sdavidcs}
6782316485Sdavidcs
6783316485Sdavidcs/* Main API for ecore clients to configure vport min rate.
6784316485Sdavidcs * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
6785316485Sdavidcs * rate - Speed in Mbps needs to be assigned to a given vport.
6786316485Sdavidcs */
6787316485Sdavidcsint ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
6788316485Sdavidcs{
6789316485Sdavidcs	int i, rc = ECORE_INVAL;
6790316485Sdavidcs
6791316485Sdavidcs	/* TBD - for multiple hardware functions - that is 100 gig */
6792337517Sdavidcs	if (ECORE_IS_CMT(p_dev)) {
6793316485Sdavidcs		DP_NOTICE(p_dev, false,
6794316485Sdavidcs			  "WFQ configuration is not supported for this device\n");
6795316485Sdavidcs		return rc;
6796316485Sdavidcs	}
6797316485Sdavidcs
6798316485Sdavidcs	for_each_hwfn(p_dev, i) {
6799316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6800316485Sdavidcs		struct ecore_ptt *p_ptt;
6801316485Sdavidcs
6802316485Sdavidcs		p_ptt = ecore_ptt_acquire(p_hwfn);
6803316485Sdavidcs		if (!p_ptt)
6804316485Sdavidcs			return ECORE_TIMEOUT;
6805316485Sdavidcs
6806316485Sdavidcs		rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
6807316485Sdavidcs
6808316485Sdavidcs		if (rc != ECORE_SUCCESS) {
6809316485Sdavidcs			ecore_ptt_release(p_hwfn, p_ptt);
6810316485Sdavidcs			return rc;
6811316485Sdavidcs		}
6812316485Sdavidcs
6813316485Sdavidcs		ecore_ptt_release(p_hwfn, p_ptt);
6814316485Sdavidcs	}
6815316485Sdavidcs
6816316485Sdavidcs	return rc;
6817316485Sdavidcs}
6818316485Sdavidcs
6819316485Sdavidcs/* API to configure WFQ from mcp link change */
6820316485Sdavidcsvoid ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
6821316485Sdavidcs					   struct ecore_ptt *p_ptt,
6822316485Sdavidcs					   u32 min_pf_rate)
6823316485Sdavidcs{
6824316485Sdavidcs	int i;
6825316485Sdavidcs
6826316485Sdavidcs	/* TBD - for multiple hardware functions - that is 100 gig */
6827337517Sdavidcs	if (ECORE_IS_CMT(p_dev)) {
6828316485Sdavidcs		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
6829316485Sdavidcs			   "WFQ configuration is not supported for this device\n");
6830316485Sdavidcs		return;
6831316485Sdavidcs	}
6832316485Sdavidcs
6833316485Sdavidcs	for_each_hwfn(p_dev, i) {
6834316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6835316485Sdavidcs
6836316485Sdavidcs		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
6837316485Sdavidcs							min_pf_rate);
6838316485Sdavidcs	}
6839316485Sdavidcs}
6840316485Sdavidcs
6841316485Sdavidcsint __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
6842316485Sdavidcs				       struct ecore_ptt *p_ptt,
6843316485Sdavidcs				       struct ecore_mcp_link_state *p_link,
6844316485Sdavidcs				       u8 max_bw)
6845316485Sdavidcs{
6846316485Sdavidcs	int rc = ECORE_SUCCESS;
6847316485Sdavidcs
6848316485Sdavidcs	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
6849316485Sdavidcs
6850316485Sdavidcs	if (!p_link->line_speed && (max_bw != 100))
6851316485Sdavidcs		return rc;
6852316485Sdavidcs
6853316485Sdavidcs	p_link->speed = (p_link->line_speed * max_bw) / 100;
6854316485Sdavidcs	p_hwfn->qm_info.pf_rl = p_link->speed;
6855316485Sdavidcs
6856316485Sdavidcs	/* Since the limiter also affects Tx-switched traffic, we don't want it
6857316485Sdavidcs	 * to limit such traffic in case there's no actual limit.
6858316485Sdavidcs	 * In that case, set limit to imaginary high boundary.
6859316485Sdavidcs	 */
6860316485Sdavidcs	if (max_bw == 100)
6861316485Sdavidcs		p_hwfn->qm_info.pf_rl = 100000;
6862316485Sdavidcs
6863316485Sdavidcs	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
6864316485Sdavidcs			      p_hwfn->qm_info.pf_rl);
6865316485Sdavidcs
6866316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6867316485Sdavidcs		   "Configured MAX bandwidth to be %08x Mb/sec\n",
6868316485Sdavidcs		   p_link->speed);
6869316485Sdavidcs
6870316485Sdavidcs	return rc;
6871316485Sdavidcs}
6872316485Sdavidcs
6873316485Sdavidcs/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
6874316485Sdavidcsint ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
6875316485Sdavidcs{
6876316485Sdavidcs	int i, rc = ECORE_INVAL;
6877316485Sdavidcs
6878316485Sdavidcs	if (max_bw < 1 || max_bw > 100) {
6879316485Sdavidcs		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
6880316485Sdavidcs		return rc;
6881316485Sdavidcs	}
6882316485Sdavidcs
6883316485Sdavidcs	for_each_hwfn(p_dev, i) {
6884316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6885316485Sdavidcs		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
6886316485Sdavidcs		struct ecore_mcp_link_state *p_link;
6887316485Sdavidcs		struct ecore_ptt *p_ptt;
6888316485Sdavidcs
6889316485Sdavidcs		p_link = &p_lead->mcp_info->link_output;
6890316485Sdavidcs
6891316485Sdavidcs		p_ptt = ecore_ptt_acquire(p_hwfn);
6892316485Sdavidcs		if (!p_ptt)
6893316485Sdavidcs			return ECORE_TIMEOUT;
6894316485Sdavidcs
6895316485Sdavidcs		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
6896316485Sdavidcs							p_link, max_bw);
6897316485Sdavidcs
6898316485Sdavidcs		ecore_ptt_release(p_hwfn, p_ptt);
6899316485Sdavidcs
6900316485Sdavidcs		if (rc != ECORE_SUCCESS)
6901316485Sdavidcs			break;
6902316485Sdavidcs	}
6903316485Sdavidcs
6904316485Sdavidcs	return rc;
6905316485Sdavidcs}
6906316485Sdavidcs
6907316485Sdavidcsint __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
6908316485Sdavidcs				       struct ecore_ptt *p_ptt,
6909316485Sdavidcs				       struct ecore_mcp_link_state *p_link,
6910316485Sdavidcs				       u8 min_bw)
6911316485Sdavidcs{
6912316485Sdavidcs	int rc = ECORE_SUCCESS;
6913316485Sdavidcs
6914316485Sdavidcs	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
6915316485Sdavidcs	p_hwfn->qm_info.pf_wfq = min_bw;
6916316485Sdavidcs
6917316485Sdavidcs	if (!p_link->line_speed)
6918316485Sdavidcs		return rc;
6919316485Sdavidcs
6920316485Sdavidcs	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
6921316485Sdavidcs
6922316485Sdavidcs	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
6923316485Sdavidcs
6924316485Sdavidcs	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6925316485Sdavidcs		   "Configured MIN bandwidth to be %d Mb/sec\n",
6926316485Sdavidcs		   p_link->min_pf_rate);
6927316485Sdavidcs
6928316485Sdavidcs	return rc;
6929316485Sdavidcs}
6930316485Sdavidcs
6931316485Sdavidcs/* Main API to configure PF min bandwidth where bw range is [1-100] */
6932316485Sdavidcsint ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
6933316485Sdavidcs{
6934316485Sdavidcs	int i, rc = ECORE_INVAL;
6935316485Sdavidcs
6936316485Sdavidcs	if (min_bw < 1 || min_bw > 100) {
6937316485Sdavidcs		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
6938316485Sdavidcs		return rc;
6939316485Sdavidcs	}
6940316485Sdavidcs
6941316485Sdavidcs	for_each_hwfn(p_dev, i) {
6942316485Sdavidcs		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6943316485Sdavidcs		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
6944316485Sdavidcs		struct ecore_mcp_link_state *p_link;
6945316485Sdavidcs		struct ecore_ptt *p_ptt;
6946316485Sdavidcs
6947316485Sdavidcs		p_link = &p_lead->mcp_info->link_output;
6948316485Sdavidcs
6949316485Sdavidcs		p_ptt = ecore_ptt_acquire(p_hwfn);
6950316485Sdavidcs		if (!p_ptt)
6951316485Sdavidcs			return ECORE_TIMEOUT;
6952316485Sdavidcs
6953316485Sdavidcs		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
6954316485Sdavidcs							p_link, min_bw);
6955316485Sdavidcs		if (rc != ECORE_SUCCESS) {
6956316485Sdavidcs			ecore_ptt_release(p_hwfn, p_ptt);
6957316485Sdavidcs			return rc;
6958316485Sdavidcs		}
6959316485Sdavidcs
6960316485Sdavidcs		if (p_link->min_pf_rate) {
6961316485Sdavidcs			u32 min_rate = p_link->min_pf_rate;
6962316485Sdavidcs
6963316485Sdavidcs			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
6964316485Sdavidcs								     p_ptt,
6965316485Sdavidcs								     min_rate);
6966316485Sdavidcs		}
6967316485Sdavidcs
6968316485Sdavidcs		ecore_ptt_release(p_hwfn, p_ptt);
6969316485Sdavidcs	}
6970316485Sdavidcs
6971316485Sdavidcs	return rc;
6972316485Sdavidcs}
6973316485Sdavidcs
6974316485Sdavidcsvoid ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
6975316485Sdavidcs{
6976316485Sdavidcs	struct ecore_mcp_link_state *p_link;
6977316485Sdavidcs
6978316485Sdavidcs	p_link = &p_hwfn->mcp_info->link_output;
6979316485Sdavidcs
6980316485Sdavidcs	if (p_link->min_pf_rate)
6981320164Sdavidcs		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
6982316485Sdavidcs
6983316485Sdavidcs	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
6984316485Sdavidcs		    sizeof(*p_hwfn->qm_info.wfq_data) *
6985316485Sdavidcs				p_hwfn->qm_info.num_vports);
6986316485Sdavidcs}
6987316485Sdavidcs
/* Number of engines on this device: BB devices have two, all others one */
int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	if (ECORE_IS_BB(p_dev))
		return 2;

	return 1;
}
6992316485Sdavidcs
/* Number of ports on this device, as cached in the device structure */
int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	return p_dev->num_ports;
}
6997316485Sdavidcs
6998316485Sdavidcsvoid ecore_set_fw_mac_addr(__le16 *fw_msb,
6999316485Sdavidcs			  __le16 *fw_mid,
7000316485Sdavidcs			  __le16 *fw_lsb,
7001316485Sdavidcs			  u8 *mac)
7002316485Sdavidcs{
7003316485Sdavidcs	((u8 *)fw_msb)[0] = mac[1];
7004316485Sdavidcs	((u8 *)fw_msb)[1] = mac[0];
7005316485Sdavidcs	((u8 *)fw_mid)[0] = mac[3];
7006316485Sdavidcs	((u8 *)fw_mid)[1] = mac[2];
7007316485Sdavidcs	((u8 *)fw_lsb)[0] = mac[5];
7008316485Sdavidcs	((u8 *)fw_lsb)[1] = mac[4];
7009316485Sdavidcs}
7010337517Sdavidcs
7011337517Sdavidcsvoid ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable)
7012337517Sdavidcs{
7013337517Sdavidcs	if (p_dev->recov_in_prog != !b_enable) {
7014337517Sdavidcs		DP_INFO(p_dev, "%s access to the device\n",
7015337517Sdavidcs			b_enable ?  "Enable" : "Disable");
7016337517Sdavidcs		p_dev->recov_in_prog = !b_enable;
7017337517Sdavidcs	}
7018337517Sdavidcs}
7019337517Sdavidcs
7020337517Sdavidcs#ifdef _NTDDK_
7021337517Sdavidcs#pragma warning(pop)
7022337517Sdavidcs#endif
7023