/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_dev.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_dev.c 337517 2018-08-09 01:17:35Z davidcs $");

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_ll2.h"
#include "ecore_fcoe.h"
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_rdma.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dcbx.h"
#include "pcics_reg_driver.h"
#include "ecore_l2.h"
#ifndef LINUX_REMOVE
#include "ecore_tcp_ip.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#endif

/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
 * registers involved are not split and thus configuration is a race where
 * some of the PFs' configuration might be lost.
 * Eventually, this needs to move into an MFW-covered HW lock that serves as
 * the arbitration mechanism, since the current scheme doesn't cover some
 * cases [E.g., PDA or scenarios where there's more than a single compiled
 * ecore component in the system].
 */
static osal_spinlock_t qm_lock;
static u32 qm_lock_ref_cnt;

void ecore_set_ilt_page_size(struct ecore_dev *p_dev, u8 ilt_page_size)
{
	p_dev->ilt_page_size = ilt_page_size;
}

/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
 * entity needs to register with the mechanism and provide the parameters
 * describing its doorbell, including a location where last used doorbell data
 * can be found. The doorbell execute function will traverse the list and
 * doorbell all of the registered entries.
 */
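/* A minimal registration sketch (illustrative only; "p_my_queue" and its
 * fields are hypothetical, and the DB_REC_KERNEL space value is assumed, but
 * any doorbelling entity follows this add/del pattern with the APIs below):
 *
 *	rc = ecore_db_recovery_add(p_dev, p_my_queue->db_addr,
 *				   &p_my_queue->db_data, DB_REC_WIDTH_32B,
 *				   DB_REC_KERNEL);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 *	...
 *	ecore_db_recovery_del(p_dev, p_my_queue->db_addr,
 *			      &p_my_queue->db_data);
 */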
struct ecore_db_recovery_entry {
	osal_list_entry_t	list_entry;
	void OSAL_IOMEM		*db_addr;
	void			*db_data;
	enum ecore_db_rec_width	db_width;
	enum ecore_db_rec_space	db_space;
	u8			hwfn_idx;
};

/* display a single doorbell recovery entry */
static void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
				struct ecore_db_recovery_entry *db_entry,
				char *action)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action, db_entry, db_entry->db_addr, db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}

/* doorbell address sanity (address within doorbell bar range) */
static bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
			 void *db_data)
{
	/* make sure doorbell address is within the doorbell bar */
	if (db_addr < p_dev->doorbells || (u8 *)db_addr >
			(u8 *)p_dev->doorbells + p_dev->db_size) {
		OSAL_WARN(true,
			  "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
			  db_addr, p_dev->doorbells,
			  (u8 *)p_dev->doorbells + p_dev->db_size);
		return false;
	}

	/* make sure doorbell data pointer is not null */
	if (!db_data) {
		OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
		return false;
	}

	return true;
}

/* find hwfn according to the doorbell address */
static struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
					  void OSAL_IOMEM *db_addr)
{
	struct ecore_hwfn *p_hwfn;

	/* in CMT the doorbell bar is split down the middle between engine 0 and engine 1 */
	if (ECORE_IS_CMT(p_dev))
		p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
			&p_dev->hwfns[0] : &p_dev->hwfns[1];
	else
		p_hwfn = ECORE_LEADING_HWFN(p_dev);

	return p_hwfn;
}

/* add a new entry to the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space)
{
	struct ecore_db_recovery_entry *db_entry;
	struct ecore_hwfn *p_hwfn;

	/* short-circuit VFs, for now */
	if (IS_VF(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return ECORE_SUCCESS;
	}

	/* sanitize doorbell address */
	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
		return ECORE_INVAL;

	/* obtain hwfn from doorbell address */
	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);

	/* create entry */
	db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
	if (!db_entry) {
		DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
		return ECORE_NOMEM;
	}

	/* populate entry */
	db_entry->db_addr = db_addr;
	db_entry->db_data = db_data;
	db_entry->db_width = db_width;
	db_entry->db_space = db_space;
	db_entry->hwfn_idx = p_hwfn->my_id;

	/* display */
	ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
			    &p_hwfn->db_recovery_info.list);
	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);

	return ECORE_SUCCESS;
}

/* remove an entry from the doorbell recovery mechanism */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;

	/* short-circuit VFs, for now */
	if (IS_VF(p_dev)) {
		DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return ECORE_SUCCESS;
	}

	/* sanitize doorbell address */
	if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
		return ECORE_INVAL;

	/* obtain hwfn from doorbell address */
	p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {

		/* search according to db_data addr since db_addr is not unique (roce) */
		if (db_entry->db_data == db_data) {
			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
					       &p_hwfn->db_recovery_info.list);
			rc = ECORE_SUCCESS;
			break;
		}
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);

	if (rc == ECORE_INVAL) {
		/*OSAL_WARN(true,*/
		DP_NOTICE(p_hwfn, false,
			  "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
			  db_data, db_addr);
	} else
		OSAL_FREE(p_dev, db_entry);

	return rc;
}

/* initialize the doorbell recovery mechanism */
static enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");

	/* make sure db_size was set in p_dev */
	if (!p_hwfn->p_dev->db_size) {
		DP_ERR(p_hwfn->p_dev, "db_size not set\n");
		return ECORE_INVAL;
	}

	OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
		return ECORE_NOMEM;
#endif
	OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
	p_hwfn->db_recovery_info.db_recovery_counter = 0;

	return ECORE_SUCCESS;
}

/* destroy the doorbell recovery mechanism */
static void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
	if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
		DP_VERBOSE(p_hwfn, false, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
		while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
			db_entry = OSAL_LIST_FIRST_ENTRY(&p_hwfn->db_recovery_info.list,
							 struct ecore_db_recovery_entry,
							 list_entry);
			ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
			OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
					       &p_hwfn->db_recovery_info.list);
			OSAL_FREE(p_hwfn->p_dev, db_entry);
		}
	}
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
#endif
	p_hwfn->db_recovery_info.db_recovery_counter = 0;
}

/* print the content of the doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	DP_NOTICE(p_hwfn, false,
		  "Displaying doorbell recovery database. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}

/* ring the doorbell of a single doorbell recovery entry */
static void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
			    struct ecore_db_recovery_entry *db_entry,
			    enum ecore_db_rec_exec db_exec)
{
	if (db_exec != DB_REC_ONCE) {
		/* Print according to width */
		if (db_entry->db_width == DB_REC_WIDTH_32B)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
				   "%s doorbell address %p data %x\n",
				   db_exec == DB_REC_DRY_RUN ?
				   "would have rung" : "ringing",
				   db_entry->db_addr,
				   *(u32 *)db_entry->db_data);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
				   "%s doorbell address %p data %llx\n",
				   db_exec == DB_REC_DRY_RUN ?
				   "would have rung" : "ringing",
				   db_entry->db_addr,
				   (unsigned long long)*(u64 *)(db_entry->db_data));
	}

	/* Sanity */
	if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
				 db_entry->db_data))
		return;

	/* Flush the write combined buffer. Since there are multiple doorbelling
	 * entities using the same address, if we don't flush, a transaction
	 * could be lost.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	/* Ring the doorbell */
	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
		if (db_entry->db_width == DB_REC_WIDTH_32B)
			DIRECT_REG_WR(p_hwfn, db_entry->db_addr, *(u32 *)(db_entry->db_data));
		else
			DIRECT_REG_WR64(p_hwfn, db_entry->db_addr, *(u64 *)(db_entry->db_data));
	}

	/* Flush the write combined buffer. Next doorbell may come from a
	 * different entity to the same address...
	 */
	OSAL_WMB(p_hwfn->p_dev);
}

/* traverse the doorbell recovery entry list and ring all the doorbells */
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
			       enum ecore_db_rec_exec db_exec)
{
	struct ecore_db_recovery_entry *db_entry = OSAL_NULL;

	if (db_exec != DB_REC_ONCE) {
		DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
			  p_hwfn->db_recovery_info.db_recovery_counter);

		/* track amount of times recovery was executed */
		p_hwfn->db_recovery_info.db_recovery_counter++;
	}

	/* protect the list */
	OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
	OSAL_LIST_FOR_EACH_ENTRY(db_entry,
				 &p_hwfn->db_recovery_info.list,
				 list_entry,
				 struct ecore_db_recovery_entry) {
		ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
		if (db_exec == DB_REC_ONCE)
			break;
	}

	OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
}
/******************** Doorbell Recovery end ****************/

/********************************** NIG LLH ***********************************/

enum ecore_llh_filter_type {
	ECORE_LLH_FILTER_TYPE_MAC,
	ECORE_LLH_FILTER_TYPE_PROTOCOL,
};

struct ecore_llh_mac_filter {
	u8 addr[ETH_ALEN];
};

struct ecore_llh_protocol_filter {
	enum ecore_llh_prot_filter_type_t type;
	u16 source_port_or_eth_type;
	u16 dest_port;
};

union ecore_llh_filter {
	struct ecore_llh_mac_filter mac;
	struct ecore_llh_protocol_filter protocol;
};

struct ecore_llh_filter_info {
	bool b_enabled;
	u32 ref_cnt;
	enum ecore_llh_filter_type type;
	union ecore_llh_filter filter;
};

struct ecore_llh_info {
	/* Number of LLH filter banks */
	u8 num_ppfid;

#define MAX_NUM_PPFID	8
	u8 ppfid_array[MAX_NUM_PPFID];

	/* Array of filter arrays:
	 * "num_ppfid" filter banks, where each bank is an array of
	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
	 */
	struct ecore_llh_filter_info **pp_filters;
};
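/* Shadow filters are thus addressed as pp_filters[rel_ppfid][filter_idx];
 * e.g. pp_filters[0][3] would be the fourth filter of the first available
 * ppfid bank (illustrative indices).
 */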

static void ecore_llh_free(struct ecore_dev *p_dev)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	u32 i;

	if (p_llh_info != OSAL_NULL) {
		if (p_llh_info->pp_filters != OSAL_NULL) {
			for (i = 0; i < p_llh_info->num_ppfid; i++)
				OSAL_FREE(p_dev, p_llh_info->pp_filters[i]);
		}

		OSAL_FREE(p_dev, p_llh_info->pp_filters);
	}

	OSAL_FREE(p_dev, p_llh_info);
	p_dev->p_llh_info = OSAL_NULL;
}

static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev)
{
	struct ecore_llh_info *p_llh_info;
	u32 size; u8 i;

	p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info));
	if (!p_llh_info)
		return ECORE_NOMEM;
	p_dev->p_llh_info = p_llh_info;

	for (i = 0; i < MAX_NUM_PPFID; i++) {
		if (!(p_dev->ppfid_bitmap & (0x1 << i)))
			continue;

		p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
		DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n",
			   p_llh_info->num_ppfid, i);
		p_llh_info->num_ppfid++;
	}

	size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
	p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size);
	if (!p_llh_info->pp_filters)
		return ECORE_NOMEM;

	size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
	       sizeof(**p_llh_info->pp_filters);
	for (i = 0; i < p_llh_info->num_ppfid; i++) {
		p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL,
							size);
		if (!p_llh_info->pp_filters[i])
			return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev,
						    u8 ppfid, u8 filter_idx,
						    const char *action)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(p_dev, false,
			  "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
			  action, ppfid, p_llh_info->num_ppfid);
		return ECORE_INVAL;
	}

	if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_dev, false,
			  "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
			  action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

#define ECORE_LLH_INVALID_FILTER_IDX	0xff

static enum _ecore_status_t
ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid,
			       union ecore_llh_filter *p_filter,
			       u8 *p_filter_idx)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;
	u8 i;

	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search");
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter,
				 sizeof(*p_filter))) {
			*p_filter_idx = i;
			break;
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid,
			      u8 *p_filter_idx)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;
	u8 i;

	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx");
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!p_filters[i].b_enabled) {
			*p_filter_idx = i;
			break;
		}
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
__ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx,
			      enum ecore_llh_filter_type type,
			      union ecore_llh_filter *p_filter, u32 *p_ref_cnt)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;

	rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add");
	if (rc != ECORE_SUCCESS)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		p_filters[filter_idx].b_enabled = true;
		p_filters[filter_idx].type = type;
		OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter,
			    sizeof(p_filters[filter_idx].filter));
	}

	*p_ref_cnt = ++p_filters[filter_idx].ref_cnt;

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid,
			    enum ecore_llh_filter_type type,
			    union ecore_llh_filter *p_filter,
			    u8 *p_filter_idx, u32 *p_ref_cnt)
{
	enum _ecore_status_t rc;

	/* Check if the same filter already exists */
	rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
					    p_filter_idx);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Find a new entry in case of a new filter */
	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
		rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	/* No free entry was found */
	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(p_dev, false,
			  "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
			  ppfid);
		return ECORE_NORESOURCES;
	}

	return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type,
					     p_filter, p_ref_cnt);
}

static enum _ecore_status_t
__ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 filter_idx, u32 *p_ref_cnt)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;

	rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove");
	if (rc != ECORE_SUCCESS)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		DP_NOTICE(p_dev, false,
			  "LLH shadow: trying to remove a filter with ref_cnt=0\n");
		return ECORE_INVAL;
	}

	*p_ref_cnt = --p_filters[filter_idx].ref_cnt;
	if (!p_filters[filter_idx].ref_cnt)
		OSAL_MEM_ZERO(&p_filters[filter_idx],
			      sizeof(p_filters[filter_idx]));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
			       union ecore_llh_filter *p_filter,
			       u8 *p_filter_idx, u32 *p_ref_cnt)
{
	enum _ecore_status_t rc;

	rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
					    p_filter_idx);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* No matching filter was found */
	if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(p_dev, false,
			  "Failed to find a filter in the LLH shadow\n");
		return ECORE_INVAL;
	}

	return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx,
						p_ref_cnt);
}

static enum _ecore_status_t
ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
	struct ecore_llh_filter_info *p_filters;
	enum _ecore_status_t rc;

	rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all");
	if (rc != ECORE_SUCCESS)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	OSAL_MEM_ZERO(p_filters,
		      NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
					    u8 rel_ppfid, u8 *p_abs_ppfid)
{
	struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;

	if (rel_ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(p_dev, false,
			  "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
			  rel_ppfid, (u8)(p_llh_info->num_ppfid - 1));
		return ECORE_INVAL;
	}

	*p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid];

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum ecore_eng eng;
	u8 ppfid;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the engine affinity configuration\n");
		return rc;
	}

	/* RoCE PF is bound to a single engine */
	if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
		eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
		rc = ecore_llh_set_roce_affinity(p_dev, eng);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "Failed to set the RoCE engine affinity\n");
			return rc;
		}

		DP_VERBOSE(p_dev, ECORE_MSG_SP,
			   "LLH: Set the engine affinity of RoCE packets as %d\n",
			   eng);
	}

	/* Storage PF is bound to a single engine while L2 PF uses both */
	if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
	    ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
		eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
	else /* L2_PERSONALITY */
		eng = ECORE_BOTH_ENG;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_dev, false,
				  "Failed to set the engine affinity of ppfid %d\n",
				  ppfid);
			return rc;
		}
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   bool avoid_eng_affin)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc;

	/* Backwards compatible mode:
	 * - RoCE packets     - Use engine 0.
	 * - Non-RoCE packets - Use connection based classification for L2 PFs,
	 *                      and engine 0 otherwise.
	 */
	if (avoid_eng_affin) {
		enum ecore_eng eng;
		u8 ppfid;

		if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
			eng = ECORE_ENG0;
			rc = ecore_llh_set_roce_affinity(p_dev, eng);
			if (rc != ECORE_SUCCESS) {
				DP_NOTICE(p_dev, false,
					  "Failed to set the RoCE engine affinity\n");
				return rc;
			}

			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				   "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n",
				   eng);
		}

		eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
		       ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0
							   : ECORE_BOTH_ENG;
		for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
			rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
			if (rc != ECORE_SUCCESS) {
				DP_NOTICE(p_dev, false,
					  "Failed to set the engine affinity of ppfid %d\n",
					  ppfid);
				return rc;
			}
		}

		DP_VERBOSE(p_dev, ECORE_MSG_SP,
			   "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n",
			   eng);

		return ECORE_SUCCESS;
	}

	return __ecore_llh_set_engine_affin(p_hwfn, p_ptt);
}

static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 bool avoid_eng_affin)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 ppfid, abs_ppfid;
	enum _ecore_status_t rc;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		u32 addr;

		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
		if (rc != ECORE_SUCCESS)
			return rc;

		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
		ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
	}

	if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
	    !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
		rc = ecore_llh_add_mac_filter(p_dev, 0,
					      p_hwfn->hw_info.hw_mac_addr);
		if (rc != ECORE_SUCCESS)
			DP_NOTICE(p_dev, false,
				  "Failed to add an LLH filter with the primary MAC\n");
	}

	if (ECORE_IS_CMT(p_dev)) {
		rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return ECORE_SUCCESS;
}

u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev)
{
	return p_dev->p_llh_info->num_ppfid;
}

enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
{
	return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
}

/* TBD - should be removed when these definitions are available in reg_addr.h */
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT		0
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK		0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT	2
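/* With these mask/shift pairs, the SET_FIELD() calls below place the 2-bit
 * engine select for RoCE traffic in bits [1:0] and for non-RoCE traffic in
 * bits [3:2] of NIG_REG_PPF_TO_ENGINE_SEL; e.g. a non-RoCE eng_sel of 2
 * (both engines) sets bits [3:2] to 0b10, i.e. val |= 0x8 (illustrative
 * value).
 */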

enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
						  u8 ppfid, enum ecore_eng eng)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 abs_ppfid;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (!ECORE_IS_CMT(p_dev))
		goto out;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto out;

	switch (eng) {
	case ECORE_ENG0:
		eng_sel = 0;
		break;
	case ECORE_ENG1:
		eng_sel = 1;
		break;
	case ECORE_BOTH_ENG:
		eng_sel = 2;
		break;
	default:
		DP_NOTICE(p_dev, false,
			  "Invalid affinity value for ppfid [%d]\n", eng);
		rc = ECORE_INVAL;
		goto out;
	}

	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	val = ecore_rd(p_hwfn, p_ptt, addr);
	SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
	ecore_wr(p_hwfn, p_ptt, addr, val);

	/* The iWARP affinity is set as the affinity of ppfid 0 */
	if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn))
		p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0;
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
						 enum ecore_eng eng)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 ppfid, abs_ppfid;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (!ECORE_IS_CMT(p_dev))
		goto out;

	switch (eng) {
	case ECORE_ENG0:
		eng_sel = 0;
		break;
	case ECORE_ENG1:
		eng_sel = 1;
		break;
	case ECORE_BOTH_ENG:
		eng_sel = 2;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
			 0xf /* QP bit 15 */);
		break;
	default:
		DP_NOTICE(p_dev, false,
			  "Invalid affinity value for RoCE [%d]\n", eng);
		rc = ECORE_INVAL;
		goto out;
	}

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
		if (rc != ECORE_SUCCESS)
			goto out;

		addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
		ecore_wr(p_hwfn, p_ptt, addr, val);
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

struct ecore_llh_filter_e4_details {
	u64 value;
	u32 mode;
	u32 protocol_type;
	u32 hdr_sel;
	u32 enable;
};

static enum _ecore_status_t
ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
			   struct ecore_llh_filter_e4_details *p_details,
			   bool b_write_access)
{
	u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;
	u32 addr;

	/* The NIG/LLH registers that are accessed in this function have only 16
	 * rows which are exposed to a PF, i.e. only the 16 filters of its
	 * default ppfid.
	 * Accessing the filters of other ppfids requires pretending to other
	 * PFs, and thus the usage of the ecore_ppfid_rd/wr() functions.
	 */

	/* Filter enable - should be done first when removing a filter */
	if (b_write_access && !p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->enable);
	}

	/* Filter value */
	addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
	OSAL_MEMSET(&params, 0, sizeof(params));

	if (b_write_access) {
		params.flags = ECORE_DMAE_FLAG_PF_DST;
		params.dst_pfid = pfid;
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (u64)(osal_uintptr_t)&p_details->value,
					 addr, 2 /* size_in_dwords */, &params);
	} else {
		params.flags = ECORE_DMAE_FLAG_PF_SRC |
			       ECORE_DMAE_FLAG_COMPLETION_DST;
		params.src_pfid = pfid;
		rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
					 (u64)(osal_uintptr_t)&p_details->value,
					 2 /* size_in_dwords */, &params);
	}

	if (rc != ECORE_SUCCESS)
		return rc;

	/* Filter mode */
	addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
	else
		p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
						 addr);

	/* Filter protocol type */
	addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->protocol_type);
	else
		p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt,
							  abs_ppfid, addr);

	/* Filter header select */
	addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
	if (b_write_access)
		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
			       p_details->hdr_sel);
	else
		p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
						    addr);

	/* Filter enable - should be done last when adding a filter */
	if (!b_write_access || p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
		if (b_write_access)
			ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
				       p_details->enable);
		else
			p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt,
							   abs_ppfid, addr);
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
			u32 high, u32 low)
{
	struct ecore_llh_filter_e4_details filter_details;

	filter_details.enable = 1;
	filter_details.value = ((u64)high << 32) | low;
	filter_details.hdr_sel = 0;
	filter_details.protocol_type = filter_prot_type;
	filter_details.mode = filter_prot_type ?
			      1 : /* protocol-based classification */
			      0;  /* MAC-address based classification */

	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					  &filter_details,
					  true /* write access */);
}

static enum _ecore_status_t
ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
	struct ecore_llh_filter_e4_details filter_details;

	OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));

	return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					  &filter_details,
					  true /* write access */);
}

/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation
 * warnings. It should be removed when the function is implemented.
 */
static enum _ecore_status_t
ecore_llh_add_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			struct ecore_ptt OSAL_UNUSED *p_ptt,
			u8 OSAL_UNUSED abs_ppfid, u8 OSAL_UNUSED filter_idx,
			u8 OSAL_UNUSED filter_prot_type, u32 OSAL_UNUSED high,
			u32 OSAL_UNUSED low)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

/* OSAL_UNUSED is temporarily used to avoid unused-parameter compilation
 * warnings. It should be removed when the function is implemented.
 */
static enum _ecore_status_t
ecore_llh_remove_filter_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			   struct ecore_ptt OSAL_UNUSED *p_ptt,
			   u8 OSAL_UNUSED abs_ppfid,
			   u8 OSAL_UNUSED filter_idx)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

static enum _ecore_status_t
ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
		     u32 low)
{
	if (ECORE_IS_E4(p_hwfn->p_dev))
		return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
					       filter_idx, filter_prot_type,
					       high, low);
	else /* E5 */
		return ecore_llh_add_filter_e5(p_hwfn, p_ptt, abs_ppfid,
					       filter_idx, filter_prot_type,
					       high, low);
}

static enum _ecore_status_t
ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u8 abs_ppfid, u8 filter_idx)
{
	if (ECORE_IS_E4(p_hwfn->p_dev))
		return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
						  filter_idx);
	else /* E5 */
		return ecore_llh_remove_filter_e5(p_hwfn, p_ptt, abs_ppfid,
						  filter_idx);
}

enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
					      u8 mac_addr[ETH_ALEN])
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	union ecore_llh_filter filter;
	u8 filter_idx, abs_ppfid;
	u32 high, low, ref_cnt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		goto out;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
					 ECORE_LLH_FILTER_TYPE_MAC,
					 &filter, &filter_idx, &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
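		/* Pack the MAC into the 64-bit filter value in big-endian
		 * byte order; e.g. 00:11:22:33:44:55 yields high 0x0011 and
		 * low 0x22334455 (illustrative address).
		 */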
		high = mac_addr[1] | (mac_addr[0] << 8);
		low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
		      (mac_addr[2] << 24);
		rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					  0, high, low);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		   mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
		   ref_cnt);

	goto out;

err:
	DP_NOTICE(p_dev, false,
		  "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n",
		  mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5], ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static enum _ecore_status_t
ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev,
				    enum ecore_llh_prot_filter_type_t type,
				    u16 source_port_or_eth_type, u16 dest_port,
				    u8 *str, osal_size_t str_len)
{
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x",
			      source_port_or_eth_type);
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port);
		break;
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
			      source_port_or_eth_type, dest_port);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
			      source_port_or_eth_type, dest_port);
		break;
	default:
		DP_NOTICE(p_dev, true,
			  "Invalid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
				  enum ecore_llh_prot_filter_type_t type,
				  u16 source_port_or_eth_type, u16 dest_port,
				  u32 *p_high, u32 *p_low)
{
	*p_high = 0;
	*p_low = 0;

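	/* For example, a TCP src/dst port pair 0x1234/0x5678 is encoded as
	 * low = 0x12345678 with high = 0, while an Ethertype filter places
	 * the Ethertype in the high dword (illustrative values).
	 */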
	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		*p_high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		*p_low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		*p_low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		*p_low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_dev, true,
			  "Invalid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
			      enum ecore_llh_prot_filter_type_t type,
			      u16 source_port_or_eth_type, u16 dest_port)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32], type_bitmap;
	union ecore_llh_filter filter;
	u32 high, low, ref_cnt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
		goto out;

	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
						 source_port_or_eth_type,
						 dest_port, str, sizeof(str));
	if (rc != ECORE_SUCCESS)
		goto err;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
					 ECORE_LLH_FILTER_TYPE_PROTOCOL,
					 &filter, &filter_idx, &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = ecore_llh_protocol_filter_to_hilo(p_dev, type,
						       source_port_or_eth_type,
						       dest_port, &high, &low);
		if (rc != ECORE_SUCCESS)
			goto err;

		type_bitmap = 0x1 << type;
		rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					  type_bitmap, high, low);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:
	DP_NOTICE(p_hwfn, false,
		  "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
		  str, ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
				 u8 mac_addr[ETH_ALEN])
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	union ecore_llh_filter filter;
	u8 filter_idx, abs_ppfid;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 ref_cnt;

	if (p_ptt == OSAL_NULL)
		return;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		goto out;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
					    &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					     filter_idx);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		   mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
		   ref_cnt);

	goto out;

err:
	DP_NOTICE(p_dev, false,
		  "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n",
		  mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5], ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);
}

void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
				      enum ecore_llh_prot_filter_type_t type,
				      u16 source_port_or_eth_type,
				      u16 dest_port)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32];
	union ecore_llh_filter filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 ref_cnt;

	if (p_ptt == OSAL_NULL)
		return;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
		goto out;

	rc = ecore_llh_protocol_filter_stringify(p_dev, type,
						 source_port_or_eth_type,
						 dest_port, str, sizeof(str));
	if (rc != ECORE_SUCCESS)
		goto err;

	OSAL_MEM_ZERO(&filter, sizeof(filter));
	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
					    &ref_cnt);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					     filter_idx);
		if (rc != ECORE_SUCCESS)
			goto err;
	}

	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:
	DP_NOTICE(p_dev, false,
		  "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
		  str, ppfid);
out:
	ecore_ptt_release(p_hwfn, p_ptt);
}

void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_ptt == OSAL_NULL)
		return;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		goto out;

	rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		goto out;

	rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid);
	if (rc != ECORE_SUCCESS)
		goto out;

	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
	     filter_idx++) {
		if (ECORE_IS_E4(p_dev))
			rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
							abs_ppfid, filter_idx);
		else /* E5 */
			rc = ecore_llh_remove_filter_e5(p_hwfn, p_ptt,
							abs_ppfid, filter_idx);
		if (rc != ECORE_SUCCESS)
			goto out;
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);
}

void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
{
	u8 ppfid;

	if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
	    !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
		return;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
		ecore_llh_clear_ppfid_filters(p_dev, ppfid);
}

enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt, u32 addr,
					 u32 val)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 ppfid, abs_ppfid;
	enum _ecore_status_t rc;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
		if (rc != ECORE_SUCCESS)
			return rc;

		ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val);
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u8 ppfid)
{
	struct ecore_llh_filter_e4_details filter_details;
	u8 abs_ppfid, filter_idx;
	u32 addr;
	enum _ecore_status_t rc;

	rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
	if (rc != ECORE_SUCCESS)
		return rc;

	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	DP_NOTICE(p_hwfn, false,
		  "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n",
		  p_hwfn->rel_pf_id, ppfid, abs_ppfid,
		  ecore_rd(p_hwfn, p_ptt, addr));

	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
	     filter_idx++) {
		OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
		rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
						filter_idx, &filter_details,
						false /* read access */);
		if (rc != ECORE_SUCCESS)
			return rc;

		DP_NOTICE(p_hwfn, false,
			  "filter %2hhd: enable %d, value 0x%016llx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
			  filter_idx, filter_details.enable,
			  (unsigned long long)filter_details.value, filter_details.mode,
			  filter_details.protocol_type, filter_details.hdr_sel);
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_llh_dump_ppfid_e5(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
			struct ecore_ptt OSAL_UNUSED *p_ptt,
			u8 OSAL_UNUSED ppfid)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}

enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	enum _ecore_status_t rc;

	if (p_ptt == OSAL_NULL)
		return ECORE_AGAIN;

	if (ECORE_IS_E4(p_dev))
		rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
	else /* E5 */
		rc = ecore_llh_dump_ppfid_e5(p_hwfn, p_ptt, ppfid);

	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
{
	u8 ppfid;
	enum _ecore_status_t rc;

	for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
		rc = ecore_llh_dump_ppfid(p_dev, ppfid);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return ECORE_SUCCESS;
}

/******************************* NIG LLH - End ********************************/

/* Configurable */
#define ECORE_MIN_DPIS		(4)  /* The minimal number of DPIs required to
				      * load the driver. The number was
				      * arbitrarily set.
				      */

/* Derived */
#define ECORE_MIN_PWM_REGION	(ECORE_WID_SIZE * ECORE_MIN_DPIS)

static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_hw_bar_size(p_hwfn, bar_id);

	val = ecore_rd(p_hwfn, p_ptt, bar_reg);
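	/* The register encodes the BAR size as a power of two:
	 * size = 1 << (val + 15), e.g. val 1 denotes 64kB and val 4 denotes
	 * 512kB (illustrative values).
	 */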
	if (val)
		return 1 << (val + 15);

	/* The above registers were updated in the past only in CMT mode. Since
	 * they were found to be useful, the MFW started updating them as of
	 * version 8.7.7.0. In older MFW versions they are set to 0, which
	 * means disabled.
	 */
	if (ECORE_IS_CMT(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}

void ecore_init_dp(struct ecore_dev	*p_dev,
		   u32			dp_module,
		   u8			dp_level,
		   void		 *dp_ctx)
{
	u32 i;

	p_dev->dp_level = dp_level;
	p_dev->dp_module = dp_module;
	p_dev->dp_ctx = dp_ctx;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
		p_hwfn->dp_ctx = dp_ctx;
	}
}

enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->p_dev = p_dev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

#ifdef CONFIG_ECORE_LOCK_ALLOC
		if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
			goto handle_err;
#endif
		OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
	}

	/* hwfn 0 is always active */
	p_dev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 (may be overridden later) */
	p_dev->cache_shift = 7;

	p_dev->ilt_page_size = ECORE_DEFAULT_ILT_PAGE_SIZE;

	return ECORE_SUCCESS;
#ifdef CONFIG_ECORE_LOCK_ALLOC
handle_err:
	while (i--) {
1721		struct ecore_hwfn *p_hwfn = OSAL_NULL;
1722
1723		p_hwfn = &p_dev->hwfns[i];
1724		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
1725	}
1726	return ECORE_NOMEM;
1727#endif
1728}
1729
1730static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
1731{
1732	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1733
1734	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
1735	qm_info->qm_pq_params = OSAL_NULL;
1736	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
1737	qm_info->qm_vport_params = OSAL_NULL;
1738	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
1739	qm_info->qm_port_params = OSAL_NULL;
1740	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
1741	qm_info->wfq_data = OSAL_NULL;
1742}
1743
1744void ecore_resc_free(struct ecore_dev *p_dev)
1745{
1746	int i;
1747
1748	if (IS_VF(p_dev)) {
1749		for_each_hwfn(p_dev, i)
1750			ecore_l2_free(&p_dev->hwfns[i]);
1751		return;
1752	}
1753
1754	OSAL_FREE(p_dev, p_dev->fw_data);
1755	p_dev->fw_data = OSAL_NULL;
1756
1757	OSAL_FREE(p_dev, p_dev->reset_stats);
1758	p_dev->reset_stats = OSAL_NULL;
1759
1760	ecore_llh_free(p_dev);
1761
1762	for_each_hwfn(p_dev, i) {
1763		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1764
1765		ecore_cxt_mngr_free(p_hwfn);
1766		ecore_qm_info_free(p_hwfn);
1767		ecore_spq_free(p_hwfn);
1768		ecore_eq_free(p_hwfn);
1769		ecore_consq_free(p_hwfn);
1770		ecore_int_free(p_hwfn);
1771#ifdef CONFIG_ECORE_LL2
1772		ecore_ll2_free(p_hwfn);
1773#endif
1774		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
1775			ecore_fcoe_free(p_hwfn);
1776
1777		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
1778			ecore_iscsi_free(p_hwfn);
1779			ecore_ooo_free(p_hwfn);
1780		}
1781
1782#ifdef CONFIG_ECORE_ROCE
1783		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
1784			ecore_rdma_info_free(p_hwfn);
1785#endif
1786		ecore_iov_free(p_hwfn);
1787		ecore_l2_free(p_hwfn);
1788		ecore_dmae_info_free(p_hwfn);
1789		ecore_dcbx_info_free(p_hwfn);
		/* @@@TBD Flush work-queue? */
1791
1792		/* destroy doorbell recovery mechanism */
1793		ecore_db_recovery_teardown(p_hwfn);
1794	}
1795}
1796
1797/******************** QM initialization *******************/
1798/* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
#define ACTIVE_TCS_BMAP 0x9f /* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
1800#define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
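/* Decoding the bitmaps above (illustrative): 0x9f = 1001_1111b marks TCs
 * 0-4 and TC 7 as active - TCs 0-3 for regular traffic, TC 4 for OOO and
 * TC 7 for high priority (e.g. DCQCN). 0xf = 0000_1111b marks only TCs
 * 0-3, with OOO and high priority traffic squeezed onto TC 3.
 */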
1801
1802/* determines the physical queue flags for a given PF. */
1803static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
1804{
1805	u32 flags;
1806
1807	/* common flags */
1808	flags = PQ_FLAGS_LB;
1809
1810	/* feature flags */
1811	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
1812		flags |= PQ_FLAGS_VFS;
1813	if (IS_ECORE_DCQCN(p_hwfn))
1814		flags |= PQ_FLAGS_RLS;
1815
1816	/* protocol flags */
1817	switch (p_hwfn->hw_info.personality) {
1818	case ECORE_PCI_ETH:
1819		flags |= PQ_FLAGS_MCOS;
1820		break;
1821	case ECORE_PCI_FCOE:
1822		flags |= PQ_FLAGS_OFLD;
1823		break;
1824	case ECORE_PCI_ISCSI:
1825		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
1826		break;
1827	case ECORE_PCI_ETH_ROCE:
1828		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
1829		break;
1830	case ECORE_PCI_ETH_IWARP:
1831		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
1832		break;
1833	default:
1834		DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
1835		return 0;
1836	}
1837
1838	return flags;
1839}
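/* Example flag set (illustrative): an ECORE_PCI_ETH_ROCE function with
 * SR-IOV enabled and DCQCN disabled ends up with
 * PQ_FLAGS_LB | PQ_FLAGS_VFS | PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT.
 */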
1840
1841
1842/* Getters for resource amounts necessary for qm initialization */
1843u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
1844{
1845	return p_hwfn->hw_info.num_hw_tc;
1846}
1847
1848u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
1849{
1850	return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
1851}
1852
1853#define NUM_DEFAULT_RLS 1
1854
1855u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
1856{
1857	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
1858
	/* num RLs can't exceed the number of available RL resources, vports or DCQCN QPs */
1860	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
1861				     (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT),
1862						     ROCE_DCQCN_RP_MAX_QPS));
1863
1864	/* make sure after we reserve the default and VF rls we'll have something left */
1865	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
1866		if (IS_ECORE_DCQCN(p_hwfn))
1867			DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
1868		return 0;
1869	}
1870
1871	/* subtract rls necessary for VFs and one default one for the PF */
1872	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
1873
1874	return num_pf_rls;
1875}
1876
1877u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
1878{
1879	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
1880
1881	/* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
1882	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
1883	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
1884}
1885
1886/* calc amount of PQs according to the requested flags */
1887u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
1888{
1889	u32 pq_flags = ecore_get_pq_flags(p_hwfn);
1890
1891	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
1892	       (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
1893	       (!!(PQ_FLAGS_LB & pq_flags)) +
1894	       (!!(PQ_FLAGS_OOO & pq_flags)) +
1895	       (!!(PQ_FLAGS_ACK & pq_flags)) +
1896	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
1897	       (!!(PQ_FLAGS_LLT & pq_flags)) +
1898	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn);
1899}
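/* Illustrative count for the example flag set above, assuming 4 TCs and
 * 8 VFs: num_pqs = 0 (RLS) + 4 (MCOS) + 1 (LB) + 0 (OOO) + 0 (ACK) +
 * 1 (OFLD) + 1 (LLT) + 8 (VFS) = 15.
 */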
1900
1901/* initialize the top level QM params */
1902static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
1903{
1904	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1905	bool four_port;
1906
1907	/* pq and vport bases for this PF */
1908	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
1909	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
1910
1911	/* rate limiting and weighted fair queueing are always enabled */
1912	qm_info->vport_rl_en = 1;
1913	qm_info->vport_wfq_en = 1;
1914
1915	/* TC config is different for AH 4 port */
1916	four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
1917
1918	/* in AH 4 port we have fewer TCs per port */
1919	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;
1920
1921	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
1922	if (!qm_info->ooo_tc)
1923		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
1924}
1925
1926/* initialize qm vport params */
1927static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
1928{
1929	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1930	u8 i;
1931
1932	/* all vports participate in weighted fair queueing */
1933	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
1934		qm_info->qm_vport_params[i].vport_wfq = 1;
1935}
1936
1937/* initialize qm port params */
1938static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
1939{
1941	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
1942
1943	/* indicate how ooo and high pri traffic is dealt with */
1944	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
1945		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
1946
1947	for (i = 0; i < num_ports; i++) {
1948		struct init_qm_port_params *p_qm_port =
1949			&p_hwfn->qm_info.qm_port_params[i];
1950
1951		p_qm_port->active = 1;
1952		p_qm_port->active_phys_tcs = active_phys_tcs;
1953		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
1954		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
1955	}
1956}
1957
1958/* Reset the params which must be reset for qm init. QM init may be called as
1959 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
1960 * params may be affected by the init but would simply recalculate to the same
1961 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
1962 * affected as these amounts stay the same.
1963 */
1964static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
1965{
1966	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1967
1968	qm_info->num_pqs = 0;
1969	qm_info->num_vports = 0;
1970	qm_info->num_pf_rls = 0;
1971	qm_info->num_vf_pqs = 0;
1972	qm_info->first_vf_pq = 0;
1973	qm_info->first_mcos_pq = 0;
1974	qm_info->first_rl_pq = 0;
1975}
1976
1977static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
1978{
1979	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1980
1981	qm_info->num_vports++;
1982
1983	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
1984		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
1985}
1986
1987/* initialize a single pq and manage qm_info resources accounting.
1988 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF)
1989 * and whether a new vport is allocated to the pq or not (i.e. vport will be shared)
1990 */
1991
1992/* flags for pq init */
1993#define PQ_INIT_SHARE_VPORT	(1 << 0)
1994#define PQ_INIT_PF_RL		(1 << 1)
1995#define PQ_INIT_VF_RL		(1 << 2)
1996
1997/* defines for pq init */
1998#define PQ_INIT_DEFAULT_WRR_GROUP	1
1999#define PQ_INIT_DEFAULT_TC		0
2000#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)
2001
2002static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
2003			     struct ecore_qm_info *qm_info,
2004			     u8 tc, u32 pq_init_flags)
2005{
2006	u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);
2007
	if (pq_idx >= max_pq)
		DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
2010
2011	/* init pq params */
2012	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
2013	qm_info->qm_pq_params[pq_idx].tc_id = tc;
2014	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
2015	qm_info->qm_pq_params[pq_idx].rl_valid =
2016		(pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
2017
2018	/* qm params accounting */
2019	qm_info->num_pqs++;
2020	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
2021		qm_info->num_vports++;
2022
2023	if (pq_init_flags & PQ_INIT_PF_RL)
2024		qm_info->num_pf_rls++;
2025
2026	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
2027		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
2028
2029	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
2030		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
2031}
2032
2033/* get pq index according to PQ_FLAGS */
2034static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
2035					     u32 pq_flags)
2036{
2037	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2038
	/* Can't have multiple flags set here */
	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
			       sizeof(pq_flags) * 8 /* in bits */) > 1)
		goto err;
2042
2043	switch (pq_flags) {
2044	case PQ_FLAGS_RLS:
2045		return &qm_info->first_rl_pq;
2046	case PQ_FLAGS_MCOS:
2047		return &qm_info->first_mcos_pq;
2048	case PQ_FLAGS_LB:
2049		return &qm_info->pure_lb_pq;
2050	case PQ_FLAGS_OOO:
2051		return &qm_info->ooo_pq;
2052	case PQ_FLAGS_ACK:
2053		return &qm_info->pure_ack_pq;
2054	case PQ_FLAGS_OFLD:
2055		return &qm_info->offload_pq;
2056	case PQ_FLAGS_LLT:
2057		return &qm_info->low_latency_pq;
2058	case PQ_FLAGS_VFS:
2059		return &qm_info->first_vf_pq;
2060	default:
2061		goto err;
2062	}
2063
2064err:
2065	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
2066	return OSAL_NULL;
2067}
2068
2069/* save pq index in qm info */
2070static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
2071				  u32 pq_flags, u16 pq_val)
2072{
2073	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
2074
2075	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
2076}
2077
2078/* get tx pq index, with the PQ TX base already set (ready for context init) */
2079u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
2080{
2081	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
2082
2083	return *base_pq_idx + CM_TX_PQ_BASE;
2084}
2085
2086u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
2087{
2088	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
2089
	if (tc >= max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
2092
2093	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
2094}
2095
2096u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
2097{
2098	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
2099
	if (vf >= max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
2102
2103	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
2104}
2105
2106u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
2107{
2108	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
2109
	if (rl >= max_rl)
		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
2112
2113	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
2114}
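/* Usage sketch (illustrative, not taken from this driver): a caller that
 * needs CM PQ ids, e.g. for an L2 Tx queue context on TC 2 or for VF 5,
 * would do:
 *
 *	u16 pq_mcos = ecore_get_cm_pq_idx_mcos(p_hwfn, 2);
 *	u16 pq_vf = ecore_get_cm_pq_idx_vf(p_hwfn, 5);
 *
 * Each resolves to start_pq + <per-type base offset> + CM_TX_PQ_BASE plus
 * the tc/vf offset.
 */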
2115
2116/* Functions for creating specific types of pqs */
2117static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
2118{
2119	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2120
2121	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
2122		return;
2123
2124	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
2125	ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
2126}
2127
2128static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
2129{
2130	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2131
2132	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
2133		return;
2134
2135	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
2136	ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
2137}
2138
2139static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
2140{
2141	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2142
2143	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
2144		return;
2145
2146	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
2147	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2148}
2149
2150static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
2151{
2152	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2153
2154	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
2155		return;
2156
2157	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
2158	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2159}
2160
2161static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
2162{
2163	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2164
2165	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
2166		return;
2167
2168	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
2169	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
2170}
2171
2172static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
2173{
2174	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2175	u8 tc_idx;
2176
2177	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
2178		return;
2179
2180	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
2181	for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
2182		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
2183}
2184
2185static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
2186{
2187	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2188	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
2189
2190	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
2191		return;
2192
2193	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
2194	qm_info->num_vf_pqs = num_vfs;
2195	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
2196		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
2197}
2198
2199static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
2200{
2201	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
2202	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2203
2204	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
2205		return;
2206
2207	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
2208	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
2209		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
2210}
2211
2212static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
2213{
2214	/* rate limited pqs, must come first (FW assumption) */
2215	ecore_init_qm_rl_pqs(p_hwfn);
2216
2217	/* pqs for multi cos */
2218	ecore_init_qm_mcos_pqs(p_hwfn);
2219
2220	/* pure loopback pq */
2221	ecore_init_qm_lb_pq(p_hwfn);
2222
2223	/* out of order pq */
2224	ecore_init_qm_ooo_pq(p_hwfn);
2225
2226	/* pure ack pq */
2227	ecore_init_qm_pure_ack_pq(p_hwfn);
2228
2229	/* pq for offloaded protocol */
2230	ecore_init_qm_offload_pq(p_hwfn);
2231
2232	/* low latency pq */
2233	ecore_init_qm_low_latency_pq(p_hwfn);
2234
2235	/* done sharing vports */
2236	ecore_init_qm_advance_vport(p_hwfn);
2237
2238	/* pqs for vfs */
2239	ecore_init_qm_vf_pqs(p_hwfn);
2240}
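/* Resulting PQ layout (illustrative), in the order enforced above:
 *
 *	[ pf_rl pqs | mcos pqs | lb | ooo | ack | ofld | llt | vf pqs ]
 *
 * where each segment exists only if its PQ_FLAGS_* bit is set. The mcos,
 * lb, ooo, ack, ofld and llt pqs are created with PQ_INIT_SHARE_VPORT and
 * share a single vport; each rate-limited PF pq and each VF pq takes a
 * vport of its own, matching ecore_init_qm_get_num_vports().
 */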
2241
2242/* compare values of getters against resources amounts */
2243static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
2244{
2245	if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
2246		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
2247		return ECORE_INVAL;
2248	}
2249
2250	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
2251		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
2252		return ECORE_INVAL;
2253	}
2254
2255	return ECORE_SUCCESS;
2256}
2257
2258/*
2259 * Function for verbose printing of the qm initialization results
2260 */
2261static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
2262{
2263	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2264	struct init_qm_vport_params *vport;
2265	struct init_qm_port_params *port;
2266	struct init_qm_pq_params *pq;
2267	int i, tc;
2268
2269	/* top level params */
2270	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
2271		   qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq);
2272	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
2273		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port);
2274	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
2275		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
2276
2277	/* port table */
2278	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
2279		port = &(qm_info->qm_port_params[i]);
2280		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
2281			   i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved);
2282	}
2283
2284	/* vport table */
2285	for (i = 0; i < qm_info->num_vports; i++) {
2286		vport = &(qm_info->qm_vport_params[i]);
2287		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
2288			   qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq);
2289		for (tc = 0; tc < NUM_OF_TCS; tc++)
2290			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
2291		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
2292	}
2293
2294	/* pq table */
2295	for (i = 0; i < qm_info->num_pqs; i++) {
2296		pq = &(qm_info->qm_pq_params[i]);
2297		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
2298			   qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid);
2299	}
2300}
2301
2302static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
2303{
2304	/* reset params required for init run */
2305	ecore_init_qm_reset_params(p_hwfn);
2306
2307	/* init QM top level params */
2308	ecore_init_qm_params(p_hwfn);
2309
2310	/* init QM port params */
2311	ecore_init_qm_port_params(p_hwfn);
2312
2313	/* init QM vport params */
2314	ecore_init_qm_vport_params(p_hwfn);
2315
2316	/* init QM physical queue params */
2317	ecore_init_qm_pq_params(p_hwfn);
2318
2319	/* display all that init */
2320	ecore_dp_init_qm_params(p_hwfn);
2321}
2322
2323/* This function reconfigures the QM pf on the fly.
2324 * For this purpose we:
2325 * 1. reconfigure the QM database
2326 * 2. set new values to runtime array
2327 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
2328 * 4. activate init tool in QM_PF stage
2329 * 5. send an sdm_qm_cmd through rbc interface to release the QM
2330 */
2331enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
2332				     struct ecore_ptt *p_ptt)
2333{
2334	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2335	bool b_rc;
2336	enum _ecore_status_t rc;
2337
2338	/* initialize ecore's qm data structure */
2339	ecore_init_qm_info(p_hwfn);
2340
2341	/* stop PF's qm queues */
2342	OSAL_SPIN_LOCK(&qm_lock);
2343	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
2344				      qm_info->start_pq, qm_info->num_pqs);
2345	OSAL_SPIN_UNLOCK(&qm_lock);
2346	if (!b_rc)
2347		return ECORE_INVAL;
2348
2349	/* clear the QM_PF runtime phase leftovers from previous init */
2350	ecore_init_clear_rt_data(p_hwfn);
2351
2352	/* prepare QM portion of runtime array */
2353	ecore_qm_init_pf(p_hwfn, p_ptt, false);
2354
2355	/* activate init tool on runtime array */
2356	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
2357			    p_hwfn->hw_info.hw_mode);
2358	if (rc != ECORE_SUCCESS)
2359		return rc;
2360
2361	/* start PF's qm queues */
2362	OSAL_SPIN_LOCK(&qm_lock);
2363	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
2364				      qm_info->start_pq, qm_info->num_pqs);
2365	OSAL_SPIN_UNLOCK(&qm_lock);
2366	if (!b_rc)
2367		return ECORE_INVAL;
2368
2369	return ECORE_SUCCESS;
2370}
2371
2372static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
2373{
2374	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2375	enum _ecore_status_t rc;
2376
2377	rc = ecore_init_qm_sanity(p_hwfn);
2378	if (rc != ECORE_SUCCESS)
2379		goto alloc_err;
2380
2381	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2382					    sizeof(struct init_qm_pq_params) *
2383					    ecore_init_qm_get_num_pqs(p_hwfn));
2384	if (!qm_info->qm_pq_params)
2385		goto alloc_err;
2386
2387	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2388					       sizeof(struct init_qm_vport_params) *
2389					       ecore_init_qm_get_num_vports(p_hwfn));
2390	if (!qm_info->qm_vport_params)
2391		goto alloc_err;
2392
2393	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2394					      sizeof(struct init_qm_port_params) *
2395					      p_hwfn->p_dev->num_ports_in_engine);
2396	if (!qm_info->qm_port_params)
2397		goto alloc_err;
2398
2399	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
2400					sizeof(struct ecore_wfq_data) *
2401					ecore_init_qm_get_num_vports(p_hwfn));
2402	if (!qm_info->wfq_data)
2403		goto alloc_err;
2404
2405	return ECORE_SUCCESS;
2406
2407alloc_err:
2408	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
2409	ecore_qm_info_free(p_hwfn);
2410	return ECORE_NOMEM;
2411}
2412/******************** End QM initialization ***************/
2413
2414enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
2415{
2416	u32 rdma_tasks, excess_tasks;
2417	u32 line_count;
2418	enum _ecore_status_t rc = ECORE_SUCCESS;
2419	int i;
2420
2421	if (IS_VF(p_dev)) {
2422		for_each_hwfn(p_dev, i) {
2423			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
2424			if (rc != ECORE_SUCCESS)
2425				return rc;
2426		}
2427		return rc;
2428	}
2429
2430	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
2431				     sizeof(*p_dev->fw_data));
2432	if (!p_dev->fw_data)
2433		return ECORE_NOMEM;
2434
2435	for_each_hwfn(p_dev, i) {
2436		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2437		u32 n_eqes, num_cons;
2438
2439		/* initialize the doorbell recovery mechanism */
2440		rc = ecore_db_recovery_setup(p_hwfn);
2441		if (rc)
2442			goto alloc_err;
2443
2444		/* First allocate the context manager structure */
2445		rc = ecore_cxt_mngr_alloc(p_hwfn);
2446		if (rc)
2447			goto alloc_err;
2448
2449		/* Set the HW cid/tid numbers (in the context manager)
2450		 * Must be done prior to any further computations.
2451		 */
2452		rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
2453		if (rc)
2454			goto alloc_err;
2455
2456		rc = ecore_alloc_qm_data(p_hwfn);
2457		if (rc)
2458			goto alloc_err;
2459
2460		/* init qm info */
2461		ecore_init_qm_info(p_hwfn);
2462
2463		/* Compute the ILT client partition */
2464		rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
2465		if (rc) {
			DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with fewer lines\n");
2467			/* In case there are not enough ILT lines we reduce the
2468			 * number of RDMA tasks and re-compute.
2469			 */
2470			excess_tasks = ecore_cxt_cfg_ilt_compute_excess(
2471					p_hwfn, line_count);
2472			if (!excess_tasks)
2473				goto alloc_err;
2474
2475			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
2476			rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks);
2477			if (rc)
2478				goto alloc_err;
2479
2480			rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
2481			if (rc) {
2482				DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n",
2483				       line_count);
2484
2485				goto alloc_err;
2486			}
2487		}
2488
		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
2492		rc = ecore_cxt_tables_alloc(p_hwfn);
2493		if (rc)
2494			goto alloc_err;
2495
2496		/* SPQ, must follow ILT because initializes SPQ context */
2497		rc = ecore_spq_alloc(p_hwfn);
2498		if (rc)
2499			goto alloc_err;
2500
2501		/* SP status block allocation */
2502		p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
2503							   RESERVED_PTT_DPC);
2504
2505		rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
2506		if (rc)
2507			goto alloc_err;
2508
2509		rc = ecore_iov_alloc(p_hwfn);
2510		if (rc)
2511			goto alloc_err;
2512
2513		/* EQ */
2514		n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
2515		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
2516			u32 n_srq = ecore_cxt_get_total_srq_count(p_hwfn);
2517
2518			/* Calculate the EQ size
2519			 * ---------------------
2520			 * Each ICID may generate up to one event at a time i.e.
2521			 * the event must be handled/cleared before a new one
2522			 * can be generated. We calculate the sum of events per
2523			 * protocol and create an EQ deep enough to handle the
2524			 * worst case:
2525			 * - Core - according to SPQ.
			 * - RoCE - per QP there are a couple of ICIDs, one
			 *	  responder and one requester, each can
			 *	  generate max 2 EQE (err+qp_destroyed) =>
			 *	  n_eqes_qp = 4 * n_qp.
			 *	  Each CQ can generate an EQE. There are 2 CQs
			 *	  per QP => n_eqes_cq = 2 * n_qp.
			 *	  Hence the RoCE total is 6 * n_qp or
			 *	  3 * num_cons.
			 *	  On top of that one EQE should be added for
			 *	  each XRC SRQ and SRQ.
			 * - iWARP - can generate three async per QP (error
			 *	  detected and qp in error) and an
			 *	  additional error per CQ => 4 * num_cons.
			 *	  On top of that one EQE should be added for
			 *	  each SRQ and XRC SRQ.
2541			 * - ENet - There can be up to two events per VF. One
2542			 *	  for VF-PF channel and another for VF FLR
2543			 *	  initial cleanup. The number of VFs is
2544			 *	  bounded by MAX_NUM_VFS_BB, and is much
2545			 *	  smaller than RoCE's so we avoid exact
2546			 *	  calculation.
2547			 */
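			/* Illustrative sizing (hypothetical numbers): for a
			 * RoCE PF with 8192 RoCE cids and n_srq = 1024, and
			 * an SPQ chain capacity of 128, this yields
			 * n_eqes = 128 + 3 * 8192 + 2 * MAX_NUM_VFS_BB + 1024,
			 * clamped below to 0xFF00 if it ever overflows.
			 */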
2548			if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
2549				num_cons = ecore_cxt_get_proto_cid_count(
2550					p_hwfn, PROTOCOLID_ROCE, OSAL_NULL);
2551				num_cons *= 3;
2552			} else {
2553				num_cons = ecore_cxt_get_proto_cid_count(
2554						p_hwfn, PROTOCOLID_IWARP,
2555						OSAL_NULL);
2556				num_cons *= 4;
2557			}
2558			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
2559		} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
2560			num_cons = ecore_cxt_get_proto_cid_count(
2561					p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL);
2562			n_eqes += 2 * num_cons;
2563		}
2564
2565		if (n_eqes > 0xFF00) {
2566			DP_ERR(p_hwfn, "EQs maxing out at 0xFF00 elements\n");
2567			n_eqes = 0xFF00;
2568		}
2569
2570		rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
2571		if (rc)
2572			goto alloc_err;
2573
2574		rc = ecore_consq_alloc(p_hwfn);
2575		if (rc)
2576			goto alloc_err;
2577
2578		rc = ecore_l2_alloc(p_hwfn);
2579		if (rc != ECORE_SUCCESS)
2580			goto alloc_err;
2581
2582#ifdef CONFIG_ECORE_LL2
2583		if (p_hwfn->using_ll2) {
2584			rc = ecore_ll2_alloc(p_hwfn);
2585			if (rc)
2586				goto alloc_err;
2587		}
2588#endif
2589		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
2590			rc = ecore_fcoe_alloc(p_hwfn);
2591			if (rc)
2592				goto alloc_err;
2593		}
2594
2595		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
2596			rc = ecore_iscsi_alloc(p_hwfn);
2597			if (rc)
2598				goto alloc_err;
2599
2600			rc = ecore_ooo_alloc(p_hwfn);
2601			if (rc)
2602				goto alloc_err;
2603		}
2604#ifdef CONFIG_ECORE_ROCE
2605		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
2606			rc = ecore_rdma_info_alloc(p_hwfn);
2607			if (rc)
2608				goto alloc_err;
2609		}
2610#endif
2611
2612		/* DMA info initialization */
2613		rc = ecore_dmae_info_alloc(p_hwfn);
2614		if (rc) {
2615			DP_NOTICE(p_hwfn, false,
2616				  "Failed to allocate memory for dmae_info structure\n");
2617			goto alloc_err;
2618		}
2619
2620		/* DCBX initialization */
2621		rc = ecore_dcbx_info_alloc(p_hwfn);
2622		if (rc) {
2623			DP_NOTICE(p_hwfn, false,
2624				  "Failed to allocate memory for dcbx structure\n");
2625			goto alloc_err;
2626		}
2627	}
2628
2629	rc = ecore_llh_alloc(p_dev);
2630	if (rc != ECORE_SUCCESS) {
2631		DP_NOTICE(p_dev, false,
2632			  "Failed to allocate memory for the llh_info structure\n");
2633		goto alloc_err;
2634	}
2635
2636	p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
2637					 sizeof(*p_dev->reset_stats));
2638	if (!p_dev->reset_stats) {
2639		DP_NOTICE(p_dev, false,
2640			  "Failed to allocate reset statistics\n");
2641		goto alloc_no_mem;
2642	}
2643
2644	return ECORE_SUCCESS;
2645
2646alloc_no_mem:
2647	rc = ECORE_NOMEM;
2648alloc_err:
2649	ecore_resc_free(p_dev);
2650	return rc;
2651}
2652
2653void ecore_resc_setup(struct ecore_dev *p_dev)
2654{
2655	int i;
2656
2657	if (IS_VF(p_dev)) {
2658		for_each_hwfn(p_dev, i)
2659			ecore_l2_setup(&p_dev->hwfns[i]);
2660		return;
2661	}
2662
2663	for_each_hwfn(p_dev, i) {
2664		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2665
2666		ecore_cxt_mngr_setup(p_hwfn);
2667		ecore_spq_setup(p_hwfn);
2668		ecore_eq_setup(p_hwfn);
2669		ecore_consq_setup(p_hwfn);
2670
2671		/* Read shadow of current MFW mailbox */
2672		ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
2673		OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
2674			    p_hwfn->mcp_info->mfw_mb_cur,
2675			    p_hwfn->mcp_info->mfw_mb_length);
2676
2677		ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
2678
2679		ecore_l2_setup(p_hwfn);
2680		ecore_iov_setup(p_hwfn);
2681#ifdef CONFIG_ECORE_LL2
2682		if (p_hwfn->using_ll2)
2683			ecore_ll2_setup(p_hwfn);
2684#endif
2685		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
2686			ecore_fcoe_setup(p_hwfn);
2687
2688		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
2689			ecore_iscsi_setup(p_hwfn);
2690			ecore_ooo_setup(p_hwfn);
2691		}
2692	}
2693}
2694
2695#define FINAL_CLEANUP_POLL_CNT	(100)
2696#define FINAL_CLEANUP_POLL_TIME	(10)
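/* Together these bound the cleanup wait below at roughly
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME = 100 * 10ms = 1 second.
 */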
2697enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
2698					 struct ecore_ptt *p_ptt,
2699					 u16 id, bool is_vf)
2700{
2701	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
2702	enum _ecore_status_t rc = ECORE_TIMEOUT;
2703
2704#ifndef ASIC_ONLY
2705	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
2706	    CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2707		DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
2708		return ECORE_SUCCESS;
2709	}
2710#endif
2711
2712	addr = GTT_BAR0_MAP_REG_USDM_RAM +
2713	       USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
2714
2715	if (is_vf)
2716		id += 0x10;
2717
2718	command |= X_FINAL_CLEANUP_AGG_INT <<
2719		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
2720	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
2721	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
2722	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
2723
2724	/* Make sure notification is not set before initiating final cleanup */
2725	if (REG_RD(p_hwfn, addr)) {
2726		DP_NOTICE(p_hwfn, false,
2727			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
2728		REG_WR(p_hwfn, addr, 0);
2729	}
2730
2731	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2732		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
2733		   id, command);
2734
2735	ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
2736
2737	/* Poll until completion */
2738	while (!REG_RD(p_hwfn, addr) && count--)
2739		OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
2740
2741	if (REG_RD(p_hwfn, addr))
2742		rc = ECORE_SUCCESS;
2743	else
2744		DP_NOTICE(p_hwfn, true, "Failed to receive FW final cleanup notification\n");
2745
2746	/* Cleanup afterwards */
2747	REG_WR(p_hwfn, addr, 0);
2748
2749	return rc;
2750}
2751
2752static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
2753{
2754	int hw_mode = 0;
2755
2756	if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
2757		hw_mode |= 1 << MODE_BB;
2758	} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
2759		hw_mode |= 1 << MODE_K2;
2760	} else if (ECORE_IS_E5(p_hwfn->p_dev)) {
2761		hw_mode |= 1 << MODE_E5;
2762	} else {
2763		DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
2764			  p_hwfn->p_dev->type);
2765		return ECORE_INVAL;
2766	}
2767
	/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
2769	switch (p_hwfn->p_dev->num_ports_in_engine) {
2770	case 1:
2771		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
2772		break;
2773	case 2:
2774		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
2775		break;
2776	case 4:
2777		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
2778		break;
2779	default:
2780		DP_NOTICE(p_hwfn, true, "num_ports_in_engine = %d not supported\n",
2781			  p_hwfn->p_dev->num_ports_in_engine);
2782		return ECORE_INVAL;
2783	}
2784
2785	if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
2786			  &p_hwfn->p_dev->mf_bits))
2787		hw_mode |= 1 << MODE_MF_SD;
2788	else
2789		hw_mode |= 1 << MODE_MF_SI;
2790
2791#ifndef ASIC_ONLY
2792	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2793		if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2794			hw_mode |= 1 << MODE_FPGA;
2795		} else {
2796			if (p_hwfn->p_dev->b_is_emul_full)
2797				hw_mode |= 1 << MODE_EMUL_FULL;
2798			else
2799				hw_mode |= 1 << MODE_EMUL_REDUCED;
2800		}
2801	} else
2802#endif
2803	hw_mode |= 1 << MODE_ASIC;
2804
2805	if (ECORE_IS_CMT(p_hwfn->p_dev))
2806		hw_mode |= 1 << MODE_100G;
2807
2808	p_hwfn->hw_info.hw_mode = hw_mode;
2809
2810	DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
2811		   "Configuring function for hw_mode: 0x%08x\n",
2812		   p_hwfn->hw_info.hw_mode);
2813
2814	return ECORE_SUCCESS;
2815}
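/* Example composition for ecore_calc_hw_mode() above (illustrative): a BB
 * ASIC with two ports per engine and OVLAN classification would yield
 * (1 << MODE_BB) | (1 << MODE_PORTS_PER_ENG_2) | (1 << MODE_MF_SD) |
 * (1 << MODE_ASIC), with MODE_100G added only on CMT devices.
 */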
2816
2817#ifndef ASIC_ONLY
2818/* MFW-replacement initializations for non-ASIC */
2819static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
2820					       struct ecore_ptt *p_ptt)
2821{
2822	struct ecore_dev *p_dev = p_hwfn->p_dev;
2823	u32 pl_hv = 1;
2824	int i;
2825
2826	if (CHIP_REV_IS_EMUL(p_dev)) {
2827		if (ECORE_IS_AH(p_dev))
2828			pl_hv |= 0x600;
2829		else if (ECORE_IS_E5(p_dev))
2830			ECORE_E5_MISSING_CODE;
2831	}
2832
2833	ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
2834
2835	if (CHIP_REV_IS_EMUL(p_dev) &&
2836	    (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)))
2837		ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
2838			 0x3ffffff);
2839
2840	/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
2841	/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
2842	if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
2843		ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
2844
2845	if (CHIP_REV_IS_EMUL(p_dev)) {
2846		if (ECORE_IS_AH(p_dev)) {
2847			/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
2848			ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
2849				 (p_dev->num_ports_in_engine >> 1));
2850
2851			ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
2852				 p_dev->num_ports_in_engine == 4 ? 0 : 3);
2853		} else if (ECORE_IS_E5(p_dev)) {
2854			ECORE_E5_MISSING_CODE;
2855		}
2856
2857		/* Poll on RBC */
2858		ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
2859		for (i = 0; i < 100; i++) {
2860			OSAL_UDELAY(50);
2861			if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
2862				break;
2863		}
2864		if (i == 100)
2865			DP_NOTICE(p_hwfn, true,
2866				  "RBC done failed to complete in PSWRQ2\n");
2867	}
2868
2869	return ECORE_SUCCESS;
2870}
2871#endif
2872
/* Init run time data for all PFs and their VFs on an engine.
 * TBD for VFs - once parent PF info for each VF is available in shmem,
 * as CAU requires knowledge of the parent PF for each VF.
 */
2877static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
2878{
2879	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
2880	int i, igu_sb_id;
2881
2882	for_each_hwfn(p_dev, i) {
2883		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2884		struct ecore_igu_info *p_igu_info;
2885		struct ecore_igu_block *p_block;
2886		struct cau_sb_entry sb_entry;
2887
2888		p_igu_info = p_hwfn->hw_info.p_igu_info;
2889
2890		for (igu_sb_id = 0;
2891		     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
2892		     igu_sb_id++) {
2893			p_block = &p_igu_info->entry[igu_sb_id];
2894
2895			if (!p_block->is_pf)
2896				continue;
2897
2898			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
2899						p_block->function_id,
2900						0, 0);
2901			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
2902					 sb_entry);
2903		}
2904	}
2905}
2906
2907static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
2908				       struct ecore_ptt *p_ptt)
2909{
2910	u32 val, wr_mbs, cache_line_size;
2911
2912	val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
2913	switch (val) {
2914	case 0:
2915		wr_mbs = 128;
2916		break;
2917	case 1:
2918		wr_mbs = 256;
2919		break;
2920	case 2:
2921		wr_mbs = 512;
2922		break;
2923	default:
2924		DP_INFO(p_hwfn,
2925			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
2926			val);
2927		return;
2928	}
2929
2930	cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
2931	switch (cache_line_size) {
2932	case 32:
2933		val = 0;
2934		break;
2935	case 64:
2936		val = 1;
2937		break;
2938	case 128:
2939		val = 2;
2940		break;
2941	case 256:
2942		val = 3;
2943		break;
2944	default:
2945		DP_INFO(p_hwfn,
2946			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
2947			cache_line_size);
2948	}
2949
2950	if (OSAL_CACHE_LINE_SIZE > wr_mbs)
2951		DP_INFO(p_hwfn,
2952			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
2953			OSAL_CACHE_LINE_SIZE, wr_mbs);
2954
2955	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
2956	if (val > 0) {
2957		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
2958		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
2959	}
2960}
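/* Worked example for ecore_init_cache_line_size() above (hypothetical
 * values): PSWRQ2_REG_WR_MBS0 reading 1 gives wr_mbs = 256; with
 * OSAL_CACHE_LINE_SIZE = 64, cache_line_size = min(64, 256) = 64, so
 * val = 1 is stored for the cache line size and, being non-zero, for both
 * DRAM alignment runtime offsets as well.
 */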
2961
2962static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
2963						 struct ecore_ptt *p_ptt,
2964						 int hw_mode)
2965{
2966	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
2967	struct ecore_dev *p_dev = p_hwfn->p_dev;
2968	u8 vf_id, max_num_vfs;
2969	u16 num_pfs, pf_id;
2970	u32 concrete_fid;
2971	enum _ecore_status_t rc	= ECORE_SUCCESS;
2972
2973	ecore_init_cau_rt_data(p_dev);
2974
2975	/* Program GTT windows */
2976	ecore_gtt_init(p_hwfn, p_ptt);
2977
2978#ifndef ASIC_ONLY
2979	if (CHIP_REV_IS_EMUL(p_dev)) {
2980		rc = ecore_hw_init_chip(p_hwfn, p_ptt);
2981		if (rc != ECORE_SUCCESS)
2982			return rc;
2983	}
2984#endif
2985
2986	if (p_hwfn->mcp_info) {
2987		if (p_hwfn->mcp_info->func_info.bandwidth_max)
2988			qm_info->pf_rl_en = 1;
2989		if (p_hwfn->mcp_info->func_info.bandwidth_min)
2990			qm_info->pf_wfq_en = 1;
2991	}
2992
2993	ecore_qm_common_rt_init(p_hwfn,
2994				p_dev->num_ports_in_engine,
2995				qm_info->max_phys_tcs_per_port,
2996				qm_info->pf_rl_en, qm_info->pf_wfq_en,
2997				qm_info->vport_rl_en, qm_info->vport_wfq_en,
2998				qm_info->qm_port_params);
2999
3000	ecore_cxt_hw_init_common(p_hwfn);
3001
3002	ecore_init_cache_line_size(p_hwfn, p_ptt);
3003
3004	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
3005			    hw_mode);
3006	if (rc != ECORE_SUCCESS)
3007		return rc;
3008
3009	/* @@TBD MichalK - should add VALIDATE_VFID to init tool...
3010	 * need to decide with which value, maybe runtime
3011	 */
3012	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
3013	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
3014
3015	if (ECORE_IS_BB(p_dev)) {
		/* Workaround: clear the RoCE search registers for all
		 * functions to prevent an uninitialized function from taking
		 * part in processing RoCE packets.
		 */
3019		num_pfs = NUM_OF_ENG_PFS(p_dev);
3020		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
3021			ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
3022			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
3023			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
3024		}
3025		/* pretend to original PF */
3026		ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
3027	}
3028
3029	/* Workaround for avoiding CCFC execution error when getting packets
3030	 * with CRC errors, and allowing instead the invoking of the FW error
3031	 * handler.
3032	 * This is not done inside the init tool since it currently can't
3033	 * perform a pretending to VFs.
3034	 */
3035	max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
3036	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
3037		concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
3038		ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
3039		ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
3040		ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
3041		ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
3042		ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
3043	}
3044	/* pretend to original PF */
3045	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
3046
3047	return rc;
3048}
3049
3050#ifndef ASIC_ONLY
#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
3053
3054#define PMEG_IF_BYTE_COUNT	8
3055
3056static void ecore_wr_nw_port(struct ecore_hwfn	*p_hwfn,
3057			     struct ecore_ptt	*p_ptt,
3058			     u32		addr,
3059			     u64		data,
3060			     u8			reg_type,
3061			     u8			port)
3062{
3063	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3064		   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
3065		   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
3066		   (8 << PMEG_IF_BYTE_COUNT),
3067		   (reg_type << 25) | (addr << 8) | port,
3068		   (u32)((data >> 32) & 0xffffffff),
3069		   (u32)(data & 0xffffffff));
3070
3071	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
3072		 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
3073		  0xffff00fe) |
3074		 (8 << PMEG_IF_BYTE_COUNT));
3075	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
3076		 (reg_type << 25) | (addr << 8) | port);
3077	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
3078	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
3079		 (data >> 32) & 0xffffffff);
3080}
3081
3082#define XLPORT_MODE_REG	(0x20a)
3083#define XLPORT_MAC_CONTROL (0x210)
3084#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
3085#define XLPORT_ENABLE_REG (0x20b)
3086
3087#define XLMAC_CTRL (0x600)
3088#define XLMAC_MODE (0x601)
3089#define XLMAC_RX_MAX_SIZE (0x608)
3090#define XLMAC_TX_CTRL (0x604)
3091#define XLMAC_PAUSE_CTRL (0x60d)
3092#define XLMAC_PFC_CTRL (0x60e)
3093
3094static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
3095				    struct ecore_ptt *p_ptt)
3096{
3097	u8 loopback = 0, port = p_hwfn->port_id * 2;
3098
	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
3100
3101	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG,
3102			 (0x4 << 4) | 0x4, 1, port); /* XLPORT MAC MODE */ /* 0 Quad, 4 Single... */
3103	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
3104	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
3105			 0x40, 0, port); /*XLMAC: SOFT RESET */
3106	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE,
3107			 0x40, 0, port); /*XLMAC: Port Speed >= 10Gbps */
3108	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE,
3109			 0x3fff, 0, port); /* XLMAC: Max Size */
3110	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
3111			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
3112			 0, port);
3113	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL,
3114			 0x7c000, 0, port);
3115	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
3116			 0x30ffffc000ULL, 0, port);
3117	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
3118			 0, port); /* XLMAC: TX_EN, RX_EN */
3119	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
3120			 0, port); /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG,
			 1, 0, port); /* Enable parallel PFC interface */
3123	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG,
3124			 0xf, 1, port); /* XLPORT port enable */
3125}
3126
3127static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
3128				       struct ecore_ptt *p_ptt)
3129{
3130	u8 port = p_hwfn->port_id;
3131	u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
3132
	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
3134
3135	ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
3136		 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
3137		 (port <<
3138		  CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
3139		 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
3140
3141	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
3142		 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
3143
3144	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
3145		 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
3146
3147	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
3148		 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
3149
3150	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
3151		 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
3152
3153	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
3154		 (0xA <<
3155		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
3156		 (8 <<
3157		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
3158
3159	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
3160		 0xa853);
3161}
3162
3163static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
3164				 struct ecore_ptt *p_ptt)
3165{
3166	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev))
3167		ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
3168	else /* BB */
3169		ecore_emul_link_init_bb(p_hwfn, p_ptt);
3170
3171	return;
3172}
3173
3174static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
3175			       struct ecore_ptt *p_ptt,  u8 port)
3176{
3177	int port_offset = port ? 0x800 : 0;
3178	u32 xmac_rxctrl	= 0;
3179
3180	/* Reset of XMAC */
3181	/* FIXME: move to common start */
3182	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2*sizeof(u32),
3183		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
3184	OSAL_MSLEEP(1);
3185	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
3186		 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
3187
3188	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
3189
3190	/* Set the number of ports on the Warp Core to 10G */
3191	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
3192
3193	/* Soft reset of XMAC */
3194	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
3195		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
3196	OSAL_MSLEEP(1);
3197	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
3198		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
3199
3200	/* FIXME: move to common end */
3201	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
3202		ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
3203
3204	/* Set Max packet size: initialize XMAC block register for port 0 */
3205	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
3206
3207	/* CRC append for Tx packets: init XMAC block register for port 1 */
3208	ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
3209
3210	/* Enable TX and RX: initialize XMAC block register for port 1 */
3211	ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
3212		 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
3213	xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
3214			       XMAC_REG_RX_CTRL_BB + port_offset);
3215	xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
3216	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
3217}
3218#endif
3219
3220static enum _ecore_status_t
3221ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
3222		       struct ecore_ptt *p_ptt,
3223		       u32 pwm_region_size,
3224		       u32 n_cpus)
3225{
3226	u32 dpi_bit_shift, dpi_count, dpi_page_size;
3227	u32 min_dpis;
3228	u32 n_wids;
3229
	/* Calculate DPI size
	 * ------------------
	 * The PWM region contains Doorbell Pages. The first is reserved for
	 * the kernel, e.g. for L2. The others are free to be used by non-
	 * trusted applications, typically from user space. Each page, called a
	 * doorbell page, is sectioned into windows that allow doorbells to be
	 * issued in parallel by the kernel/application. The size of such a
	 * window (a.k.a. WID) is 1kB.
	 * Summary:
	 *    1kB WID x N WIDS = DPI page size
	 *    DPI page size x N DPIs = PWM region size
	 * Notes:
	 * The DPI page size must be a multiple of OSAL_PAGE_SIZE in order to
	 * ensure that two applications won't share the same page. It also must
	 * contain at least one WID per CPU to allow parallelism. It also must
	 * be a power of 2, since it is stored as a bit shift.
	 *
	 * The DPI page size is stored in a register as 'dpi_bit_shift' so that
	 * 0 is 4kB, 1 is 8kB, etc. Hence the minimum size is 4,096 bytes,
	 * containing 4 WIDs.
	 */
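	/* Illustrative sizing, assuming OSAL_PAGE_SIZE = 4kB, 1kB WIDs and
	 * n_cpus = 16 (with ECORE_MIN_WIDS <= 16): n_wids = 16, so
	 * dpi_page_size = 1kB * 16 = 16kB (already page-aligned and a power
	 * of 2), dpi_bit_shift = log2(16kB / 4kB) = 2 and
	 * dpi_count = pwm_region_size / 16kB.
	 */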
3251	n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
3252	dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
3253	dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) & ~(OSAL_PAGE_SIZE - 1);
3254	dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
3255	dpi_count = pwm_region_size / dpi_page_size;
3256
3257	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
3258	min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
3259
3260	/* Update hwfn */
3261	p_hwfn->dpi_size = dpi_page_size;
3262	p_hwfn->dpi_count = dpi_count;
3263
3264	/* Update registers */
3265	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
3266
3267	if (dpi_count < min_dpis)
3268		return ECORE_NORESOURCES;
3269
3270	return ECORE_SUCCESS;
3271}
3272
3273enum ECORE_ROCE_EDPM_MODE {
3274	ECORE_ROCE_EDPM_MODE_ENABLE	= 0,
3275	ECORE_ROCE_EDPM_MODE_FORCE_ON	= 1,
3276	ECORE_ROCE_EDPM_MODE_DISABLE	= 2,
3277};
3278
3279static enum _ecore_status_t
3280ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
3281			      struct ecore_ptt *p_ptt)
3282{
3283	struct ecore_rdma_pf_params *p_rdma_pf_params;
3284	u32 pwm_regsize, norm_regsize;
3285	u32 non_pwm_conn, min_addr_reg1;
3286	u32 db_bar_size, n_cpus = 1;
3287	u32 roce_edpm_mode;
3288	u32 pf_dems_shift;
3289	enum _ecore_status_t rc = ECORE_SUCCESS;
3290	u8 cond;
3291
3292	db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
3293	if (ECORE_IS_CMT(p_hwfn->p_dev))
3294		db_bar_size /= 2;
3295
	/* Calculate doorbell regions
	 * --------------------------
	 * The doorbell BAR is made of two regions. The first is called the
	 * normal region and the second is called the PWM region. In the
	 * normal region each ICID has its own set of addresses so that
	 * writing to a specific address identifies the ICID. In the Process
	 * Window Mode region the ICID is given in the data written to the
	 * doorbell. The DORQ_REG_PF_MIN_ADDR_REG1 per-PF register denotes
	 * the offset in the doorbell BAR at which the PWM region begins.
	 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is
	 * per non-PWM connection. The calculation below computes the total
	 * non-PWM connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is in
	 * units of 4,096 bytes.
	 */
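	/* Illustrative split, assuming hypothetical values of 4,096 non-PWM
	 * connections, ECORE_PF_DEMS_SIZE = 4 and a 512kB doorbell BAR:
	 * norm_regsize = ROUNDUP(4 * 4096, 4kB) = 16kB,
	 * min_addr_reg1 = 16kB / 4096 = 4 and
	 * pwm_regsize = 512kB - 16kB = 496kB.
	 */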
3310	non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
3311		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
3312						     OSAL_NULL) +
3313		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
3314						     OSAL_NULL);
3315	norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, OSAL_PAGE_SIZE);
3316	min_addr_reg1 = norm_regsize / 4096;
3317	pwm_regsize = db_bar_size - norm_regsize;
3318
3319	/* Check that the normal and PWM sizes are valid */
3320	if (db_bar_size < norm_regsize) {
3321		DP_ERR(p_hwfn->p_dev,
3322		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
3323		       db_bar_size, norm_regsize);
3324		return ECORE_NORESOURCES;
3325	}
3326	if (pwm_regsize < ECORE_MIN_PWM_REGION) {
3327		DP_ERR(p_hwfn->p_dev,
3328		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
3329		       pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
3330		       norm_regsize);
3331		return ECORE_NORESOURCES;
3332	}
3333
3334	p_rdma_pf_params = &p_hwfn->pf_params.rdma_pf_params;
3335
3336	/* Calculate number of DPIs */
3337	if (ECORE_IS_IWARP_PERSONALITY(p_hwfn))
3338		p_rdma_pf_params->roce_edpm_mode =  ECORE_ROCE_EDPM_MODE_DISABLE;
3339
3340	if (p_rdma_pf_params->roce_edpm_mode <= ECORE_ROCE_EDPM_MODE_DISABLE) {
3341		roce_edpm_mode = p_rdma_pf_params->roce_edpm_mode;
3342	} else {
3343		DP_ERR(p_hwfn->p_dev,
3344		       "roce edpm mode was configured to an illegal value of %u. Resetting it to 0-Enable EDPM if BAR size is adequate\n",
3345		       p_rdma_pf_params->roce_edpm_mode);
3346		roce_edpm_mode = 0;
3347	}
3348
	if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
	    (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON)) {
3351		/* Either EDPM is mandatory, or we are attempting to allocate a
3352		 * WID per CPU.
3353		 */
3354		n_cpus = OSAL_NUM_CPUS();
3355		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
3356	}
3357
3358	cond = ((rc != ECORE_SUCCESS) &&
3359		(roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
3360		(roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
3361	if (cond || p_hwfn->dcbx_no_edpm) {
3362		/* Either EDPM is disabled from user configuration, or it is
3363		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
3365		 */
3366		n_cpus = 1;
3367		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
3368
3369#ifdef CONFIG_ECORE_ROCE
3370		/* If we entered this flow due to DCBX then the DPM register is
3371		 * already configured.
3372		 */
3373		if (cond)
3374			ecore_rdma_dpm_bar(p_hwfn, p_ptt);
3375#endif
3376	}
3377
3378	p_hwfn->wid_count = (u16)n_cpus;
3379
3380	/* Check return codes from above calls */
3381	if (rc != ECORE_SUCCESS) {
3382#ifndef LINUX_REMOVE
3383		DP_ERR(p_hwfn,
3384		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via user configuration min_dpis or by disabling EDPM via user configuration roce_edpm_mode\n",
3385		       p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
3386		       ECORE_MIN_DPIS);
3387#else
3388		DP_ERR(p_hwfn,
3389		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is set to %d. You can reduce this minimum down to %d via the module parameter min_rdma_dpis or by disabling EDPM by setting the module parameter roce_edpm to 2\n",
3390		       p_hwfn->dpi_count, p_rdma_pf_params->min_dpis,
3391		       ECORE_MIN_DPIS);
3392#endif
3393		DP_ERR(p_hwfn,
3394		       "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
3395		       norm_regsize, pwm_regsize, p_hwfn->dpi_size,
3396		       p_hwfn->dpi_count,
3397		       ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
3398		       "disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);
3399
3400		return ECORE_NORESOURCES;
3401	}
3402
3403	DP_INFO(p_hwfn,
3404		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
3405		norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count,
3406		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
3407		"disabled" : "enabled", (unsigned long)OSAL_PAGE_SIZE);
3408
3409	/* Update hwfn */
	/* This is later used to calculate the doorbell address */
	p_hwfn->dpi_start_offset = norm_regsize;
3414
3415	/* Update registers */
3416	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
3417	pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
3418	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
3419	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
3420
3421	return ECORE_SUCCESS;
3422}
3423
3424static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
3425					       struct ecore_ptt *p_ptt,
3426					       int hw_mode)
3427{
3428	enum _ecore_status_t rc	= ECORE_SUCCESS;
3429
3430	/* In CMT the gate should be cleared by the 2nd hwfn */
3431	if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
3432		STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
3433
3434	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
3435			    hw_mode);
3436	if (rc != ECORE_SUCCESS)
3437		return rc;
3438
3439	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
3440
3441#ifndef ASIC_ONLY
3442	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
3443		return ECORE_SUCCESS;
3444
3445	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3446		if (ECORE_IS_AH(p_hwfn->p_dev))
3447			return ECORE_SUCCESS;
3448		else if (ECORE_IS_BB(p_hwfn->p_dev))
3449			ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
3450		else /* E5 */
3451			ECORE_E5_MISSING_CODE;
3452	} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
3453		if (ECORE_IS_CMT(p_hwfn->p_dev)) {
3454			/* Activate OPTE in CMT */
3455			u32 val;
3456
3457			val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
3458			val |= 0x10;
3459			ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
3460			ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
3461			ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
3462			ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
3463			ecore_wr(p_hwfn, p_ptt,
3464				 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
3465			ecore_wr(p_hwfn, p_ptt,
3466				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
3467			ecore_wr(p_hwfn, p_ptt,
3468				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
3469				 0x55555555);
3470		}
3471
3472		ecore_emul_link_init(p_hwfn, p_ptt);
3473	} else {
3474		DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
3475	}
3476#endif
3477
3478	return rc;
3479}
3480
3481static enum _ecore_status_t
3482ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3483		 int hw_mode, struct ecore_hw_init_params *p_params)
3484{
3485	u8 rel_pf_id = p_hwfn->rel_pf_id;
3486	u32 prs_reg;
3487	enum _ecore_status_t rc	= ECORE_SUCCESS;
3488	u16 ctrl;
3489	int pos;
3490
3491	if (p_hwfn->mcp_info) {
3492		struct ecore_mcp_function_info *p_info;
3493
3494		p_info = &p_hwfn->mcp_info->func_info;
3495		if (p_info->bandwidth_min)
3496			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
3497
		/* Update the rate limit once we actually have a link */
3499		p_hwfn->qm_info.pf_rl = 100000;
3500	}
3501	ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
3502
3503	ecore_int_igu_init_rt(p_hwfn);
3504
3505	/* Set VLAN in NIG if needed */
3506	if (hw_mode & (1 << MODE_MF_SD)) {
3507		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
3508		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
3509		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
3510			     p_hwfn->hw_info.ovlan);
3511
3512		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
3513			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
3514		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
3515			     1);
3516	}
3517
3518	/* Enable classification by MAC if needed */
3519	if (hw_mode & (1 << MODE_MF_SI)) {
3520		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n");
3521		STORE_RT_REG(p_hwfn,
3522			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
3523	}
3524
	/* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
3526	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
3527		     (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
3528	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
3529		     (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
3530	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
3531
3532	/* perform debug configuration when chip is out of reset */
3533	OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
3534
3535	/* Sanity check before the PF init sequence that uses DMAE */
3536	rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
3537	if (rc)
3538		return rc;
3539
3540	/* PF Init sequence */
3541	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
3542	if (rc)
3543		return rc;
3544
3545	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
3546	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
3547	if (rc)
3548		return rc;
3549
3550	/* Pure runtime initializations - directly to the HW  */
3551	ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
3552
	/* PCI relaxed ordering is generally beneficial for performance,
	 * but it can hurt performance or lead to instability on some setups.
	 * If the management FW is taking care of it, go with that; otherwise
	 * disable it to be on the safe side.
	 */
3558	pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
3559	if (!pos) {
3560		DP_NOTICE(p_hwfn, true,
3561			  "Failed to find the PCI Express Capability structure in the PCI config space\n");
3562		return ECORE_IO;
3563	}
3564
3565	OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
3566
3567	if (p_params->pci_rlx_odr_mode == ECORE_ENABLE_RLX_ODR) {
3568		ctrl |= PCI_EXP_DEVCTL_RELAX_EN;
3569		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
3570					   pos + PCI_EXP_DEVCTL, ctrl);
3571	} else if (p_params->pci_rlx_odr_mode == ECORE_DISABLE_RLX_ODR) {
3572		ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
3573		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
3574					   pos + PCI_EXP_DEVCTL, ctrl);
3575	} else if (ecore_mcp_rlx_odr_supported(p_hwfn)) {
3576		DP_INFO(p_hwfn, "PCI relax ordering configured by MFW\n");
3577	} else {
3578		ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
3579		OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev,
3580					   pos + PCI_EXP_DEVCTL, ctrl);
3581	}
3582
3583	rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
3584	if (rc != ECORE_SUCCESS)
3585		return rc;
3586
3587	/* Use the leading hwfn since in CMT only NIG #0 is operational */
3588	if (IS_LEAD_HWFN(p_hwfn)) {
3589		rc = ecore_llh_hw_init_pf(p_hwfn, p_ptt,
3590					  p_params->avoid_eng_affin);
3591		if (rc != ECORE_SUCCESS)
3592			return rc;
3593	}
3594
3595	if (p_params->b_hw_start) {
3596		/* enable interrupts */
3597		rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode);
3598		if (rc != ECORE_SUCCESS)
3599			return rc;
3600
3601		/* send function start command */
3602		rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
3603				       p_params->allow_npar_tx_switch);
3604		if (rc) {
3605			DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n");
3606			return rc;
3607		}
3608		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
3609		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3610				"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
3611
3612		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
3613		{
3614			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
3615					(1 << 2));
3616			ecore_wr(p_hwfn, p_ptt,
3617					PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
3618					0x100);
3619		}
3620		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
				"PRS_REG_SEARCH registers after PF start\n");
3622		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
3623		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3624				"PRS_REG_SEARCH_TCP: %x\n", prs_reg);
3625		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
3626		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3627				"PRS_REG_SEARCH_UDP: %x\n", prs_reg);
3628		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
3629		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3630				"PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
3631		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
3632		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3633				"PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
3634		prs_reg = ecore_rd(p_hwfn, p_ptt,
3635				PRS_REG_SEARCH_TCP_FIRST_FRAG);
3636		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3637				"PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
3638				prs_reg);
3639		prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
3640		DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
3641				"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
3642	}
3643	return ECORE_SUCCESS;
3644}
3645
3646enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
3647						  struct ecore_ptt *p_ptt,
3648						  bool b_enable)
3649{
3650	u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
3651
3652	/* Configure the PF's internal FID_enable for master transactions */
3653	ecore_wr(p_hwfn, p_ptt,
3654		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
3655
3656	/* Wait until value is set - try for 1 second every 50us */
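	/* (20000 iterations x 50us per iteration = 1 second total) */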
3657	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
3658		val = ecore_rd(p_hwfn, p_ptt,
3659			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
3660		if (val == set_val)
3661			break;
3662
3663		OSAL_UDELAY(50);
3664	}
3665
3666	if (val != set_val) {
3667		DP_NOTICE(p_hwfn, true,
3668			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
3669		return ECORE_UNKNOWN_ERROR;
3670	}
3671
3672	return ECORE_SUCCESS;
3673}
3674
3675static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
3676			struct ecore_ptt *p_main_ptt)
3677{
3678	/* Read shadow of current MFW mailbox */
3679	ecore_mcp_read_mb(p_hwfn, p_main_ptt);
3680	OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
3681		    p_hwfn->mcp_info->mfw_mb_cur,
3682		    p_hwfn->mcp_info->mfw_mb_length);
3683}
3684
3685static enum _ecore_status_t
3686ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
3687			   struct ecore_load_req_params *p_load_req,
3688			   struct ecore_drv_load_params *p_drv_load)
3689{
3690	/* Make sure that if ecore-client didn't provide inputs, all the
3691	 * expected defaults are indeed zero.
3692	 */
3693	OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
3694	OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
3695	OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
3696
3697	OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
3698
3699	if (p_drv_load == OSAL_NULL)
3700		goto out;
3701
3702	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
3703			       ECORE_DRV_ROLE_KDUMP :
3704			       ECORE_DRV_ROLE_OS;
3705	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
3706	p_load_req->override_force_load = p_drv_load->override_force_load;
3707
	/* Old MFW versions don't support timeout values other than default and
	 * none, so any other value is replaced according to the fallback
	 * action.
	 */
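	/* For example, a non-default, non-none timeout value requested from
	 * an old MFW with a fallback of ECORE_TO_FALLBACK_TO_DEFAULT ends up
	 * as ECORE_LOAD_REQ_LOCK_TO_DEFAULT (see the switch below).
	 */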
3711
3712	if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
3713	    p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
3714	    (p_hwfn->mcp_info->capabilities &
3715	     FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
3716		p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
3717		goto out;
3718	}
3719
3720	switch (p_drv_load->mfw_timeout_fallback) {
3721	case ECORE_TO_FALLBACK_TO_NONE:
3722		p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
3723		break;
3724	case ECORE_TO_FALLBACK_TO_DEFAULT:
3725		p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
3726		break;
3727	case ECORE_TO_FALLBACK_FAIL_LOAD:
3728		DP_NOTICE(p_hwfn, false,
3729			  "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
3730			  p_drv_load->mfw_timeout_val,
3731			  ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
3732			  ECORE_LOAD_REQ_LOCK_TO_NONE);
3733		return ECORE_ABORTED;
3734	}
3735
3736	DP_INFO(p_hwfn,
3737		"Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
3738		p_drv_load->mfw_timeout_val,
3739		(p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
3740		"default" : "none",
3741		p_load_req->timeout_val);
3742out:
3743	return ECORE_SUCCESS;
3744}
3745
3746static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
3747				    struct ecore_hw_init_params *p_params)
3748{
3749	if (p_params->p_tunn) {
3750		ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
3751		ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
3752	}
3753
3754	p_hwfn->b_int_enabled = 1;
3755
3756	return ECORE_SUCCESS;
3757}
3758
3759static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
3760				   struct ecore_ptt *p_ptt)
3761{
3762	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
3763		 1 << p_hwfn->abs_pf_id);
3764}
3765
3766enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
3767				   struct ecore_hw_init_params *p_params)
3768{
3769	struct ecore_load_req_params load_req_params;
3770	u32 load_code, resp, param, drv_mb_param;
3771	bool b_default_mtu = true;
3772	struct ecore_hwfn *p_hwfn;
3773	enum _ecore_status_t rc = ECORE_SUCCESS, cancel_load;
3774	u16 ether_type;
3775	int i;
3776
3777	if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
3778		DP_NOTICE(p_dev, false,
3779			  "MSI mode is not supported for CMT devices\n");
3780		return ECORE_INVAL;
3781	}
3782
3783	if (IS_PF(p_dev)) {
3784		rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
3785		if (rc != ECORE_SUCCESS)
3786			return rc;
3787	}
3788
3789	for_each_hwfn(p_dev, i) {
3790		p_hwfn = &p_dev->hwfns[i];
3791
3792		/* If management didn't provide a default, set one of our own */
3793		if (!p_hwfn->hw_info.mtu) {
3794			p_hwfn->hw_info.mtu = 1500;
3795			b_default_mtu = false;
3796		}
3797
3798		if (IS_VF(p_dev)) {
3799			ecore_vf_start(p_hwfn, p_params);
3800			continue;
3801		}
3802
3803		rc = ecore_calc_hw_mode(p_hwfn);
3804		if (rc != ECORE_SUCCESS)
3805			return rc;
3806
3807		if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
3808						   &p_dev->mf_bits) ||
3809				     OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
3810						   &p_dev->mf_bits))) {
3811			if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
3812					  &p_dev->mf_bits))
3813				ether_type = ETH_P_8021Q;
3814			else
3815				ether_type = ETH_P_8021AD;
3816			STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
3817				     ether_type);
3818			STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
3819				     ether_type);
3820			STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
3821				     ether_type);
3822			STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
3823				     ether_type);
3824		}
3825
3826		rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
3827						p_params->p_drv_load_params);
3828		if (rc != ECORE_SUCCESS)
3829			return rc;
3830
3831		rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
3832					&load_req_params);
3833		if (rc != ECORE_SUCCESS) {
3834			DP_NOTICE(p_hwfn, false,
3835				  "Failed sending a LOAD_REQ command\n");
3836			return rc;
3837		}
3838
3839		load_code = load_req_params.load_code;
3840		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
3841			   "Load request was sent. Load code: 0x%x\n",
3842			   load_code);
3843
3844		ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
3845
3846		/* CQ75580:
3847		 * When coming back from hibernate state, the registers from
3848		 * which shadow is read initially are not initialized. It turns
3849		 * out that these registers get initialized during the call to
3850		 * ecore_mcp_load_req request. So we need to reread them here
3851		 * to get the proper shadow register value.
3852		 * Note: This is a workaround for the missing MFW
3853		 * initialization. It may be removed once the implementation
3854		 * is done.
3855		 */
3856		ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
3857
3858		/* Only relevant for recovery:
3859		 * Clear the indication after the LOAD_REQ command is responded
3860		 * by the MFW.
3861		 */
3862		p_dev->recov_in_prog = false;
3863
3864		if (!qm_lock_ref_cnt) {
3865#ifdef CONFIG_ECORE_LOCK_ALLOC
3866			rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
3867			if (rc) {
3868				DP_ERR(p_hwfn, "qm_lock allocation failed\n");
3869				goto qm_lock_fail;
3870			}
3871#endif
3872			OSAL_SPIN_LOCK_INIT(&qm_lock);
3873		}
3874		++qm_lock_ref_cnt;
3875
		/* Clean up the chip from a previous driver if such remains
		 * exist. This is not needed when the PF is the first one on
		 * the engine, since afterwards we are going to init the FW.
		 */
3880		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
3881			rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
3882						 p_hwfn->rel_pf_id, false);
3883			if (rc != ECORE_SUCCESS) {
3884				ecore_hw_err_notify(p_hwfn,
3885						    ECORE_HW_ERR_RAMROD_FAIL);
3886				goto load_err;
3887			}
3888		}
3889
3890		/* Log and clear previous pglue_b errors if such exist */
3891		ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
3892
3893		/* Enable the PF's internal FID_enable in the PXP */
3894		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
3895						  true);
3896		if (rc != ECORE_SUCCESS)
3897			goto load_err;
3898
3899		/* Clear the pglue_b was_error indication.
3900		 * In E4 it must be done after the BME and the internal
3901		 * FID_enable for the PF are set, since VDMs may cause the
3902		 * indication to be set again.
3903		 */
3904		ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
3905
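		/* The load code dictates how much initialization this PF must
		 * perform: an ENGINE load runs the common, port and PF init
		 * phases, a PORT load runs the port and PF phases, and a
		 * FUNCTION load runs only the PF phase - hence the deliberate
		 * fall-through between the cases below.
		 */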
3906		switch (load_code) {
3907		case FW_MSG_CODE_DRV_LOAD_ENGINE:
3908			rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
3909						  p_hwfn->hw_info.hw_mode);
3910			if (rc != ECORE_SUCCESS)
3911				break;
			/* Fall through */
3913		case FW_MSG_CODE_DRV_LOAD_PORT:
3914			rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
3915						p_hwfn->hw_info.hw_mode);
3916			if (rc != ECORE_SUCCESS)
3917				break;
			/* Fall through */
3919		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3920			rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
3921					      p_hwfn->hw_info.hw_mode,
3922					      p_params);
3923			break;
3924		default:
3925			DP_NOTICE(p_hwfn, false,
3926				  "Unexpected load code [0x%08x]", load_code);
3927			rc = ECORE_NOTIMPL;
3928			break;
3929		}
3930
3931		if (rc != ECORE_SUCCESS) {
3932			DP_NOTICE(p_hwfn, false,
3933				  "init phase failed for loadcode 0x%x (rc %d)\n",
3934				  load_code, rc);
3935			goto load_err;
3936		}
3937
3938		rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
3939		if (rc != ECORE_SUCCESS) {
3940			DP_NOTICE(p_hwfn, false, "Sending load done failed, rc = %d\n", rc);
3941			if (rc == ECORE_NOMEM) {
3942				DP_NOTICE(p_hwfn, false,
					  "Sending load done failed due to a memory allocation failure\n");
3944				goto load_err;
3945			}
3946			return rc;
3947		}
3948
3949		/* send DCBX attention request command */
3950		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
3951			   "sending phony dcbx set command to trigger DCBx attention handling\n");
3952		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
3953				   DRV_MSG_CODE_SET_DCBX,
3954				   1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
3955				   &param);
3956		if (rc != ECORE_SUCCESS) {
3957			DP_NOTICE(p_hwfn, false,
3958				  "Failed to send DCBX attention request\n");
3959			return rc;
3960		}
3961
3962		p_hwfn->hw_init_done = true;
3963	}
3964
3965	if (IS_PF(p_dev)) {
3966		/* Get pre-negotiated values for stag, bandwidth etc. */
3967		p_hwfn = ECORE_LEADING_HWFN(p_dev);
3968		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
3969			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
3970		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
3971				   DRV_MSG_CODE_GET_OEM_UPDATES,
3972				   1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
3973				   &resp, &param);
3974		if (rc != ECORE_SUCCESS)
3975			DP_NOTICE(p_hwfn, false,
3976				  "Failed to send GET_OEM_UPDATES attention request\n");
3977	}
3978
3979	if (IS_PF(p_dev)) {
3980		p_hwfn = ECORE_LEADING_HWFN(p_dev);
3981		drv_mb_param = STORM_FW_VERSION;
3982		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
3983				   DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
3984				   drv_mb_param, &resp, &param);
3985		if (rc != ECORE_SUCCESS)
3986			DP_INFO(p_hwfn, "Failed to update firmware version\n");
3987
3988		if (!b_default_mtu) {
3989			rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
3990						      p_hwfn->hw_info.mtu);
3991			if (rc != ECORE_SUCCESS)
3992				DP_INFO(p_hwfn, "Failed to update default mtu\n");
3993		}
3994
3995		rc = ecore_mcp_ov_update_driver_state(p_hwfn,
3996						      p_hwfn->p_main_ptt,
3997						      ECORE_OV_DRIVER_STATE_DISABLED);
3998		if (rc != ECORE_SUCCESS)
3999			DP_INFO(p_hwfn, "Failed to update driver state\n");
4000
4001		rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
4002						 ECORE_OV_ESWITCH_VEB);
4003		if (rc != ECORE_SUCCESS)
4004			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
4005	}
4006
4007	return rc;
4008
4009load_err:
4010	--qm_lock_ref_cnt;
4011#ifdef CONFIG_ECORE_LOCK_ALLOC
4012	if (!qm_lock_ref_cnt)
4013		OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
4014qm_lock_fail:
4015#endif
4016	/* The MFW load lock should be released also when initialization fails.
4017	 * If supported, use a cancel_load request to update the MFW with the
4018	 * load failure.
4019	 */
4020	cancel_load = ecore_mcp_cancel_load_req(p_hwfn, p_hwfn->p_main_ptt);
4021	if (cancel_load == ECORE_NOTIMPL) {
4022		DP_INFO(p_hwfn,
4023			"Send a load done request instead of cancel load\n");
4024		ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
4025	}
4026	return rc;
4027}
4028
4029#define ECORE_HW_STOP_RETRY_LIMIT	(10)
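/* Together with the 1ms sleep per iteration below, this bounds the timers
 * polling loop at roughly 10ms.
 */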
4030static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
4031				 struct ecore_hwfn *p_hwfn,
4032				 struct ecore_ptt *p_ptt)
4033{
4034	int i;
4035
4036	/* close timers */
4037	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
4038	ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
4039	for (i = 0;
4040	     i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
4041	     i++) {
4042		if ((!ecore_rd(p_hwfn, p_ptt,
4043			       TM_REG_PF_SCAN_ACTIVE_CONN)) &&
4044		    (!ecore_rd(p_hwfn, p_ptt,
4045			       TM_REG_PF_SCAN_ACTIVE_TASK)))
4046			break;
4047
		/* Depending on the number of connections/tasks, a 1ms
		 * sleep may be required between polls
		 */
4051		OSAL_MSLEEP(1);
4052	}
4053
4054	if (i < ECORE_HW_STOP_RETRY_LIMIT)
4055		return;
4056
4057	DP_NOTICE(p_hwfn, false,
4058		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
4059		  (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
4060		  (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
4061}
4062
4063void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
4064{
4065	int j;
4066
4067	for_each_hwfn(p_dev, j) {
4068		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
4069		struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
4070
4071		ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
4072	}
4073}
4074
4075static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
4076						 struct ecore_ptt *p_ptt,
4077						 u32 addr, u32 expected_val)
4078{
4079	u32 val = ecore_rd(p_hwfn, p_ptt, addr);
4080
4081	if (val != expected_val) {
4082		DP_NOTICE(p_hwfn, true,
4083			  "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
4084			  addr, val, expected_val);
4085		return ECORE_UNKNOWN_ERROR;
4086	}
4087
4088	return ECORE_SUCCESS;
4089}
4090
4091enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
4092{
4093	struct ecore_hwfn *p_hwfn;
4094	struct ecore_ptt *p_ptt;
4095	enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
4096	int j;
4097
4098	for_each_hwfn(p_dev, j) {
4099		p_hwfn = &p_dev->hwfns[j];
4100		p_ptt = p_hwfn->p_main_ptt;
4101
4102		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
4103
4104		if (IS_VF(p_dev)) {
4105			ecore_vf_pf_int_cleanup(p_hwfn);
4106			rc = ecore_vf_pf_reset(p_hwfn);
4107			if (rc != ECORE_SUCCESS) {
4108				DP_NOTICE(p_hwfn, true,
4109					  "ecore_vf_pf_reset failed. rc = %d.\n",
4110					  rc);
4111				rc2 = ECORE_UNKNOWN_ERROR;
4112			}
4113			continue;
4114		}
4115
4116		/* mark the hw as uninitialized... */
4117		p_hwfn->hw_init_done = false;
4118
4119		/* Send unload command to MCP */
4120		if (!p_dev->recov_in_prog) {
4121			rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
4122			if (rc != ECORE_SUCCESS) {
4123				DP_NOTICE(p_hwfn, false,
4124					  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
4125					  rc);
4126				rc2 = ECORE_UNKNOWN_ERROR;
4127			}
4128		}
4129
4130		OSAL_DPC_SYNC(p_hwfn);
4131
		/* After this point no MFW attentions are expected, e.g. this
		 * prevents a race between pf stop and dcbx pf update.
		 */
4135
4136		rc = ecore_sp_pf_stop(p_hwfn);
4137		if (rc != ECORE_SUCCESS) {
4138			DP_NOTICE(p_hwfn, false,
4139				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
4140				  rc);
4141			rc2 = ECORE_UNKNOWN_ERROR;
4142		}
4143
4144		/* perform debug action after PF stop was sent */
4145		OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
4146
4147		/* close NIG to BRB gate */
4148		ecore_wr(p_hwfn, p_ptt,
4149			 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
4150
4151		/* close parser */
4152		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
4153		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
4154		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
4155		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
4156		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
4157
4158		/* @@@TBD - clean transmission queues (5.b) */
4159		/* @@@TBD - clean BTB (5.c) */
4160
4161		ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
4162
4163		/* @@@TBD - verify DMAE requests are done (8) */
4164
4165		/* Disable Attention Generation */
4166		ecore_int_igu_disable_int(p_hwfn, p_ptt);
4167		ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
4168		ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
4169		ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
4170		rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
4171		if (rc != ECORE_SUCCESS) {
4172			DP_NOTICE(p_hwfn, true,
4173				  "Failed to return IGU CAM to default\n");
4174			rc2 = ECORE_UNKNOWN_ERROR;
4175		}
4176
4177		/* Need to wait 1ms to guarantee SBs are cleared */
4178		OSAL_MSLEEP(1);
4179
4180		if (!p_dev->recov_in_prog) {
4181			ecore_verify_reg_val(p_hwfn, p_ptt,
4182					     QM_REG_USG_CNT_PF_TX, 0);
4183			ecore_verify_reg_val(p_hwfn, p_ptt,
4184					     QM_REG_USG_CNT_PF_OTHER, 0);
4185			/* @@@TBD - assert on incorrect xCFC values (10.b) */
4186		}
4187
4188		/* Disable PF in HW blocks */
4189		ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
4190		ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
4191
4192		if (IS_LEAD_HWFN(p_hwfn) &&
4193		    OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
4194		    !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
4195			ecore_llh_remove_mac_filter(p_dev, 0,
4196						    p_hwfn->hw_info.hw_mac_addr);
4197
4198		--qm_lock_ref_cnt;
4199#ifdef CONFIG_ECORE_LOCK_ALLOC
4200		if (!qm_lock_ref_cnt)
4201			OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
4202#endif
4203
4204		if (!p_dev->recov_in_prog) {
4205			rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
4206			if (rc == ECORE_NOMEM) {
4207				DP_NOTICE(p_hwfn, false,
4208					 "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
4209				rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
4210			}
4211			if (rc != ECORE_SUCCESS) {
4212				DP_NOTICE(p_hwfn, false,
4213					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
4214					  rc);
4215				rc2 = ECORE_UNKNOWN_ERROR;
4216			}
4217		}
4218	} /* hwfn loop */
4219
4220	if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
4221		p_hwfn = ECORE_LEADING_HWFN(p_dev);
4222		p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
4223
4224		 /* Clear the PF's internal FID_enable in the PXP.
4225		  * In CMT this should only be done for first hw-function, and
4226		  * only after all transactions have stopped for all active
4227		  * hw-functions.
4228		  */
4229		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
4230						  false);
4231		if (rc != ECORE_SUCCESS) {
4232			DP_NOTICE(p_hwfn, true,
4233				  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
4234				  rc);
4235			rc2 = ECORE_UNKNOWN_ERROR;
4236		}
4237	}
4238
4239	return rc2;
4240}
4241
4242enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
4243{
4244	int j;
4245
4246	for_each_hwfn(p_dev, j) {
4247		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
4248		struct ecore_ptt *p_ptt;
4249
4250		if (IS_VF(p_dev)) {
4251			ecore_vf_pf_int_cleanup(p_hwfn);
4252			continue;
4253		}
4254		p_ptt = ecore_ptt_acquire(p_hwfn);
4255		if (!p_ptt)
4256			return ECORE_AGAIN;
4257
4258		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n");
4259
4260		ecore_wr(p_hwfn, p_ptt,
4261			 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
4262
4263		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
4264		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
4265		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
4266		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
4267		ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
4268
4269		/* @@@TBD - clean transmission queues (5.b) */
4270		/* @@@TBD - clean BTB (5.c) */
4271
4272		/* @@@TBD - verify DMAE requests are done (8) */
4273
4274		ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
4275		/* Need to wait 1ms to guarantee SBs are cleared */
4276		OSAL_MSLEEP(1);
4277		ecore_ptt_release(p_hwfn, p_ptt);
4278	}
4279
4280	return ECORE_SUCCESS;
4281}
4282
4283enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
4284{
4285	struct ecore_ptt *p_ptt;
4286
4287	if (IS_VF(p_hwfn->p_dev))
4288		return ECORE_SUCCESS;
4289
4290	p_ptt = ecore_ptt_acquire(p_hwfn);
4291	if (!p_ptt)
4292		return ECORE_AGAIN;
4293
4294	/* If roce info is allocated it means roce is initialized and should
4295	 * be enabled in searcher.
4296	 */
4297	if (p_hwfn->p_rdma_info &&
4298	    p_hwfn->p_rdma_info->active &&
4299	    p_hwfn->b_rdma_enabled_in_prs)
4300		ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
4301
4302	/* Re-open incoming traffic */
4303	ecore_wr(p_hwfn, p_ptt,
4304		 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
4305	ecore_ptt_release(p_hwfn, p_ptt);
4306
4307	return ECORE_SUCCESS;
4308}
4309
4310enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev, u32 reg_idx,
4311					u32 pattern_size, u32 crc)
4312{
4313	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
4314	enum _ecore_status_t rc = ECORE_SUCCESS;
4315	struct ecore_ptt *p_ptt;
4316	u32 reg_len = 0;
4317	u32 reg_crc = 0;
4318
4319	p_ptt = ecore_ptt_acquire(p_hwfn);
4320	if (!p_ptt)
4321		return ECORE_AGAIN;
4322
4323	/* Get length and CRC register offsets */
4324	switch (reg_idx)
4325	{
4326	case 0:
4327		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB :
4328				WOL_REG_ACPI_PAT_0_LEN_K2_E5;
4329		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB :
4330				WOL_REG_ACPI_PAT_0_CRC_K2_E5;
4331		break;
4332	case 1:
4333		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB :
4334				WOL_REG_ACPI_PAT_1_LEN_K2_E5;
4335		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB :
4336				WOL_REG_ACPI_PAT_1_CRC_K2_E5;
4337		break;
4338	case 2:
4339		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB :
4340				WOL_REG_ACPI_PAT_2_LEN_K2_E5;
4341		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB :
4342				WOL_REG_ACPI_PAT_2_CRC_K2_E5;
4343		break;
4344	case 3:
4345		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB :
4346				WOL_REG_ACPI_PAT_3_LEN_K2_E5;
4347		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB :
4348				WOL_REG_ACPI_PAT_3_CRC_K2_E5;
4349		break;
4350	case 4:
4351		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB :
4352				WOL_REG_ACPI_PAT_4_LEN_K2_E5;
4353		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB :
4354				WOL_REG_ACPI_PAT_4_CRC_K2_E5;
4355		break;
4356	case 5:
4357		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB :
4358				WOL_REG_ACPI_PAT_5_LEN_K2_E5;
4359		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB :
4360				WOL_REG_ACPI_PAT_5_CRC_K2_E5;
4361		break;
4362	case 6:
4363		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB :
4364				WOL_REG_ACPI_PAT_6_LEN_K2_E5;
4365		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB :
4366				WOL_REG_ACPI_PAT_6_CRC_K2_E5;
4367		break;
4368	case 7:
4369		reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB :
4370				WOL_REG_ACPI_PAT_7_LEN_K2_E5;
4371		reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB :
4372				WOL_REG_ACPI_PAT_7_CRC_K2_E5;
4373		break;
4374	default:
4375		rc = ECORE_UNKNOWN_ERROR;
4376		goto out;
4377	}
4378
	/* Align the pattern size up to a multiple of 4 */
	pattern_size = (pattern_size + 3) & ~0x3;
4382
4383	/* Write pattern length and crc value */
4384	if (ECORE_IS_BB(p_dev)) {
4385		rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_len, pattern_size);
4386		if (rc != ECORE_SUCCESS) {
4387			DP_NOTICE(p_hwfn, false,
4388				  "Failed to update the ACPI pattern length\n");
			goto out;
4390		}
4391
4392		rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, reg_crc, crc);
4393		if (rc != ECORE_SUCCESS) {
4394			DP_NOTICE(p_hwfn, false,
4395				  "Failed to update the ACPI pattern crc value\n");
			goto out;
4397		}
4398	} else {
4399		ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_len, pattern_size);
4400		ecore_mcp_wol_wr(p_hwfn, p_ptt, reg_crc, crc);
4401	}
4402
4403	DP_INFO(p_dev,
4404		"ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] "
4405		"reg_len[0x%x=0x%x]\n",
4406		reg_idx, reg_crc, crc, reg_len, pattern_size);
4407out:
	ecore_ptt_release(p_hwfn, p_ptt);
4409
4410	return rc;
4411}
4412
4413void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
4414			    struct ecore_ptt *p_ptt)
4415{
4416	const u32 wake_buffer_clear_offset =
4417		ECORE_IS_BB(p_hwfn->p_dev) ?
4418		NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5;
4419
4420	DP_INFO(p_hwfn->p_dev,
4421		"ecore_wol_buffer_clear: reset "
4422		"REG_WAKE_BUFFER_CLEAR offset=0x%08x\n",
4423		wake_buffer_clear_offset);
4424
4425	if (ECORE_IS_BB(p_hwfn->p_dev)) {
4426		ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
4427		ecore_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
4428	} else {
4429		ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 1);
4430		ecore_mcp_wol_wr(p_hwfn, p_ptt, wake_buffer_clear_offset, 0);
4431	}
4432}
4433
4434enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
4435					 struct ecore_ptt *p_ptt,
4436					 struct ecore_wake_info *wake_info)
4437{
4438	struct ecore_dev *p_dev = p_hwfn->p_dev;
4439	u32 *buf = OSAL_NULL;
4440	u32 i    = 0;
	const u32 reg_wake_buffer_offset =
		ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB :
			WOL_REG_WAKE_BUFFER_K2_E5;
4444
4445	wake_info->wk_info    = ecore_rd(p_hwfn, p_ptt,
4446				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB :
4447				WOL_REG_WAKE_INFO_K2_E5);
4448	wake_info->wk_details = ecore_rd(p_hwfn, p_ptt,
4449				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB :
4450				WOL_REG_WAKE_DETAILS_K2_E5);
4451	wake_info->wk_pkt_len = ecore_rd(p_hwfn, p_ptt,
4452				ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB :
4453				WOL_REG_WAKE_PKT_LEN_K2_E5);
4454
4455	DP_INFO(p_dev,
4456		"ecore_get_wake_info: REG_WAKE_INFO=0x%08x "
4457		"REG_WAKE_DETAILS=0x%08x "
4458		"REG_WAKE_PKT_LEN=0x%08x\n",
4459		wake_info->wk_info,
4460		wake_info->wk_details,
4461		wake_info->wk_pkt_len);
4462
4463	buf = (u32 *)wake_info->wk_buffer;
4464
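	/* Copy the wake packet out of the wake buffer one dword at a time;
	 * e.g., a (hypothetical) 60-byte packet results in 15 reads.
	 */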
4465	for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++)
4466	{
4467		if ((i*sizeof(u32)) >=  sizeof(wake_info->wk_buffer))
4468		{
			DP_INFO(p_dev,
				"ecore_get_wake_info: index %d exceeds the wake buffer size\n",
				i);
4472			break;
4473		}
		buf[i] = ecore_rd(p_hwfn, p_ptt,
				  reg_wake_buffer_offset + (i * sizeof(u32)));
4476		DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n",
4477			i, buf[i]);
4478	}
4479
4480	ecore_wol_buffer_clear(p_hwfn, p_ptt);
4481
4482	return ECORE_SUCCESS;
4483}
4484
4485/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
4486static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
4487{
4488	ecore_ptt_pool_free(p_hwfn);
4489	OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
4490	p_hwfn->hw_info.p_igu_info = OSAL_NULL;
4491}
4492
4493/* Setup bar access */
4494static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
4495{
4496	/* clear indirect access */
4497	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
4498		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4499			 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
4500		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4501			 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
4502		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4503			 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
4504		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4505			 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
4506	} else {
4507		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4508			 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
4509		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4510			 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
4511		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4512			 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
4513		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4514			 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
4515	}
4516
4517	/* Clean previous pglue_b errors if such exist */
4518	ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
4519
4520	/* enable internal target-read */
4521	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4522		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
4523}
4524
4525static void get_function_id(struct ecore_hwfn *p_hwfn)
4526{
4527	/* ME Register */
4528	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
4529						  PXP_PF_ME_OPAQUE_ADDR);
4530
4531	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
4532
4533	/* Bits 16-19 from the ME registers are the pf_num */
4534	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
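	/* For example (hypothetical value): a concrete_fid of 0x000b0005
	 * yields abs_pf_id 0xb.
	 */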
4535	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
4536				      PXP_CONCRETE_FID_PFID);
4537	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
4538				    PXP_CONCRETE_FID_PORT);
4539
4540	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
4541		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
4542		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
4543}
4544
4545void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
4546{
4547	u32 *feat_num = p_hwfn->hw_info.feat_num;
4548	struct ecore_sb_cnt_info sb_cnt;
4549	u32 non_l2_sbs = 0;
4550
4551	OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
4552	ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
4553
4554#ifdef CONFIG_ECORE_ROCE
	/* Each RoCE CNQ requires one status block and one CNQ. We divide the
	 * status blocks equally between L2 and RoCE, but with consideration
	 * as to how many L2 queues / CNQs we have.
	 */
4559	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
4560#ifndef __EXTRACT__LINUX__THROW__
4561		u32 max_cnqs;
4562#endif
4563
4564		feat_num[ECORE_RDMA_CNQ] =
4565			OSAL_MIN_T(u32,
4566				   sb_cnt.cnt / 2,
4567				   RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM));
4568
4569#ifndef __EXTRACT__LINUX__THROW__
4570		/* Upper layer might require less */
4571		max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs;
4572		if (max_cnqs) {
4573			if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE)
4574				max_cnqs = 0;
4575			feat_num[ECORE_RDMA_CNQ] =
4576				OSAL_MIN_T(u32,
4577					   feat_num[ECORE_RDMA_CNQ],
4578					   max_cnqs);
4579		}
4580#endif
4581
4582		non_l2_sbs = feat_num[ECORE_RDMA_CNQ];
4583	}
4584#endif
4585
	/* Each L2 queue requires one status block and one L2-queue resource */
4587	if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
4588		/* Start by allocating VF queues, then PF's */
4589		feat_num[ECORE_VF_L2_QUE] =
4590			OSAL_MIN_T(u32,
4591				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
4592				   sb_cnt.iov_cnt);
4593		feat_num[ECORE_PF_L2_QUE] =
4594			OSAL_MIN_T(u32,
4595				   sb_cnt.cnt - non_l2_sbs,
4596				   RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
4597				   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
4598	}
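	/* Example with hypothetical numbers: with sb_cnt.cnt = 16,
	 * sb_cnt.iov_cnt = 8 and non_l2_sbs = 0, up to 8 VF L2 queues and up
	 * to 16 PF L2 queues are possible, both further capped by
	 * RESC_NUM(p_hwfn, ECORE_L2_QUEUE).
	 */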
4599
4600	if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
4601		feat_num[ECORE_FCOE_CQ] =
4602			OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
4603							     ECORE_CMDQS_CQS));
4604
4605	if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
4606		feat_num[ECORE_ISCSI_CQ] =
4607			OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
4608							     ECORE_CMDQS_CQS));
4609
4610	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
4611		   "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
4612		   (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
4613		   (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
4614		   (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
4615		   (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
4616		   (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
4617		   (int)sb_cnt.cnt);
4618}
4619
4620const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
4621{
4622	switch (res_id) {
4623	case ECORE_L2_QUEUE:
4624		return "L2_QUEUE";
4625	case ECORE_VPORT:
4626		return "VPORT";
4627	case ECORE_RSS_ENG:
4628		return "RSS_ENG";
4629	case ECORE_PQ:
4630		return "PQ";
4631	case ECORE_RL:
4632		return "RL";
4633	case ECORE_MAC:
4634		return "MAC";
4635	case ECORE_VLAN:
4636		return "VLAN";
4637	case ECORE_RDMA_CNQ_RAM:
4638		return "RDMA_CNQ_RAM";
4639	case ECORE_ILT:
4640		return "ILT";
4641	case ECORE_LL2_QUEUE:
4642		return "LL2_QUEUE";
4643	case ECORE_CMDQS_CQS:
4644		return "CMDQS_CQS";
4645	case ECORE_RDMA_STATS_QUEUE:
4646		return "RDMA_STATS_QUEUE";
4647	case ECORE_BDQ:
4648		return "BDQ";
4649	case ECORE_SB:
4650		return "SB";
4651	default:
4652		return "UNKNOWN_RESOURCE";
4653	}
4654}
4655
4656static enum _ecore_status_t
4657__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
4658			      struct ecore_ptt *p_ptt,
4659			      enum ecore_resources res_id,
4660			      u32 resc_max_val,
4661			      u32 *p_mcp_resp)
4662{
4663	enum _ecore_status_t rc;
4664
4665	rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
4666					resc_max_val, p_mcp_resp);
4667	if (rc != ECORE_SUCCESS) {
4668		DP_NOTICE(p_hwfn, false,
4669			  "MFW response failure for a max value setting of resource %d [%s]\n",
4670			  res_id, ecore_hw_get_resc_name(res_id));
4671		return rc;
4672	}
4673
4674	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
4675		DP_INFO(p_hwfn,
4676			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
4677			res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
4678
4679	return ECORE_SUCCESS;
4680}
4681
4682static enum _ecore_status_t
4683ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
4684			    struct ecore_ptt *p_ptt)
4685{
4686	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
4687	u32 resc_max_val, mcp_resp;
4688	u8 res_id;
4689	enum _ecore_status_t rc;
4690
4691	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
4692		switch (res_id) {
4693		case ECORE_LL2_QUEUE:
4694			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
4695			break;
4696		case ECORE_RDMA_CNQ_RAM:
4697			/* No need for a case for ECORE_CMDQS_CQS since
4698			 * CNQ/CMDQS are the same resource.
4699			 */
4700			resc_max_val = NUM_OF_GLOBAL_QUEUES;
4701			break;
4702		case ECORE_RDMA_STATS_QUEUE:
4703			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
4704					    : RDMA_NUM_STATISTIC_COUNTERS_BB;
4705			break;
4706		case ECORE_BDQ:
4707			resc_max_val = BDQ_NUM_RESOURCES;
4708			break;
4709		default:
4710			continue;
4711		}
4712
4713		rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
4714						   resc_max_val, &mcp_resp);
4715		if (rc != ECORE_SUCCESS)
4716			return rc;
4717
		/* There's no point in continuing to the next resource if the
4719		 * command is not supported by the MFW.
4720		 * We do continue if the command is supported but the resource
4721		 * is unknown to the MFW. Such a resource will be later
4722		 * configured with the default allocation values.
4723		 */
4724		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
4725			return ECORE_NOTIMPL;
4726	}
4727
4728	return ECORE_SUCCESS;
4729}
4730
4731static
4732enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
4733					    enum ecore_resources res_id,
4734					    u32 *p_resc_num, u32 *p_resc_start)
4735{
4736	u8 num_funcs = p_hwfn->num_funcs_on_engine;
4737	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
4738
4739	switch (res_id) {
4740	case ECORE_L2_QUEUE:
4741		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
4742				      MAX_NUM_L2_QUEUES_BB) / num_funcs;
4743		break;
4744	case ECORE_VPORT:
4745		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
4746				      MAX_NUM_VPORTS_BB) / num_funcs;
4747		break;
4748	case ECORE_RSS_ENG:
4749		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
4750				      ETH_RSS_ENGINE_NUM_BB) / num_funcs;
4751		break;
4752	case ECORE_PQ:
4753		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
4754				      MAX_QM_TX_QUEUES_BB) / num_funcs;
4755		*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
4756		break;
4757	case ECORE_RL:
4758		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
4759		break;
4760	case ECORE_MAC:
4761	case ECORE_VLAN:
4762		/* Each VFC resource can accommodate both a MAC and a VLAN */
4763		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
4764		break;
4765	case ECORE_ILT:
4766		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
4767				      PXP_NUM_ILT_RECORDS_BB) / num_funcs;
4768		break;
4769	case ECORE_LL2_QUEUE:
4770		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
4771		break;
4772	case ECORE_RDMA_CNQ_RAM:
4773	case ECORE_CMDQS_CQS:
4774		/* CNQ/CMDQS are the same resource */
4775		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
4776		break;
4777	case ECORE_RDMA_STATS_QUEUE:
4778		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
4779				      RDMA_NUM_STATISTIC_COUNTERS_BB) /
4780			      num_funcs;
4781		break;
4782	case ECORE_BDQ:
4783		if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI &&
4784		    p_hwfn->hw_info.personality != ECORE_PCI_FCOE)
4785			*p_resc_num = 0;
4786		else
4787			*p_resc_num = 1;
4788		break;
4789	case ECORE_SB:
4790		/* Since we want its value to reflect whether MFW supports
4791		 * the new scheme, have a default of 0.
4792		 */
4793		*p_resc_num = 0;
4794		break;
4795	default:
4796		return ECORE_INVAL;
4797	}
4798
4799	switch (res_id) {
4800	case ECORE_BDQ:
4801		if (!*p_resc_num)
4802			*p_resc_start = 0;
4803		else if (p_hwfn->p_dev->num_ports_in_engine == 4)
4804			*p_resc_start = p_hwfn->port_id;
4805		else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
4806			*p_resc_start = p_hwfn->port_id;
4807		else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
4808			*p_resc_start = p_hwfn->port_id + 2;
4809		break;
4810	default:
4811		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
4812		break;
4813	}
4814
4815	return ECORE_SUCCESS;
4816}
4817
4818static enum _ecore_status_t
4819__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
4820			 bool drv_resc_alloc)
4821{
4822	u32 dflt_resc_num = 0, dflt_resc_start = 0;
4823	u32 mcp_resp, *p_resc_num, *p_resc_start;
4824	enum _ecore_status_t rc;
4825
4826	p_resc_num = &RESC_NUM(p_hwfn, res_id);
4827	p_resc_start = &RESC_START(p_hwfn, res_id);
4828
4829	rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
4830				    &dflt_resc_start);
4831	if (rc != ECORE_SUCCESS) {
4832		DP_ERR(p_hwfn,
4833		       "Failed to get default amount for resource %d [%s]\n",
4834			res_id, ecore_hw_get_resc_name(res_id));
4835		return rc;
4836	}
4837
4838#ifndef ASIC_ONLY
4839	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
4840		*p_resc_num = dflt_resc_num;
4841		*p_resc_start = dflt_resc_start;
4842		goto out;
4843	}
4844#endif
4845
4846	rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
4847				     &mcp_resp, p_resc_num, p_resc_start);
4848	if (rc != ECORE_SUCCESS) {
4849		DP_NOTICE(p_hwfn, false,
4850			  "MFW response failure for an allocation request for resource %d [%s]\n",
4851			  res_id, ecore_hw_get_resc_name(res_id));
4852		return rc;
4853	}
4854
4855	/* Default driver values are applied in the following cases:
4856	 * - The resource allocation MB command is not supported by the MFW
4857	 * - There is an internal error in the MFW while processing the request
4858	 * - The resource ID is unknown to the MFW
4859	 */
4860	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
4861		DP_INFO(p_hwfn,
4862			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
4863			res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
4864			dflt_resc_num, dflt_resc_start);
4865		*p_resc_num = dflt_resc_num;
4866		*p_resc_start = dflt_resc_start;
4867		goto out;
4868	}
4869
4870	if ((*p_resc_num != dflt_resc_num ||
4871	     *p_resc_start != dflt_resc_start) &&
4872	    res_id != ECORE_SB) {
4873		DP_INFO(p_hwfn,
4874			"MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
4875			res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
4876			*p_resc_start, dflt_resc_num, dflt_resc_start,
4877			drv_resc_alloc ? " - Applying default values" : "");
4878		if (drv_resc_alloc) {
4879			*p_resc_num = dflt_resc_num;
4880			*p_resc_start = dflt_resc_start;
4881		}
4882	}
4883out:
	/* PQs have to be aligned to 8 [that's the HW granularity].
	 * Reduce the number so it would fit.
	 */
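	/* For example (hypothetical values): num 150 (0x96) and start 18
	 * (0x12) would be reduced to 144 (0x90) and 16 (0x10).
	 */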
4887	if ((res_id == ECORE_PQ) &&
4888	    ((*p_resc_num % 8) || (*p_resc_start % 8))) {
4889		DP_INFO(p_hwfn,
4890			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
4891			*p_resc_num, (*p_resc_num) & ~0x7,
4892			*p_resc_start, (*p_resc_start) & ~0x7);
4893		*p_resc_num &= ~0x7;
4894		*p_resc_start &= ~0x7;
4895	}
4896
4897	return ECORE_SUCCESS;
4898}
4899
4900static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
4901						   bool drv_resc_alloc)
4902{
4903	enum _ecore_status_t rc;
4904	u8 res_id;
4905
4906	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
4907		rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
4908		if (rc != ECORE_SUCCESS)
4909			return rc;
4910	}
4911
4912	return ECORE_SUCCESS;
4913}
4914
4915static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
4916						      struct ecore_ptt *p_ptt)
4917{
4918	u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn);
4919	struct ecore_dev *p_dev = p_hwfn->p_dev;
4920	enum _ecore_status_t rc;
4921
4922	rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
4923	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
4924		return rc;
4925	else if (rc == ECORE_NOTIMPL)
4926		p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
4927
4928	if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
4929		DP_INFO(p_hwfn,
			"Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
4931			native_ppfid_idx, p_dev->ppfid_bitmap);
4932		p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
4933	}
4934
4935	return ECORE_SUCCESS;
4936}
4937
4938static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
4939					      struct ecore_ptt *p_ptt,
4940					      bool drv_resc_alloc)
4941{
4942	struct ecore_resc_unlock_params resc_unlock_params;
4943	struct ecore_resc_lock_params resc_lock_params;
4944	bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
4945	u8 res_id;
4946	enum _ecore_status_t rc;
4947#ifndef ASIC_ONLY
4948	u32 *resc_start = p_hwfn->hw_info.resc_start;
4949	u32 *resc_num = p_hwfn->hw_info.resc_num;
	/* For AH, an equal share of the ILT lines among the maximal number of
4951	 * PFs is not enough for RoCE. This would be solved by the future
4952	 * resource allocation scheme, but isn't currently present for
4953	 * FPGA/emulation. For now we keep a number that is sufficient for RoCE
4954	 * to work - the BB number of ILT lines divided by its max PFs number.
4955	 */
4956	u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
4957#endif
4958
4959	/* Setting the max values of the soft resources and the following
4960	 * resources allocation queries should be atomic. Since several PFs can
4961	 * run in parallel - a resource lock is needed.
4962	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
4964	 * needed, and proceed to the queries. Other failures, including a
4965	 * failure to acquire the lock, will cause this function to fail.
4966	 * Old drivers that don't acquire the lock can run in parallel, and
4967	 * their allocation values won't be affected by the updated max values.
4968	 */
4969
4970	ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
4971					 ECORE_RESC_LOCK_RESC_ALLOC, false);
4972
4973	/* Changes on top of the default values to accommodate parallel attempts
4974	 * of several PFs.
4975	 * [10 x 10 msec by default ==> 20 x 50 msec]
4976	 */
4977	resc_lock_params.retry_num *= 2;
4978	resc_lock_params.retry_interval *= 5;
4979
4980	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
4981	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
4982		return rc;
4983	} else if (rc == ECORE_NOTIMPL) {
4984		DP_INFO(p_hwfn,
4985			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
4986	} else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
4987		DP_NOTICE(p_hwfn, false,
4988			  "Failed to acquire the resource lock for the resource allocation commands\n");
4989		return ECORE_BUSY;
4990	} else {
4991		rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
4992		if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
4993			DP_NOTICE(p_hwfn, false,
4994				  "Failed to set the max values of the soft resources\n");
4995			goto unlock_and_exit;
4996		} else if (rc == ECORE_NOTIMPL) {
4997			DP_INFO(p_hwfn,
4998				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
4999			rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
5000						   &resc_unlock_params);
5001			if (rc != ECORE_SUCCESS)
5002				DP_INFO(p_hwfn,
5003					"Failed to release the resource lock for the resource allocation commands\n");
5004		}
5005	}
5006
5007	rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
5008	if (rc != ECORE_SUCCESS)
5009		goto unlock_and_exit;
5010
5011	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
5012		rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
5013					   &resc_unlock_params);
5014		if (rc != ECORE_SUCCESS)
5015			DP_INFO(p_hwfn,
5016				"Failed to release the resource lock for the resource allocation commands\n");
5017	}
5018
5019	/* PPFID bitmap */
5020	if (IS_LEAD_HWFN(p_hwfn)) {
5021		rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
5022		if (rc != ECORE_SUCCESS)
5023			return rc;
5024	}
5025
5026#ifndef ASIC_ONLY
5027	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
5028		/* Reduced build contains less PQs */
5029		if (!(p_hwfn->p_dev->b_is_emul_full)) {
5030			resc_num[ECORE_PQ] = 32;
5031			resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
5032					       p_hwfn->enabled_func_idx;
5033		}
5034
5035		/* For AH emulation, since we have a possible maximal number of
5036		 * 16 enabled PFs, in case there are not enough ILT lines -
5037		 * allocate only first PF as RoCE and have all the other ETH
5038		 * only with less ILT lines.
5039		 */
5040		if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
5041			resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
5042							 resc_num[ECORE_ILT],
5043							 roce_min_ilt_lines);
5044	}
5045
5046	/* Correct the common ILT calculation if PF0 has more */
5047	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
5048	    p_hwfn->p_dev->b_is_emul_full &&
5049	    p_hwfn->rel_pf_id &&
5050	    resc_num[ECORE_ILT] < roce_min_ilt_lines)
5051		resc_start[ECORE_ILT] += roce_min_ilt_lines -
5052					 resc_num[ECORE_ILT];
5053#endif
5054
5055	/* Sanity for ILT */
5056	if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
5057	    (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
5058		DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
5059			  RESC_START(p_hwfn, ECORE_ILT),
5060			  RESC_END(p_hwfn, ECORE_ILT) - 1);
5061		return ECORE_INVAL;
5062	}
5063
5064	/* This will also learn the number of SBs from MFW */
5065	if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
5066		return ECORE_INVAL;
5067
5068	ecore_hw_set_feat(p_hwfn);
5069
5070	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
5071		   "The numbers for each resource are:\n");
5072	for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
5073		DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
5074			   ecore_hw_get_resc_name(res_id),
5075			   RESC_NUM(p_hwfn, res_id),
5076			   RESC_START(p_hwfn, res_id));
5077
5078	return ECORE_SUCCESS;
5079
5080unlock_and_exit:
5081	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
5082		ecore_mcp_resc_unlock(p_hwfn, p_ptt,
5083				      &resc_unlock_params);
5084	return rc;
5085}
5086
5087static enum _ecore_status_t
5088ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
5089		      struct ecore_ptt *p_ptt,
5090		      struct ecore_hw_prepare_params *p_params)
5091{
5092	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
5093	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
5094	struct ecore_mcp_link_capabilities *p_caps;
5095	struct ecore_mcp_link_params *link;
5096	enum _ecore_status_t rc;
5097	u32 dcbx_mode;  /* __LINUX__THROW__ */
5098
5099	/* Read global nvm_cfg address */
5100	nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
5101
5102	/* Verify MCP has initialized it */
5103	if (!nvm_cfg_addr) {
5104		DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
5105		if (p_params->b_relaxed_probe)
5106			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
5107		return ECORE_INVAL;
5108	}
5109
	/* Read nvm_cfg1 (notice this is just an offset, not an offsize (TBD)) */
5111	nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
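	/* Illustrative layout (addresses hypothetical): MISC_REG_GEN_PURP_CR0
	 * holds the shmem address of the nvm_cfg structure, and the u32 at
	 * nvm_cfg_addr + 4 is the nvm_cfg1 offset within the MCP scratchpad;
	 * e.g., an offset of 0x840 means every nvm_cfg1 field below is read
	 * from MCP_REG_SCRATCH + 0x840 + OFFSETOF(field).
	 */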
5112
5113	addr = MCP_REG_SCRATCH  + nvm_cfg1_offset +
5114		   OFFSETOF(struct nvm_cfg1, glob) +
5115		   OFFSETOF(struct nvm_cfg1_glob, core_cfg);
5116
5117	core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
5118
5119	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
5120		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
5121	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
5122		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
5123		break;
5124	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
5125		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
5126		break;
5127	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
5128		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
5129		break;
5130	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
5131		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
5132		break;
5133	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
5134		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
5135		break;
5136	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
5137		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
5138		break;
5139	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
5140		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
5141		break;
5142	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
5143		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
5144		break;
5145	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
5146		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
5147		break;
5148	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
5149		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
5150		break;
5151	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
5152		p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
5153		break;
5154	default:
5155		DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
5156			  core_cfg);
5157		break;
5158	}
5159
5160#ifndef __EXTRACT__LINUX__THROW__
5161	/* Read DCBX configuration */
5162	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
5163			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
5164	dcbx_mode = ecore_rd(p_hwfn, p_ptt,
5165			     port_cfg_addr +
5166			     OFFSETOF(struct nvm_cfg1_port, generic_cont0));
5167	dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
5168		>> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
5169	switch (dcbx_mode) {
5170	case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
5171		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
5172		break;
5173	case NVM_CFG1_PORT_DCBX_MODE_CEE:
5174		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
5175		break;
5176	case NVM_CFG1_PORT_DCBX_MODE_IEEE:
5177		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
5178		break;
5179	default:
5180		p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
5181	}
5182#endif
5183
5184	/* Read default link configuration */
5185	link = &p_hwfn->mcp_info->link_input;
5186	p_caps = &p_hwfn->mcp_info->link_capabilities;
5187	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
5188			OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
5189	link_temp = ecore_rd(p_hwfn, p_ptt,
5190			     port_cfg_addr +
5191			     OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
5192	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
5193	link->speed.advertised_speeds = link_temp;
5194	p_caps->speed_capabilities = link->speed.advertised_speeds;
5195
5196	link_temp = ecore_rd(p_hwfn, p_ptt,
5197				 port_cfg_addr +
5198				 OFFSETOF(struct nvm_cfg1_port, link_settings));
5199	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
5200		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
5201	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
5202		link->speed.autoneg = true;
5203		break;
5204	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
5205		link->speed.forced_speed = 1000;
5206		break;
5207	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
5208		link->speed.forced_speed = 10000;
5209		break;
5210	case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
5211		link->speed.forced_speed = 20000;
5212		break;
5213	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
5214		link->speed.forced_speed = 25000;
5215		break;
5216	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
5217		link->speed.forced_speed = 40000;
5218		break;
5219	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
5220		link->speed.forced_speed = 50000;
5221		break;
5222	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
5223		link->speed.forced_speed = 100000;
5224		break;
5225	default:
5226		DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
5227			  link_temp);
5228	}
5229
5230	p_caps->default_speed = link->speed.forced_speed; /* __LINUX__THROW__ */
5231	p_caps->default_speed_autoneg = link->speed.autoneg;
5232
5233	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
5234	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
5235	link->pause.autoneg = !!(link_temp &
5236				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
5237	link->pause.forced_rx = !!(link_temp &
5238				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
5239	link->pause.forced_tx = !!(link_temp &
5240				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
5241	link->loopback_mode = 0;
5242
5243	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
5244		link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
5245				     OFFSETOF(struct nvm_cfg1_port, ext_phy));
5246		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
5247		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
5248		p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
5249		link->eee.enable = true;
5250		switch (link_temp) {
5251		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
5252			p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
5253			link->eee.enable = false;
5254			break;
5255		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
5256			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
5257			break;
5258		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
5259			p_caps->eee_lpi_timer =
5260				EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
5261			break;
5262		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
5263			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
5264			break;
5265		}
5266
5267		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
5268		link->eee.tx_lpi_enable = link->eee.enable;
5269		link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
5270	} else {
5271		p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
5272	}
5273
5274	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
5275		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
5276		   link->speed.forced_speed, link->speed.advertised_speeds,
5277		   link->speed.autoneg, link->pause.autoneg,
5278		   p_caps->default_eee, p_caps->eee_lpi_timer);
5279
5280	/* Read Multi-function information from shmem */
5281	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
5282		   OFFSETOF(struct nvm_cfg1, glob) +
5283		   OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
5284
5285	generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
5286
5287	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
5288		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
5289
5290	switch (mf_mode) {
5291	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
5292		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
5293		break;
5294	case NVM_CFG1_GLOB_MF_MODE_UFP:
5295		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
5296					 1 << ECORE_MF_LLH_PROTO_CLSS |
5297					 1 << ECORE_MF_UFP_SPECIFIC |
5298					 1 << ECORE_MF_8021Q_TAGGING;
5299		break;
5300	case NVM_CFG1_GLOB_MF_MODE_BD:
5301		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
5302					 1 << ECORE_MF_LLH_PROTO_CLSS |
5303					 1 << ECORE_MF_8021AD_TAGGING;
5304		break;
5305	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
5306		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
5307					 1 << ECORE_MF_LLH_PROTO_CLSS |
5308					 1 << ECORE_MF_LL2_NON_UNICAST |
5309					 1 << ECORE_MF_INTER_PF_SWITCH |
5310					 1 << ECORE_MF_DISABLE_ARFS;
5311		break;
5312	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
5313		p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
5314					 1 << ECORE_MF_LLH_PROTO_CLSS |
5315					 1 << ECORE_MF_LL2_NON_UNICAST;
5316		if (ECORE_IS_BB(p_hwfn->p_dev))
5317			p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
5318		break;
5319	}
5320	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
5321		p_hwfn->p_dev->mf_bits);
5322
5323	if (ECORE_IS_CMT(p_hwfn->p_dev))
5324		p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
5325
5326#ifndef __EXTRACT__LINUX__THROW__
	/* This duplicates the switch above, but it's easier to throw this
	 * away in the Linux build this way. Long term, it might be better to
	 * have getters for the needed ECORE_MF_* fields, convert client code
	 * and eliminate this.
	 */
5332	switch (mf_mode) {
5333	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
5334		p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
5335		break;
5336	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
5337		p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
5338		break;
5339	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
5340		p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
5341		break;
5342	case NVM_CFG1_GLOB_MF_MODE_UFP:
5343		p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
5344		break;
5345	}
5346#endif
5347
	/* Read device capabilities information from shmem */
5349	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
5350		   OFFSETOF(struct nvm_cfg1, glob) +
5351		   OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
5352
5353	device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
5354	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
5355		OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
5356				 &p_hwfn->hw_info.device_capabilities);
5357	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
5358		OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
5359				 &p_hwfn->hw_info.device_capabilities);
5360	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
5361		OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
5362				 &p_hwfn->hw_info.device_capabilities);
5363	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
5364		OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
5365				 &p_hwfn->hw_info.device_capabilities);
5366	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
5367		OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
5368				 &p_hwfn->hw_info.device_capabilities);
5369
5370	rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
5371	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
5372		rc = ECORE_SUCCESS;
5373		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
5374	}
5375
5376	return rc;
5377}
5378
5379static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
5380				struct ecore_ptt *p_ptt)
5381{
5382	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
5383	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
5384	struct ecore_dev *p_dev = p_hwfn->p_dev;
5385
5386	num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
5387
	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
	 * In case of CMT in BB, only the "even" functions are enabled, and
	 * thus the number of functions for both hwfns is learned from the
	 * same bits.
	 */
5396	reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
5397
5398	if (reg_function_hide & 0x1) {
5399		if (ECORE_IS_BB(p_dev)) {
5400			if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
5401				num_funcs = 0;
5402				eng_mask = 0xaaaa;
5403			} else {
5404				num_funcs = 1;
5405				eng_mask = 0x5554;
5406			}
5407		} else {
5408			num_funcs = 1;
5409			eng_mask = 0xfffe;
5410		}
5411
5412		/* Get the number of the enabled functions on the engine */
5413		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
5414		while (tmp) {
5415			if (tmp & 0x1)
5416				num_funcs++;
5417			tmp >>= 0x1;
5418		}
5419
5420		/* Get the PF index within the enabled functions */
5421		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
5422		tmp = reg_function_hide & eng_mask & low_pfs_mask;
5423		while (tmp) {
5424			if (tmp & 0x1)
5425				enabled_func_idx--;
5426			tmp >>= 0x1;
5427		}
5428	}
5429
5430	p_hwfn->num_funcs_on_engine = num_funcs;
5431	p_hwfn->enabled_func_idx = enabled_func_idx;
5432
5433#ifndef ASIC_ONLY
5434	if (CHIP_REV_IS_FPGA(p_dev)) {
5435		DP_NOTICE(p_hwfn, false,
5436			  "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
5437		p_hwfn->num_funcs_on_engine = 4;
5438	}
5439#endif
5440
5441	DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
5442		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
5443		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
5444		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
5445}
5446
5447static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
5448				      struct ecore_ptt *p_ptt)
5449{
5450	struct ecore_dev *p_dev = p_hwfn->p_dev;
5451	u32 port_mode;
5452
5453#ifndef ASIC_ONLY
5454	/* Read the port mode */
5455	if (CHIP_REV_IS_FPGA(p_dev))
5456		port_mode = 4;
5457	else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
5458		/* In CMT on emulation, assume 1 port */
5459		port_mode = 1;
5460	else
5461#endif
5462	port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
5463
5464	if (port_mode < 3) {
5465		p_dev->num_ports_in_engine = 1;
5466	} else if (port_mode <= 5) {
5467		p_dev->num_ports_in_engine = 2;
5468	} else {
5469		DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
5470			  p_dev->num_ports_in_engine);
5471
5472		/* Default num_ports_in_engine to something */
5473		p_dev->num_ports_in_engine = 1;
5474	}
5475}
5476
5477static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
5478					 struct ecore_ptt *p_ptt)
5479{
5480	struct ecore_dev *p_dev = p_hwfn->p_dev;
5481	u32 port;
5482	int i;
5483
5484	p_dev->num_ports_in_engine = 0;
5485
5486#ifndef ASIC_ONLY
5487	if (CHIP_REV_IS_EMUL(p_dev)) {
5488		port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
5489		switch ((port & 0xf000) >> 12) {
5490		case 1:
5491			p_dev->num_ports_in_engine = 1;
5492			break;
5493		case 3:
5494			p_dev->num_ports_in_engine = 2;
5495			break;
5496		case 0xf:
5497			p_dev->num_ports_in_engine = 4;
5498			break;
5499		default:
5500			DP_NOTICE(p_hwfn, false,
5501				  "Unknown port mode in ECO_RESERVED %08x\n",
5502				  port);
5503		}
5504	} else
5505#endif
5506	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
5507		port = ecore_rd(p_hwfn, p_ptt,
5508				CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4));
5509		if (port & 1)
5510			p_dev->num_ports_in_engine++;
5511	}
5512
5513	if (!p_dev->num_ports_in_engine) {
5514		DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
5515
5516		/* Default num_ports_in_engine to something */
5517		p_dev->num_ports_in_engine = 1;
5518	}
5519}
5520
5521static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
5522				   struct ecore_ptt *p_ptt)
5523{
5524	struct ecore_dev *p_dev = p_hwfn->p_dev;
5525
5526	/* Determine the number of ports per engine */
5527	if (ECORE_IS_BB(p_dev))
5528		ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
5529	else
5530		ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
5531
5532	/* Get the total number of ports of the device */
5533	if (ECORE_IS_CMT(p_dev)) {
5534		/* In CMT there is always only one port */
5535		p_dev->num_ports = 1;
5536#ifndef ASIC_ONLY
5537	} else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
5538		p_dev->num_ports = p_dev->num_ports_in_engine *
5539				   ecore_device_num_engines(p_dev);
5540#endif
5541	} else {
5542		u32 addr, global_offsize, global_addr;
5543
5544		addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
5545					    PUBLIC_GLOBAL);
5546		global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
5547		global_addr = SECTION_ADDR(global_offsize, 0);
5548		addr = global_addr + OFFSETOF(struct public_global, max_ports);
5549		p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
5550	}
5551}
5552
5553static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
5554				   struct ecore_ptt *p_ptt)
5555{
5556	struct ecore_mcp_link_capabilities *p_caps;
5557	u32 eee_status;
5558
5559	p_caps = &p_hwfn->mcp_info->link_capabilities;
5560	if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
5561		return;
5562
5563	p_caps->eee_speed_caps = 0;
5564	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
5565			      OFFSETOF(struct public_port, eee_status));
5566	eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
5567			EEE_SUPPORTED_SPEED_OFFSET;
5568	if (eee_status & EEE_1G_SUPPORTED)
5569		p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
5570	if (eee_status & EEE_10G_ADV)
5571		p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
5572}
5573
5574static enum _ecore_status_t
5575ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
5576		  enum ecore_pci_personality personality,
5577		  struct ecore_hw_prepare_params *p_params)
5578{
5579	bool drv_resc_alloc = p_params->drv_resc_alloc;
5580	enum _ecore_status_t rc;
5581
	/* Since all information is common, only the first hwfn should do this */
5583	if (IS_LEAD_HWFN(p_hwfn)) {
5584		rc = ecore_iov_hw_info(p_hwfn);
5585		if (rc != ECORE_SUCCESS) {
5586			if (p_params->b_relaxed_probe)
5587				p_params->p_relaxed_res =
5588						ECORE_HW_PREPARE_BAD_IOV;
5589			else
5590				return rc;
5591		}
5592	}
5593
5594	if (IS_LEAD_HWFN(p_hwfn))
5595		ecore_hw_info_port_num(p_hwfn, p_ptt);
5596
5597	ecore_mcp_get_capabilities(p_hwfn, p_ptt);
5598
5599#ifndef ASIC_ONLY
5600	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
5601#endif
5602	rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
5603	if (rc != ECORE_SUCCESS)
5604		return rc;
5605#ifndef ASIC_ONLY
5606	}
5607#endif
5608
5609	rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
5610	if (rc != ECORE_SUCCESS) {
5611		if (p_params->b_relaxed_probe)
5612			p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
5613		else
5614			return rc;
5615	}
5616
5617#ifndef ASIC_ONLY
5618	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
5619#endif
5620	OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
5621		    p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
5622#ifndef ASIC_ONLY
5623	} else {
5624		static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};
5625
5626		OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
5627		p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
5628	}
5629#endif
5630
5631	if (ecore_mcp_is_init(p_hwfn)) {
5632		if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
5633			p_hwfn->hw_info.ovlan =
5634				p_hwfn->mcp_info->func_info.ovlan;
5635
5636		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
5637
5638		ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
5639
5640		ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
5641	}
5642
5643	if (personality != ECORE_PCI_DEFAULT) {
5644		p_hwfn->hw_info.personality = personality;
5645	} else if (ecore_mcp_is_init(p_hwfn)) {
5646		enum ecore_pci_personality protocol;
5647
5648		protocol = p_hwfn->mcp_info->func_info.protocol;
5649		p_hwfn->hw_info.personality = protocol;
5650	}
5651
5652#ifndef ASIC_ONLY
	/* To overcome the ILT shortage on emulation, at least until we have a
	 * definite answer from the system about it, allow only PF0 to be RoCE.
	 */
5656	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
5657		if (!p_hwfn->rel_pf_id)
5658			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
5659		else
5660			p_hwfn->hw_info.personality = ECORE_PCI_ETH;
5661	}
5662#endif
5663
	/* Although in BB some constellations may support more than 4 TCs,
	 * that can result in a performance penalty in some cases. 4
	 * represents a good tradeoff between performance and flexibility.
	 */
5668	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
5669
	/* Start out with a single active TC. This can be increased either
	 * by DCBx negotiation or by the upper layer driver.
	 */
5673	p_hwfn->hw_info.num_active_tc = 1;
5674
5675	ecore_get_num_funcs(p_hwfn, p_ptt);
5676
5677	if (ecore_mcp_is_init(p_hwfn))
5678		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
5679
5680	/* In case of forcing the driver's default resource allocation, calling
5681	 * ecore_hw_get_resc() should come after initializing the personality
5682	 * and after getting the number of functions, since the calculation of
5683	 * the resources/features depends on them.
5684	 * This order is not harmful if not forcing.
5685	 */
5686	rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
5687	if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
5688		rc = ECORE_SUCCESS;
5689		p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
5690	}
5691
5692	return rc;
5693}
5694
5695#define ECORE_MAX_DEVICE_NAME_LEN	(8)
5696
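/* Produces a short name such as "BB B0" or "AH A1" ('A' + chip_rev, then
 * chip_metal); the output is truncated to at most ECORE_MAX_DEVICE_NAME_LEN
 * characters, including the NUL terminator.
 */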
5697void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)
5698{
5699	u8 n;
5700
5701	n = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);
5702	OSAL_SNPRINTF(name, n, "%s %c%d", ECORE_IS_BB(p_dev) ? "BB" : "AH",
5703		      'A' + p_dev->chip_rev, (int)p_dev->chip_metal);
5704}
5705
5706static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
5707					       struct ecore_ptt *p_ptt)
5708{
5709	struct ecore_dev *p_dev = p_hwfn->p_dev;
5710	u16 device_id_mask;
5711	u32 tmp;
5712
5713	/* Read Vendor Id / Device Id */
5714	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
5715				  &p_dev->vendor_id);
5716	OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
5717				  &p_dev->device_id);
5718
5719	/* Determine type */
5720	device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
5721	switch (device_id_mask) {
5722	case ECORE_DEV_ID_MASK_BB:
5723		p_dev->type = ECORE_DEV_TYPE_BB;
5724		break;
5725	case ECORE_DEV_ID_MASK_AH:
5726		p_dev->type = ECORE_DEV_TYPE_AH;
5727		break;
5728	case ECORE_DEV_ID_MASK_E5:
5729		p_dev->type = ECORE_DEV_TYPE_E5;
5730		break;
5731	default:
5732		DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
5733			  p_dev->device_id);
5734		return ECORE_ABORTED;
5735	}
5736
5737	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
5738	p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
5739	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
5740	p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
5741
5742	/* Learn number of HW-functions */
5743	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
5744
5745	if (tmp & (1 << p_hwfn->rel_pf_id)) {
5746		DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
5747		p_dev->num_hwfns = 2;
5748	} else {
5749		p_dev->num_hwfns = 1;
5750	}
5751
5752#ifndef ASIC_ONLY
5753	if (CHIP_REV_IS_EMUL(p_dev)) {
		/* For some reason we have problems with this register
		 * in B0 emulation; simply assume no CMT.
		 */
5757		DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n");
5758		p_dev->num_hwfns = 1;
5759	}
5760#endif
5761
5762	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
5763	p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
5764	tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
5765	p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
5766
5767	DP_INFO(p_dev->hwfns,
5768		"Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
5769		ECORE_IS_BB(p_dev) ? "BB" : "AH",
5770		'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
5771		p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
5772		p_dev->chip_metal);
5773
5774	if (ECORE_IS_BB_A0(p_dev)) {
5775		DP_NOTICE(p_dev->hwfns, false,
5776			  "The chip type/rev (BB A0) is not supported!\n");
5777		return ECORE_ABORTED;
5778	}
5779
5780#ifndef ASIC_ONLY
5781	if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
5782		ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
5783
5784	if (CHIP_REV_IS_EMUL(p_dev)) {
5785		tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
5786		if (tmp & (1 << 29)) {
5787			DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n");
5788			p_dev->b_is_emul_full = true;
5789		} else {
5790			DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n");
5791		}
5792	}
5793#endif
5794
5795	return ECORE_SUCCESS;
5796}
5797
5798#ifndef LINUX_REMOVE
5799void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev)
5800{
5801	int j;
5802
5803	if (IS_VF(p_dev))
5804		return;
5805
5806	for_each_hwfn(p_dev, j) {
5807		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
5808
5809		DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n");
5810
5811		p_hwfn->hw_init_done = false;
5812
5813		ecore_ptt_invalidate(p_hwfn);
5814	}
5815}
5816
5817void ecore_hw_hibernate_resume(struct ecore_dev *p_dev)
5818{
5819	int j = 0;
5820
5821	if (IS_VF(p_dev))
5822		return;
5823
5824	for_each_hwfn(p_dev, j) {
5825		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
5826		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
5827
5828		ecore_hw_hwfn_prepare(p_hwfn);
5829
5830		if (!p_ptt)
5831			DP_NOTICE(p_hwfn, false, "ptt acquire failed\n");
5832		else {
5833			ecore_load_mcp_offsets(p_hwfn, p_ptt);
5834			ecore_ptt_release(p_hwfn, p_ptt);
5835		}
5836		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n");
5837	}
5838}
5839
5840#endif
5841
5842static enum _ecore_status_t
5843ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
5844			void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
5845			struct ecore_hw_prepare_params *p_params)
5846{
5847	struct ecore_mdump_retain_data mdump_retain;
5848	struct ecore_dev *p_dev = p_hwfn->p_dev;
5849	struct ecore_mdump_info mdump_info;
5850	enum _ecore_status_t rc = ECORE_SUCCESS;
5851
5852	/* Split PCI bars evenly between hwfns */
5853	p_hwfn->regview = p_regview;
5854	p_hwfn->doorbells = p_doorbells;
5855	p_hwfn->db_phys_addr = db_phys_addr;
5856
5857#ifndef LINUX_REMOVE
	p_hwfn->reg_offset = (u8 *)p_hwfn->regview -
			     (u8 *)p_hwfn->p_dev->regview;
	p_hwfn->db_offset = (u8 *)p_hwfn->doorbells -
			    (u8 *)p_hwfn->p_dev->doorbells;
5860#endif
5861
5862	if (IS_VF(p_dev))
5863		return ecore_vf_hw_prepare(p_hwfn);
5864
5865	/* Validate that chip access is feasible */
5866	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
5867		DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n");
5868		if (p_params->b_relaxed_probe)
5869			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
5870		return ECORE_INVAL;
5871	}
5872
5873	get_function_id(p_hwfn);
5874
5875	/* Allocate PTT pool */
5876	rc = ecore_ptt_pool_alloc(p_hwfn);
5877	if (rc) {
5878		DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
5879		if (p_params->b_relaxed_probe)
5880			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5881		goto err0;
5882	}
5883
5884	/* Allocate the main PTT */
5885	p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
5886
5887	/* First hwfn learns basic information, e.g., number of hwfns */
5888	if (!p_hwfn->my_id) {
5889		rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
5890		if (rc != ECORE_SUCCESS) {
5891			if (p_params->b_relaxed_probe)
5892				p_params->p_relaxed_res =
5893					ECORE_HW_PREPARE_FAILED_DEV;
5894			goto err1;
5895		}
5896	}
5897
5898	ecore_hw_hwfn_prepare(p_hwfn);
5899
5900	/* Initialize MCP structure */
5901	rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
5902	if (rc) {
5903		DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
5904		if (p_params->b_relaxed_probe)
5905			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5906		goto err1;
5907	}
5908
5909	/* Read the device configuration information from the HW and SHMEM */
5910	rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
5911			       p_params->personality, p_params);
5912	if (rc) {
5913		DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
5914		goto err2;
5915	}
5916
	/* Sending a mailbox to the MFW should come after ecore_get_hw_info()
	 * is called, since among other things it sets the number of ports in
	 * an engine.
	 */
5920	if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
5921	    !p_dev->recov_in_prog) {
5922		rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
5923		if (rc != ECORE_SUCCESS)
5924			DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
5925	}
5926
5927	/* Check if mdump logs/data are present and update the epoch value */
5928	if (IS_LEAD_HWFN(p_hwfn)) {
5929#ifndef ASIC_ONLY
5930		if (!CHIP_REV_IS_EMUL(p_dev)) {
5931#endif
5932		rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
5933					      &mdump_info);
5934		if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
5935			DP_NOTICE(p_hwfn, false,
5936				  "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
5937
5938		rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
5939						&mdump_retain);
5940		if (rc == ECORE_SUCCESS && mdump_retain.valid)
5941			DP_NOTICE(p_hwfn, false,
5942				  "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
5943				  mdump_retain.epoch, mdump_retain.pf,
5944				  mdump_retain.status);
5945
5946		ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
5947					   p_params->epoch);
5948#ifndef ASIC_ONLY
5949		}
5950#endif
5951	}
5952
5953	/* Allocate the init RT array and initialize the init-ops engine */
5954	rc = ecore_init_alloc(p_hwfn);
5955	if (rc) {
5956		DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
5957		if (p_params->b_relaxed_probe)
5958			p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
5959		goto err2;
5960	}
5961
5962#ifndef ASIC_ONLY
5963	if (CHIP_REV_IS_FPGA(p_dev)) {
5964		DP_NOTICE(p_hwfn, false,
5965			  "FPGA: workaround; Prevent DMAE parities\n");
5966		ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
5967			 7);
5968
5969		DP_NOTICE(p_hwfn, false,
5970			  "FPGA: workaround: Set VF bar0 size\n");
5971		ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
5972			 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
5973	}
5974#endif
5975
5976	return rc;
5977err2:
5978	if (IS_LEAD_HWFN(p_hwfn))
5979		ecore_iov_free_hw_info(p_dev);
5980	ecore_mcp_free(p_hwfn);
5981err1:
5982	ecore_hw_hwfn_free(p_hwfn);
5983err0:
5984	return rc;
5985}
5986
5987enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
5988				      struct ecore_hw_prepare_params *p_params)
5989{
5990	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
5991	enum _ecore_status_t rc;
5992
5993	p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
5994	p_dev->allow_mdump = p_params->allow_mdump;
5995
5996	if (p_params->b_relaxed_probe)
5997		p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
5998
5999	/* Store the precompiled init data ptrs */
6000	if (IS_PF(p_dev))
6001		ecore_init_iro_array(p_dev);
6002
6003	/* Initialize the first hwfn - will learn number of hwfns */
6004	rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
6005				     p_dev->doorbells, p_dev->db_phys_addr,
6006				     p_params);
6007	if (rc != ECORE_SUCCESS)
6008		return rc;
6009
6010	p_params->personality = p_hwfn->hw_info.personality;
6011
	/* Initialize the 2nd hwfn if necessary */
6013	if (ECORE_IS_CMT(p_dev)) {
6014		void OSAL_IOMEM *p_regview, *p_doorbell;
6015		u8 OSAL_IOMEM *addr;
6016		u64 db_phys_addr;
6017		u32 offset;
6018
6019		/* adjust bar offset for second engine */
6020		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
6021					   BAR_ID_0) / 2;
6022		addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
6023		p_regview = (void OSAL_IOMEM *)addr;
6024
6025		offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
6026					   BAR_ID_1) / 2;
6027		addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
6028		p_doorbell = (void OSAL_IOMEM *)addr;
6029		db_phys_addr = p_dev->db_phys_addr + offset;
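		/* Example of the even split (hypothetical BAR sizes): with a
		 * 32MB BAR0 and an 8MB doorbell BAR, hwfn1 gets its regview
		 * at p_dev->regview + 16MB and its doorbells at
		 * p_dev->doorbells + 4MB, with db_phys_addr advanced by the
		 * same 4MB.
		 */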
6030
6031		/* prepare second hw function */
6032		rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
6033					     p_doorbell, db_phys_addr,
6034					     p_params);
6035
		/* In case of an error, we need to free the previously
		 * initialized hwfn 0.
		 */
6039		if (rc != ECORE_SUCCESS) {
6040			if (p_params->b_relaxed_probe)
6041				p_params->p_relaxed_res =
6042						ECORE_HW_PREPARE_FAILED_ENG2;
6043
6044			if (IS_PF(p_dev)) {
6045				ecore_init_free(p_hwfn);
6046				ecore_mcp_free(p_hwfn);
6047				ecore_hw_hwfn_free(p_hwfn);
6048			} else {
6049				DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
6050			}
6051			return rc;
6052		}
6053	}
6054
6055	return rc;
6056}
6057
6058void ecore_hw_remove(struct ecore_dev *p_dev)
6059{
6060	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
6061	int i;
6062
6063	if (IS_PF(p_dev))
6064		ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
6065						 ECORE_OV_DRIVER_STATE_NOT_LOADED);
6066
6067	for_each_hwfn(p_dev, i) {
6068		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6069
6070		if (IS_VF(p_dev)) {
6071			ecore_vf_pf_release(p_hwfn);
6072			continue;
6073		}
6074
6075		ecore_init_free(p_hwfn);
6076		ecore_hw_hwfn_free(p_hwfn);
6077		ecore_mcp_free(p_hwfn);
6078
6079#ifdef CONFIG_ECORE_LOCK_ALLOC
6080		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
6081#endif
6082	}
6083
6084	ecore_iov_free_hw_info(p_dev);
6085}
6086
6087static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
6088				      struct ecore_chain *p_chain)
6089{
6090	void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
6091	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
6092	struct ecore_chain_next *p_next;
6093	u32 size, i;
6094
6095	if (!p_virt)
6096		return;
6097
6098	size = p_chain->elem_size * p_chain->usable_per_page;
6099
6100	for (i = 0; i < p_chain->page_cnt; i++) {
6101		if (!p_virt)
6102			break;
6103
6104		p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
6105		p_virt_next = p_next->next_virt;
6106		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
6107
6108		OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
6109				       ECORE_CHAIN_PAGE_SIZE);
6110
6111		p_virt = p_virt_next;
6112		p_phys = p_phys_next;
6113	}
6114}
6115
6116static void ecore_chain_free_single(struct ecore_dev *p_dev,
6117				    struct ecore_chain *p_chain)
6118{
6119	if (!p_chain->p_virt_addr)
6120		return;
6121
6122	OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
6123			       p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
6124}
6125
6126static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
6127				 struct ecore_chain *p_chain)
6128{
6129	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
6130	u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
6131	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
6132
6133	if (!pp_virt_addr_tbl)
6134		return;
6135
6136	if (!p_pbl_virt)
6137		goto out;
6138
6139	for (i = 0; i < page_cnt; i++) {
6140		if (!pp_virt_addr_tbl[i])
6141			break;
6142
6143		OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
6144				       *(dma_addr_t *)p_pbl_virt,
6145				       ECORE_CHAIN_PAGE_SIZE);
6146
6147		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
6148	}
6149
6150	pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
6151
6152	if (!p_chain->b_external_pbl) {
6153		OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
6154				       p_chain->pbl_sp.p_phys_table, pbl_size);
6155	}
6156out:
6157	OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
6158	p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
6159}
6160
6161void ecore_chain_free(struct ecore_dev *p_dev,
6162		      struct ecore_chain *p_chain)
6163{
6164	switch (p_chain->mode) {
6165	case ECORE_CHAIN_MODE_NEXT_PTR:
6166		ecore_chain_free_next_ptr(p_dev, p_chain);
6167		break;
6168	case ECORE_CHAIN_MODE_SINGLE:
6169		ecore_chain_free_single(p_dev, p_chain);
6170		break;
6171	case ECORE_CHAIN_MODE_PBL:
6172		ecore_chain_free_pbl(p_dev, p_chain);
6173		break;
6174	}
6175}
6176
6177static enum _ecore_status_t
6178ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
6179			       enum ecore_chain_cnt_type cnt_type,
6180			       osal_size_t elem_size, u32 page_cnt)
6181{
6182	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
6183
6184	/* The actual chain size can be larger than the maximal possible value
6185	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
6187	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
6188	 * size/capacity fields are of a u32 type.
6189	 */
6190	if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
6191	     chain_size > ((u32)ECORE_U16_MAX + 1)) ||
6192	    (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
6193	     chain_size > ECORE_U32_MAX)) {
6194		DP_NOTICE(p_dev, true,
6195			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
6196			  (unsigned long long)chain_size);
6197		return ECORE_INVAL;
6198	}
6199
6200	return ECORE_SUCCESS;
6201}
6202
6203static enum _ecore_status_t
6204ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
6205{
6206	void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
6207	dma_addr_t p_phys = 0;
6208	u32 i;
6209
6210	for (i = 0; i < p_chain->page_cnt; i++) {
6211		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
6212						 ECORE_CHAIN_PAGE_SIZE);
6213		if (!p_virt) {
6214			DP_NOTICE(p_dev, false,
6215				  "Failed to allocate chain memory\n");
6216			return ECORE_NOMEM;
6217		}
6218
6219		if (i == 0) {
6220			ecore_chain_init_mem(p_chain, p_virt, p_phys);
6221			ecore_chain_reset(p_chain);
6222		} else {
6223			ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
6224						       p_virt, p_phys);
6225		}
6226
6227		p_virt_prev = p_virt;
6228	}
6229	/* Last page's next element should point to the beginning of the
6230	 * chain.
6231	 */
6232	ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
6233				       p_chain->p_virt_addr,
6234				       p_chain->p_phys_addr);
6235
6236	return ECORE_SUCCESS;
6237}
6238
6239static enum _ecore_status_t
6240ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
6241{
6242	dma_addr_t p_phys = 0;
6243	void *p_virt = OSAL_NULL;
6244
6245	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
6246	if (!p_virt) {
6247		DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
6248		return ECORE_NOMEM;
6249	}
6250
6251	ecore_chain_init_mem(p_chain, p_virt, p_phys);
6252	ecore_chain_reset(p_chain);
6253
6254	return ECORE_SUCCESS;
6255}
6256
6257static enum _ecore_status_t
6258ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
6259		      struct ecore_chain *p_chain,
6260		      struct ecore_chain_ext_pbl *ext_pbl)
6261{
6262	u32 page_cnt = p_chain->page_cnt, size, i;
6263	dma_addr_t p_phys = 0, p_pbl_phys = 0;
6264	void **pp_virt_addr_tbl = OSAL_NULL;
6265	u8 *p_pbl_virt = OSAL_NULL;
6266	void *p_virt = OSAL_NULL;
6267
6268	size = page_cnt * sizeof(*pp_virt_addr_tbl);
6269	pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
6270	if (!pp_virt_addr_tbl) {
6271		DP_NOTICE(p_dev, false,
6272			  "Failed to allocate memory for the chain virtual addresses table\n");
6273		return ECORE_NOMEM;
6274	}
6275
6276	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be contiguous.
6278	 * ecore_chain_init_pbl_mem() is called even in a case of an allocation
6279	 * failure, since pp_virt_addr_tbl was previously allocated, and it
6280	 * should be saved to allow its freeing during the error flow.
6281	 */
6282	size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
6283
6284	if (ext_pbl == OSAL_NULL) {
6285		p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
6286	} else {
6287		p_pbl_virt = ext_pbl->p_pbl_virt;
6288		p_pbl_phys = ext_pbl->p_pbl_phys;
6289		p_chain->b_external_pbl = true;
6290	}
6291
6292	ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
6293				 pp_virt_addr_tbl);
6294	if (!p_pbl_virt) {
6295		DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
6296		return ECORE_NOMEM;
6297	}
6298
6299	for (i = 0; i < page_cnt; i++) {
6300		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
6301						 ECORE_CHAIN_PAGE_SIZE);
6302		if (!p_virt) {
6303			DP_NOTICE(p_dev, false,
6304				  "Failed to allocate chain memory\n");
6305			return ECORE_NOMEM;
6306		}
6307
6308		if (i == 0) {
6309			ecore_chain_init_mem(p_chain, p_virt, p_phys);
6310			ecore_chain_reset(p_chain);
6311		}
6312
6313		/* Fill the PBL table with the physical address of the page */
6314		*(dma_addr_t *)p_pbl_virt = p_phys;
6315		/* Keep the virtual address of the page */
6316		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
6317
6318		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
6319	}
6320
6321	return ECORE_SUCCESS;
6322}
6323
6324enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
6325				       enum ecore_chain_use_mode intended_use,
6326				       enum ecore_chain_mode mode,
6327				       enum ecore_chain_cnt_type cnt_type,
6328				       u32 num_elems, osal_size_t elem_size,
6329				       struct ecore_chain *p_chain,
6330				       struct ecore_chain_ext_pbl *ext_pbl)
6331{
6332	u32 page_cnt;
6333	enum _ecore_status_t rc = ECORE_SUCCESS;
6334
6335	if (mode == ECORE_CHAIN_MODE_SINGLE)
6336		page_cnt = 1;
6337	else
6338		page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
6339
6340	rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
6341					    page_cnt);
6342	if (rc) {
6343		DP_NOTICE(p_dev, false,
6344			  "Cannot allocate a chain with the given arguments:\n"
6345			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
6346			  intended_use, mode, cnt_type, num_elems, elem_size);
6347		return rc;
6348	}
6349
6350	ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
6351				mode, cnt_type, p_dev->dp_ctx);
6352
6353	switch (mode) {
6354	case ECORE_CHAIN_MODE_NEXT_PTR:
6355		rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
6356		break;
6357	case ECORE_CHAIN_MODE_SINGLE:
6358		rc = ecore_chain_alloc_single(p_dev, p_chain);
6359		break;
6360	case ECORE_CHAIN_MODE_PBL:
6361		rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
6362		break;
6363	}
6364	if (rc)
6365		goto nomem;
6366
6367	return ECORE_SUCCESS;
6368
6369nomem:
6370	ecore_chain_free(p_dev, p_chain);
6371	return rc;
6372}
6373
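/* The ecore_fw_* translators below map a SW-relative resource index to its
 * absolute FW/HW index. E.g. (hypothetical values), if
 * RESC_START(p_hwfn, ECORE_L2_QUEUE) is 16 and RESC_NUM is 32, then
 * src_id 3 maps to dst_id 19, while src_id 32 is rejected as out of range.
 */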
6374enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
6375				       u16 src_id, u16 *dst_id)
6376{
6377	if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
6378		u16 min, max;
6379
6380		min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
6381		max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
6382		DP_NOTICE(p_hwfn, true, "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
6383			  src_id, min, max);
6384
6385		return ECORE_INVAL;
6386	}
6387
6388	*dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
6389
6390	return ECORE_SUCCESS;
6391}
6392
6393enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
6394				    u8 src_id, u8 *dst_id)
6395{
6396	if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
6397		u8 min, max;
6398
6399		min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
6400		max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
6401		DP_NOTICE(p_hwfn, true, "vport id [%d] is not valid, available indices [%d - %d]\n",
6402			  src_id, min, max);
6403
6404		return ECORE_INVAL;
6405	}
6406
6407	*dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
6408
6409	return ECORE_SUCCESS;
6410}
6411
6412enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
6413				      u8 src_id, u8 *dst_id)
6414{
6415	if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
6416		u8 min, max;
6417
6418		min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
6419		max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
6420		DP_NOTICE(p_hwfn, true, "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
6421			  src_id, min, max);
6422
6423		return ECORE_INVAL;
6424	}
6425
6426	*dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
6427
6428	return ECORE_SUCCESS;
6429}
6430
6431enum _ecore_status_t
6432ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
6433				  struct ecore_ptt *p_ptt)
6434{
6435	if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
			 1 << (p_hwfn->abs_pf_id / 2));
6439		ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
6440		return ECORE_SUCCESS;
6441	} else {
6442		DP_NOTICE(p_hwfn, false,
6443			  "This function can't be set as default\n");
6444		return ECORE_INVAL;
6445	}
6446}
6447
6448static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
6449					       struct ecore_ptt *p_ptt,
6450					       u32 hw_addr, void *p_eth_qzone,
6451					       osal_size_t eth_qzone_size,
6452					       u8 timeset)
6453{
6454	struct coalescing_timeset *p_coal_timeset;
6455
6456	if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
6457		DP_NOTICE(p_hwfn, true,
6458			  "Coalescing configuration not enabled\n");
6459		return ECORE_INVAL;
6460	}
6461
6462	p_coal_timeset = p_eth_qzone;
6463	OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
6464	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
6465	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
6466	ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
6467
6468	return ECORE_SUCCESS;
6469}
6470
6471enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
6472					      u16 rx_coal, u16 tx_coal,
6473					      void *p_handle)
6474{
6475	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
6476	enum _ecore_status_t rc = ECORE_SUCCESS;
6477	struct ecore_ptt *p_ptt;
6478
	/* TODO - We configure a single queue's coalescing here, but claim
	 * that all queues abide by the same configuration, for both PF
	 * and VF.
	 */
6483
6484#ifdef CONFIG_ECORE_SRIOV
6485	if (IS_VF(p_hwfn->p_dev))
6486		return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
6487						tx_coal, p_cid);
6488#endif /* #ifdef CONFIG_ECORE_SRIOV */
6489
6490	p_ptt = ecore_ptt_acquire(p_hwfn);
6491	if (!p_ptt)
6492		return ECORE_AGAIN;
6493
6494	if (rx_coal) {
6495		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
6496		if (rc)
6497			goto out;
6498		p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
6499	}
6500
6501	if (tx_coal) {
6502		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
6503		if (rc)
6504			goto out;
6505		p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
6506	}
6507out:
6508	ecore_ptt_release(p_hwfn, p_ptt);
6509
6510	return rc;
6511}
6512
6513enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
6514					    struct ecore_ptt *p_ptt,
6515					    u16 coalesce,
6516					    struct ecore_queue_cid *p_cid)
6517{
6518	struct ustorm_eth_queue_zone eth_qzone;
6519	u8 timeset, timer_res;
6520	u32 address;
6521	enum _ecore_status_t rc;
6522
	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
6524	if (coalesce <= 0x7F)
6525		timer_res = 0;
6526	else if (coalesce <= 0xFF)
6527		timer_res = 1;
6528	else if (coalesce <= 0x1FF)
6529		timer_res = 2;
6530	else {
6531		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
6532		return ECORE_INVAL;
6533	}
6534	timeset = (u8)(coalesce >> timer_res);
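	/* E.g., coalesce = 300 (0x12c) falls in the 0x1ff bracket, so
	 * timer_res = 2 and timeset = 300 >> 2 = 75; the effective value
	 * programmed is timeset << timer_res = 300.
	 */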
6535
6536	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
6537				     p_cid->sb_igu_id, false);
6538	if (rc != ECORE_SUCCESS)
6539		goto out;
6540
6541	address = BAR0_MAP_REG_USDM_RAM +
6542		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
6543
6544	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
6545				sizeof(struct ustorm_eth_queue_zone), timeset);
6549out:
6550	return rc;
6551}
6552
6553enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
6554					    struct ecore_ptt *p_ptt,
6555					    u16 coalesce,
6556					    struct ecore_queue_cid *p_cid)
6557{
6558	struct xstorm_eth_queue_zone eth_qzone;
6559	u8 timeset, timer_res;
6560	u32 address;
6561	enum _ecore_status_t rc;
6562
	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
6564	if (coalesce <= 0x7F)
6565		timer_res = 0;
6566	else if (coalesce <= 0xFF)
6567		timer_res = 1;
6568	else if (coalesce <= 0x1FF)
6569		timer_res = 2;
6570	else {
6571		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
6572		return ECORE_INVAL;
6573	}
6574	timeset = (u8)(coalesce >> timer_res);
6575
6576	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
6577				     p_cid->sb_igu_id, true);
6578	if (rc != ECORE_SUCCESS)
6579		goto out;
6580
6581	address = BAR0_MAP_REG_XSDM_RAM +
6582		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
6583
6584	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
6585				sizeof(struct xstorm_eth_queue_zone), timeset);
6586out:
6587	return rc;
6588}
6589
/* Calculate the final WFQ values for all vports and configure them.
 * After this configuration each vport must have an approximate min rate of
 * vport_wfq * min_pf_rate / ECORE_WFQ_UNIT.
 */
6594static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
6595					       struct ecore_ptt *p_ptt,
6596					       u32 min_pf_rate)
6597{
6598	struct init_qm_vport_params *vport_params;
6599	int i;
6600
6601	vport_params = p_hwfn->qm_info.qm_vport_params;
6602
6603	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6604		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
6605
6606		vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
6607					    min_pf_rate;
6608		ecore_init_vport_wfq(p_hwfn, p_ptt,
6609				     vport_params[i].first_tx_pq_id,
6610				     vport_params[i].vport_wfq);
6611	}
6612}
6613
static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
6617	int i;
6618
6619	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
6620		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
6621}
6622
6623static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
6624					     struct ecore_ptt *p_ptt)
6625{
6626	struct init_qm_vport_params *vport_params;
6627	int i;
6628
6629	vport_params = p_hwfn->qm_info.qm_vport_params;
6630
6631	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6632		ecore_init_wfq_default_param(p_hwfn);
6633		ecore_init_vport_wfq(p_hwfn, p_ptt,
6634				     vport_params[i].first_tx_pq_id,
6635				     vport_params[i].vport_wfq);
6636	}
6637}
6638
6639/* This function performs several validations for WFQ
6640 * configuration and required min rate for a given vport
6641 * 1. req_rate must be greater than one percent of min_pf_rate.
6642 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
6643 *    rates to get less than one percent of min_pf_rate.
6644 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
6645 */
6646static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
6647						 u16 vport_id, u32 req_rate,
6648						 u32 min_pf_rate)
6649{
6650	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
6651	int non_requested_count = 0, req_count = 0, i, num_vports;
6652
6653	num_vports = p_hwfn->qm_info.num_vports;
6654
6655	/* Accounting for the vports which are configured for WFQ explicitly */
6656	for (i = 0; i < num_vports; i++) {
6657		u32 tmp_speed;
6658
6659		if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
6660			req_count++;
6661			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
6662			total_req_min_rate += tmp_speed;
6663		}
6664	}
6665
6666	/* Include current vport data as well */
6667	req_count++;
6668	total_req_min_rate += req_rate;
6669	non_requested_count = num_vports - req_count;
6670
6671	/* validate possible error cases */
6672	if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
6673		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6674			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
6675			   vport_id, req_rate, min_pf_rate);
6676		return ECORE_INVAL;
6677	}
6678
6679	/* TBD - for number of vports greater than 100 */
6680	if (num_vports > ECORE_WFQ_UNIT) {
6681		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6682			   "Number of vports is greater than %d\n",
6683			   ECORE_WFQ_UNIT);
6684		return ECORE_INVAL;
6685	}
6686
6687	if (total_req_min_rate > min_pf_rate) {
6688		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6689			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
6690			   total_req_min_rate, min_pf_rate);
6691		return ECORE_INVAL;
6692	}
6693
	/* Rate left for the non-requested vports */
6695	total_left_rate = min_pf_rate - total_req_min_rate;
6696	left_rate_per_vp = total_left_rate / non_requested_count;
6697
	/* Validate that non-requested vports each retain at least 1% of min bw */
6699	if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
6700		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
6701			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
6702			   left_rate_per_vp, min_pf_rate);
6703		return ECORE_INVAL;
6704	}
6705
	/* Now req_rate for the given vport passes all scenarios.
	 * Assign final WFQ rates to all vports.
	 */
6709	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
6710	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
6711
6712	for (i = 0; i < num_vports; i++) {
6713		if (p_hwfn->qm_info.wfq_data[i].configured)
6714			continue;
6715
6716		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
6717	}
6718
6719	return ECORE_SUCCESS;
6720}
6721
6722static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
6723				       struct ecore_ptt *p_ptt,
6724				       u16 vp_id, u32 rate)
6725{
6726	struct ecore_mcp_link_state *p_link;
6727	int rc = ECORE_SUCCESS;
6728
6729	p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
6730
6731	if (!p_link->min_pf_rate) {
6732		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
6733		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
6734		return rc;
6735	}
6736
6737	rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
6738
6739	if (rc == ECORE_SUCCESS)
6740		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
6741						   p_link->min_pf_rate);
6742	else
6743		DP_NOTICE(p_hwfn, false,
6744			  "Validation failed while configuring min rate\n");
6745
6746	return rc;
6747}
6748
6749static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
6750						   struct ecore_ptt *p_ptt,
6751						   u32 min_pf_rate)
6752{
6753	bool use_wfq = false;
6754	int rc = ECORE_SUCCESS;
6755	u16 i;
6756
6757	/* Validate all pre configured vports for wfq */
6758	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
6759		u32 rate;
6760
6761		if (!p_hwfn->qm_info.wfq_data[i].configured)
6762			continue;
6763
6764		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
6765		use_wfq = true;
6766
6767		rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
6768		if (rc != ECORE_SUCCESS) {
6769			DP_NOTICE(p_hwfn, false,
6770				  "WFQ validation failed while configuring min rate\n");
6771			break;
6772		}
6773	}
6774
6775	if (rc == ECORE_SUCCESS && use_wfq)
6776		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
6777	else
6778		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
6779
6780	return rc;
6781}
6782
/* Main API for ecore clients to configure a vport min rate.
 * vp_id - vport id within the PF's range [0 - (total_num_vports_per_pf - 1)]
 * rate - speed in Mbps to be assigned to the given vport.
 */
6787int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
6788{
6789	int i, rc = ECORE_INVAL;
6790
6791	/* TBD - for multiple hardware functions - that is 100 gig */
6792	if (ECORE_IS_CMT(p_dev)) {
6793		DP_NOTICE(p_dev, false,
6794			  "WFQ configuration is not supported for this device\n");
6795		return rc;
6796	}
6797
6798	for_each_hwfn(p_dev, i) {
6799		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6800		struct ecore_ptt *p_ptt;
6801
6802		p_ptt = ecore_ptt_acquire(p_hwfn);
6803		if (!p_ptt)
6804			return ECORE_TIMEOUT;
6805
6806		rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
6807
6808		if (rc != ECORE_SUCCESS) {
6809			ecore_ptt_release(p_hwfn, p_ptt);
6810			return rc;
6811		}
6812
6813		ecore_ptt_release(p_hwfn, p_ptt);
6814	}
6815
6816	return rc;
6817}
6818
6819/* API to configure WFQ from mcp link change */
6820void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
6821					   struct ecore_ptt *p_ptt,
6822					   u32 min_pf_rate)
6823{
6824	int i;
6825
6826	/* TBD - for multiple hardware functions - that is 100 gig */
6827	if (ECORE_IS_CMT(p_dev)) {
6828		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
6829			   "WFQ configuration is not supported for this device\n");
6830		return;
6831	}
6832
6833	for_each_hwfn(p_dev, i) {
6834		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
6835
6836		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
6837							min_pf_rate);
6838	}
6839}
6840
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

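	/* Without a known link speed the percentage cannot be turned into a
	 * rate, so skip programming - unless max_bw is 100, in which case the
	 * limiter is still lifted below regardless of the link speed.
	 */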
	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %d Mb/sec\n",
		   p_link->speed);

	return rc;
}
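
/* Example for __ecore_configure_pf_max_bandwidth(): with line_speed =
 * 25000 Mb/s and max_bw = 40, the PF rate limit becomes (25000 * 40) / 100 =
 * 10000 Mb/s; with max_bw = 100 the limiter is instead set to the
 * 100000 Mb/s "no limit" boundary.
 */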

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

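	/* With no link speed there is no rate to derive from the percentage;
	 * the values cached above are applied on a subsequent link change.
	 */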
	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
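
/* Illustrative usage sketch (not compiled): capping a PF at 50% of the line
 * rate while guaranteeing it at least 20%. The percentages are hypothetical
 * placeholders.
 */
#if 0
static int example_shape_pf_bandwidth(struct ecore_dev *p_dev)
{
	int rc;

	rc = ecore_configure_pf_max_bandwidth(p_dev, 50);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_configure_pf_min_bandwidth(p_dev, 20);
}
#endif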

void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
				p_hwfn->qm_info.num_vports);
}

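/* BB-based devices expose two engines; AH-based devices expose one. */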
int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	return p_dev->num_ports;
}

void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
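
/* Example: for mac = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, the bytes stored
 * above are fw_msb = {0xbb, 0xaa}, fw_mid = {0xdd, 0xcc} and
 * fw_lsb = {0xff, 0xee} - each 16-bit FW word carries one byte-swapped pair.
 */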

void ecore_set_dev_access_enable(struct ecore_dev *p_dev, bool b_enable)
{
	if (p_dev->recov_in_prog != !b_enable) {
		DP_INFO(p_dev, "%s access to the device\n",
			b_enable ? "Enable" : "Disable");
		p_dev->recov_in_prog = !b_enable;
	}
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif