/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_l2.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_l2.c 337519 2018-08-09 01:39:47Z davidcs $");

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28121)
#endif

struct ecore_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	osal_mutex_t lock;
};

enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return ECORE_SUCCESS;

	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
	if (!p_l2_info)
		return ECORE_NOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->p_dev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		ecore_vf_get_num_rxqs(p_hwfn, &rx);
		ecore_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
	}

	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
			       sizeof(unsigned long *) *
			       p_l2_info->queues);
	if (pp_qids == OSAL_NULL)
		return ECORE_NOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
					  MAX_QUEUES_PER_QZONE / 8);
		if (pp_qids[i] == OSAL_NULL)
			return ECORE_NOMEM;
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
		return ECORE_NOMEM;
#endif

	return ECORE_SUCCESS;
}
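
/* Illustrative sketch (not part of the driver): the intended lifecycle of
 * the l2_info structures above, for a hypothetical hwfn pointer `p_hwfn`:
 *
 *	if (ecore_l2_alloc(p_hwfn) != ECORE_SUCCESS)
 *		return ECORE_NOMEM;	-- qid-usage bitmaps allocated
 *	ecore_l2_setup(p_hwfn);		-- qid-usage mutex initialized
 *	...				-- queues started/stopped
 *	ecore_l2_free(p_hwfn);		-- bitmaps, lock and l2_info released
 */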

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (p_hwfn->p_l2_info == OSAL_NULL)
		return;

	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
			break;
		OSAL_VFREE(p_hwfn->p_dev,
			   p_hwfn->p_l2_info->pp_qid_usage[i]);
		p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	/* Lock is last to initialize, if everything else was */
	if (i == p_hwfn->p_l2_info->queues)
		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
	p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;

out_l2_info:
	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = OSAL_NULL;
}
/* Access to the qid-usage bitmaps below is serialized via p_l2_info->lock */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn, true,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
					     MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
	return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}
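
/* A minimal usage sketch (illustrative only) of how the two helpers above
 * pair up for a PF-owned CID whose queue-zone bitmap is already allocated:
 *
 *	if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
 *		return ECORE_INVAL;	-- queue-zone is full
 *	-- p_cid->qid_usage_idx now holds the first previously-clear bit --
 *	ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);	-- on teardown
 *
 * Each queue-zone owns MAX_QUEUES_PER_QZONE bits; _add() claims the first
 * clear bit under p_l2_info->lock and _del() clears it again.
 */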

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
				 struct ecore_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy &
			      ECORE_QCID_LEGACY_VF_CID);

	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
	 * For legacy vf-queues, the CID doesn't go through here.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* VFs maintain the index inside queue-zone on their own */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

	OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal variant is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
			u16 opaque_fid, u32 cid,
			struct ecore_queue_start_common_params *p_params,
			bool b_is_rx,
			struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
	if (p_cid == OSAL_NULL)
		return OSAL_NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params != OSAL_NULL) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = ECORE_QUEUE_CID_PF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->p_dev)) {
		p_cid->abs = p_cid->rel;

		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
			       &p_cid->abs.queue_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
				    &p_cid->abs.stats_id);
		if (rc != ECORE_SUCCESS)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->rel.vport_id, p_cid->abs.vport_id,
		   p_cid->rel.queue_id, p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id, p_cid->abs.stats_id,
		   p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	OSAL_VFREE(p_hwfn->p_dev, p_cid);
	return OSAL_NULL;
}

struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       struct ecore_queue_start_common_params *p_params,
		       bool b_is_rx,
		       struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	u8 vfid = ECORE_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy &
		    ECORE_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by the PF.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					   &cid, vfid) != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
			return OSAL_NULL;
		}
	}

	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
					p_params, b_is_rx, p_vf_params);
	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			  bool b_is_rx,
			  struct ecore_queue_start_common_params *p_params)
{
	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				      OSAL_NULL);
}

enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct eth_vport_tpa_param *p_tpa;
	u16 rx_mode = 0, tx_err = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* Handle requests for strict behavior on transmission errors */
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
		  p_params->b_err_illegal_vlan_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
		  p_params->b_err_small_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  p_params->b_err_anti_spoof ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
		  p_params->b_err_illegal_inband_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
		  p_params->b_err_vlan_insert_with_inband ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
		  p_params->b_err_big_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
		  p_params->b_err_ctrl_frame ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

	/* TPA related fields */
	p_tpa = &p_ramrod->tpa_param;
	OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_tpa->tpa_max_size = (u16)-1;
		p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
		p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
		p_tpa->tpa_ipv4_en_flg = 1;
		p_tpa->tpa_ipv6_en_flg = 1;
		p_tpa->tpa_ipv4_tunn_en_flg = 1;
		p_tpa->tpa_ipv6_tunn_en_flg = 1;
		p_tpa->tpa_pkt_split_flg = 1;
		p_tpa->tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
					  struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged,
					       p_params->zero_placement_offset);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id,
			      &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table,
		   p_config->update_rss_key);

	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
				1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return ECORE_INVAL;

		p_config->indirection_table[i] =
				OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}

static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
					accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
					accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			   !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
			      struct ecore_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;
	u16 val;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	val = p_params->tpa_max_size;
	p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
	val = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
	val = p_params->tpa_min_size_to_cont;
	p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}

static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_sp_vport_update_params *p_params)
{
	int i;

	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
		    sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = p_params->bins;

		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
	}
}

enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
					   struct ecore_sp_vport_update_params *p_params,
					   enum spq_mode comp_mode,
					   struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_stop(p_hwfn);

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
			 struct ecore_filter_accept_flags *p_accept_flags)
{
	struct ecore_sp_vport_update_params s_params;

	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
		    sizeof(struct ecore_filter_accept_flags));

	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev,
					     u8 vport,
					     struct ecore_filter_accept_flags accept_flags,
					     u8 update_accept_any_vlan,
					     u8 accept_any_vlan,
					     enum spq_mode comp_mode,
					     struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc != ECORE_SUCCESS)
				return rc;
			continue;
		}

		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);

		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   u16 bd_max_bytes,
			   dma_addr_t bd_chain_phys_addr,
			   dma_addr_t cqe_pbl_addr,
			   u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
		   p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      ECORE_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = (u8 OSAL_IOMEM *)
		    p_hwfn->regview +
		    GTT_BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					  bd_max_bytes,
					  bd_chain_phys_addr,
					  cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	/* Allocate a CID for the queue */
	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_NOMEM;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
						 bd_max_bytes,
						 bd_chain_phys_addr,
						 cqe_pbl_addr, cqe_pbl_size,
						 &p_ret_params->p_prod);
	else
		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   &p_ret_params->p_prod);

	/* Provide the caller with a reference to use as a handle */
	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
						   void **pp_rxq_handles,
						   u8 num_rxqs,
						   u8 complete_cqe_flg,
						   u8 complete_event_flg,
						   enum spq_mode comp_mode,
						   struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

#ifndef LINUX_REMOVE
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       (struct ecore_queue_cid **)
					       pp_rxq_handles,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);
#endif

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}

enum _ecore_status_t
ecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn,
				   void *p_rxq_handler,
				   enum spq_mode comp_mode,
				   struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	p_cid = (struct ecore_queue_cid *)p_rxq_handler;

	/* Get SPQ entry */
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_update;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;
	p_ramrod->set_default_rss_queue = 1;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   bool b_eq_completion_only,
			   bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
				       b_eq_completion_only;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_rxq,
					     bool eq_completion_only,
					     bool cqe_completion)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
						eq_completion_only,
						cqe_completion);
	else
		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   dma_addr_t pbl_addr, u16 pbl_size,
			   u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u8 tc,
			    dma_addr_t pbl_addr, u16 pbl_size,
			    void OSAL_IOMEM **pp_doorbell)
{
	enum _ecore_status_t rc;

	/* TODO - set tc in the pq_params for multi-cos */
	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
					pbl_addr, pbl_size,
					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = (u8 OSAL_IOMEM *)
		       p_hwfn->doorbells +
		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr, u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_INVAL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
						 pbl_addr, pbl_size,
						 &p_ret_params->p_doorbell);
	else
		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
					   pbl_addr, pbl_size,
					   &p_ret_params->p_doorbell);

	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  struct vport_filter_update_ramrod_data **pp_ramrod,
			  struct ecore_spq_entry **pp_ent,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct ecore_sp_init_data init_data;
	enum eth_filter_action action;
	enum _ecore_status_t rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			    &vport_to_remove_from);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			    &vport_to_add_to);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, pp_ent,
				   ETH_RAMROD_FILTERS_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx filters\n");
		p_ramrod->filter_cmd_hdr.tx = 0;
	}

#endif

	switch (p_filter_cmd->opcode) {
	case ECORE_FILTER_REPLACE:
	case ECORE_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case ECORE_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case ECORE_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case ECORE_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case ECORE_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case ECORE_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case ECORE_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case ECORE_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
				      &p_first_filter->mac_mid,
				      &p_first_filter->mac_lsb,
				      (u8 *)p_filter_cmd->mac);

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		OSAL_MEMCPY(p_second_filter, p_first_filter,
			    sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = ecore_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn, true,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return ECORE_NOTIMPL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id =
			(p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
			vport_to_remove_from : vport_to_add_to;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
					       u16 opaque_fid,
					       struct ecore_filter_ucast *p_filter_cmd,
					       enum spq_mode comp_mode,
					       struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct eth_filter_cmd_header *p_header;
	enum _ecore_status_t rc;

	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				       &p_ramrod, &p_ent,
				       comp_mode, p_comp_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "Unicast filter command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
		    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		     "REMOVE" :
		     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
		      "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
		    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
		     "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return ECORE_SUCCESS;
}

/*******************************************************************************
 * Description:
 *         Calculates CRC-32 on a buffer
 *         Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8  msb = 0, current_byte = 0;

	if ((crc32_packet == OSAL_NULL) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0)) {
		return crc32_result;
	}

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
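			/* Bit-serial shift-and-XOR step over the CRC-32C
			 * (Castagnoli) polynomial; each data byte is
			 * consumed LSB-first.
			 */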
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}

	return crc32_result;
}

static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
	u32 packet_buf[2] = {0};

	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}

u8 ecore_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);

	return crc & 0xff;
}
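
/* Worked example (illustrative only): ecore_mcast_bin_from_mac() maps a MAC
 * to one of 256 approximate-match bins, bin = CRC-32C(seed, mac) & 0xff.
 * Callers such as ecore_sp_eth_filter_mcast() below then set the matching
 * bit in the bin vector handed to the vport-update ramrod:
 *
 *	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = {0};
 *	u8 bin = ecore_mcast_bin_from_mac(mac);
 *
 *	bins[bin / 32] |= 1 << (bin % 32);
 */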

static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
			  struct ecore_filter_mcast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;
	int i;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				    &abs_vport_id);
	else
		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				    &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
		    0, sizeof(p_ramrod->approx_mcast.bins));
	OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* The filter ADD op is an explicit set op; it removes any existing
	 * filters for the vport.
	 */
1663	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1664		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1665			u32 bit;
1666
1667			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1668			bins[bit / 32] |= 1U << (bit % 32);
1669		}
1670
1671		/* Convert to correct endianness */
1672		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1673			struct vport_update_ramrod_mcast *p_ramrod_bins;
1674
1675			p_ramrod_bins = &p_ramrod->approx_mcast;
1676			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
1677		}
1678	}
1679
1680	p_ramrod->common.vport_id = abs_vport_id;
1681
1682	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1683	if (rc != ECORE_SUCCESS)
1684		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1685
1686	return rc;
1687}
1688
1689enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1690					    struct ecore_filter_mcast *p_filter_cmd,
1691					    enum spq_mode comp_mode,
1692					    struct ecore_spq_comp_cb *p_comp_data)
1693{
1694	enum _ecore_status_t rc = ECORE_SUCCESS;
1695	int i;
1696
1697	/* only ADD and REMOVE operations are supported for multi-cast */
1698	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1699	     p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1700	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1701		return ECORE_INVAL;
1702	}
1703
1704	for_each_hwfn(p_dev, i) {
1705		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1706
1707		if (IS_VF(p_dev)) {
1708			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1709			continue;
1710		}
1711
1712		rc = ecore_sp_eth_filter_mcast(p_hwfn,
1713					       p_filter_cmd,
1714					       comp_mode,
1715					       p_comp_data);
1716		if (rc != ECORE_SUCCESS)
1717			break;
1718	}
1719
1720	return rc;
1721}
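
/* Usage sketch (illustrative only; values are hypothetical): replacing a
 * vport's multicast set in a single blocking ADD, relying on ADD being a
 * set operation:
 *
 *	struct ecore_filter_mcast mcast;
 *
 *	OSAL_MEMSET(&mcast, 0, sizeof(mcast));
 *	mcast.opcode = ECORE_FILTER_ADD;
 *	mcast.vport_to_add_to = 0;
 *	mcast.num_mc_addrs = 1;
 *	OSAL_MEMCPY(mcast.mac[0], mc_mac, ETH_ALEN);
 *	rc = ecore_filter_mcast_cmd(p_dev, &mcast,
 *				    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */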
1722
1723enum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1724					    struct ecore_filter_ucast *p_filter_cmd,
1725					    enum spq_mode comp_mode,
1726					    struct ecore_spq_comp_cb *p_comp_data)
1727{
1728	enum _ecore_status_t rc = ECORE_SUCCESS;
1729	int i;
1730
1731	for_each_hwfn(p_dev, i) {
1732		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1733		u16 opaque_fid;
1734
1735		if (IS_VF(p_dev)) {
1736			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1737			continue;
1738		}
1739
1740		opaque_fid = p_hwfn->hw_info.opaque_fid;
1741		rc = ecore_sp_eth_filter_ucast(p_hwfn,
1742					       opaque_fid,
1743					       p_filter_cmd,
1744					       comp_mode,
1745					       p_comp_data);
1746		if (rc != ECORE_SUCCESS)
1747			break;
1748	}
1749
1750	return rc;
1751}
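
/* Usage sketch (illustrative only; values are hypothetical, field names
 * assumed from the ecore filter-ucast API): adding a perfect-match
 * unicast MAC on vport 0 for both Rx and Tx:
 *
 *	struct ecore_filter_ucast ucast;
 *
 *	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
 *	ucast.opcode = ECORE_FILTER_ADD;
 *	ucast.type = ECORE_FILTER_MAC;
 *	ucast.is_rx_filter = 1;
 *	ucast.is_tx_filter = 1;
 *	ucast.vport_to_add_to = 0;
 *	OSAL_MEMCPY(ucast.mac, mac, ETH_ALEN);
 *	rc = ecore_filter_ucast_cmd(p_dev, &ucast,
 *				    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */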
1752
1753/* Statistics related code */
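/* Each storm RAM holds its own slice of the counters: PSTORM carries Tx
 * per-queue stats, USTORM Rx per-queue stats, MSTORM discard/TPA stats
 * and TSTORM per-port filter-discard stats. A PF reads them directly
 * from BAR0, while a VF uses the addresses/lengths the PF advertised in
 * the acquire response. Each counter is a {hi, lo} 32-bit register pair
 * that HILO_64_REGPAIR() folds into a u64.
 */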
1754static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1755					     u32 *p_addr, u32 *p_len,
1756					     u16 statistics_bin)
1757{
1758	if (IS_PF(p_hwfn->p_dev)) {
1759		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1760			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1761		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1762	} else {
1763		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1764		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1765
1766		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1767		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1768	}
1769}
1770
1771static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1772				     struct ecore_ptt *p_ptt,
1773				     struct ecore_eth_stats *p_stats,
1774				     u16 statistics_bin)
1775{
1776	struct eth_pstorm_per_queue_stat pstats;
1777	u32 pstats_addr = 0, pstats_len = 0;
1778
1779	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1780					 statistics_bin);
1781
1782	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1783	ecore_memcpy_from(p_hwfn, p_ptt, &pstats,
1784			  pstats_addr, pstats_len);
1785
1786	p_stats->common.tx_ucast_bytes +=
1787		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1788	p_stats->common.tx_mcast_bytes +=
1789		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1790	p_stats->common.tx_bcast_bytes +=
1791		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1792	p_stats->common.tx_ucast_pkts +=
1793		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1794	p_stats->common.tx_mcast_pkts +=
1795		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1796	p_stats->common.tx_bcast_pkts +=
1797		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1798	p_stats->common.tx_err_drop_pkts +=
1799		HILO_64_REGPAIR(pstats.error_drop_pkts);
1800}
1801
1802static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1803				     struct ecore_ptt *p_ptt,
1804				     struct ecore_eth_stats *p_stats)
1805{
1806	struct tstorm_per_port_stat tstats;
1807	u32 tstats_addr, tstats_len;
1808
1809	if (IS_PF(p_hwfn->p_dev)) {
1810		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1811			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1812		tstats_len = sizeof(struct tstorm_per_port_stat);
1813	} else {
1814		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1815		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1816
1817		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1818		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1819	}
1820
1821	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1822	ecore_memcpy_from(p_hwfn, p_ptt, &tstats,
1823			  tstats_addr, tstats_len);
1824
1825	p_stats->common.mftag_filter_discards +=
1826		HILO_64_REGPAIR(tstats.mftag_filter_discard);
1827	p_stats->common.mac_filter_discards +=
1828		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1829}
1830
1831static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1832					     u32 *p_addr, u32 *p_len,
1833					     u16 statistics_bin)
1834{
1835	if (IS_PF(p_hwfn->p_dev)) {
1836		*p_addr = BAR0_MAP_REG_USDM_RAM +
1837			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1838		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1839	} else {
1840		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1841		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1842
1843		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1844		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1845	}
1846}
1847
1848static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1849				     struct ecore_ptt *p_ptt,
1850				     struct ecore_eth_stats *p_stats,
1851				     u16 statistics_bin)
1852{
1853	struct eth_ustorm_per_queue_stat ustats;
1854	u32 ustats_addr = 0, ustats_len = 0;
1855
1856	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1857					 statistics_bin);
1858
1859	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1860	ecore_memcpy_from(p_hwfn, p_ptt, &ustats,
1861			  ustats_addr, ustats_len);
1862
1863	p_stats->common.rx_ucast_bytes +=
1864		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1865	p_stats->common.rx_mcast_bytes +=
1866		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1867	p_stats->common.rx_bcast_bytes +=
1868		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1869	p_stats->common.rx_ucast_pkts +=
1870		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1871	p_stats->common.rx_mcast_pkts +=
1872		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1873	p_stats->common.rx_bcast_pkts +=
1874		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1875}
1876
1877static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1878					     u32 *p_addr, u32 *p_len,
1879					     u16 statistics_bin)
1880{
1881	if (IS_PF(p_hwfn->p_dev)) {
1882		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1883			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1884		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1885	} else {
1886		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1887		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1888
1889		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1890		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1891	}
1892}
1893
1894static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1895				     struct ecore_ptt *p_ptt,
1896				     struct ecore_eth_stats *p_stats,
1897				     u16 statistics_bin)
1898{
1899	struct eth_mstorm_per_queue_stat mstats;
1900	u32 mstats_addr = 0, mstats_len = 0;
1901
1902	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1903					 statistics_bin);
1904
1905	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1906	ecore_memcpy_from(p_hwfn, p_ptt, &mstats,
1907			  mstats_addr, mstats_len);
1908
1909	p_stats->common.no_buff_discards +=
1910		HILO_64_REGPAIR(mstats.no_buff_discard);
1911	p_stats->common.packet_too_big_discard +=
1912		HILO_64_REGPAIR(mstats.packet_too_big_discard);
1913	p_stats->common.ttl0_discard +=
1914		HILO_64_REGPAIR(mstats.ttl0_discard);
1915	p_stats->common.tpa_coalesced_pkts +=
1916		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1917	p_stats->common.tpa_coalesced_events +=
1918		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1919	p_stats->common.tpa_aborts_num +=
1920		HILO_64_REGPAIR(mstats.tpa_aborts_num);
1921	p_stats->common.tpa_coalesced_bytes +=
1922		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1923}
1924
1925static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1926					 struct ecore_ptt *p_ptt,
1927					 struct ecore_eth_stats *p_stats)
1928{
1929	struct ecore_eth_stats_common *p_common = &p_stats->common;
1930	struct port_stats port_stats;
1931	int j;
1932
1933	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1934
1935	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1936			  p_hwfn->mcp_info->port_addr +
1937			  OFFSETOF(struct public_port, stats),
1938			  sizeof(port_stats));
1939
1940	p_common->rx_64_byte_packets += port_stats.eth.r64;
1941	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1942	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1943	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1944	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1945	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1946	p_common->rx_crc_errors += port_stats.eth.rfcs;
1947	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1948	p_common->rx_pause_frames += port_stats.eth.rxpf;
1949	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1950	p_common->rx_align_errors += port_stats.eth.raln;
1951	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1952	p_common->rx_oversize_packets += port_stats.eth.rovr;
1953	p_common->rx_jabbers += port_stats.eth.rjbr;
1954	p_common->rx_undersize_packets += port_stats.eth.rund;
1955	p_common->rx_fragments += port_stats.eth.rfrg;
1956	p_common->tx_64_byte_packets += port_stats.eth.t64;
1957	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1958	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1959	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1960	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1961	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1962	p_common->tx_pause_frames += port_stats.eth.txpf;
1963	p_common->tx_pfc_frames += port_stats.eth.txpp;
1964	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1965	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1966	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1967	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1968	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1969	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1970	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1971	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1972	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1973	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1974	for (j = 0; j < 8; j++) {
1975		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1976		p_common->brb_discards += port_stats.brb.brb_discard[j];
1977	}
1978
1979	if (ECORE_IS_BB(p_hwfn->p_dev)) {
1980		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1981
1982		p_bb->rx_1519_to_1522_byte_packets +=
1983			port_stats.eth.u0.bb0.r1522;
1984		p_bb->rx_1519_to_2047_byte_packets +=
1985			port_stats.eth.u0.bb0.r2047;
1986		p_bb->rx_2048_to_4095_byte_packets +=
1987			port_stats.eth.u0.bb0.r4095;
1988		p_bb->rx_4096_to_9216_byte_packets +=
1989			port_stats.eth.u0.bb0.r9216;
1990		p_bb->rx_9217_to_16383_byte_packets +=
1991			port_stats.eth.u0.bb0.r16383;
1992		p_bb->tx_1519_to_2047_byte_packets +=
1993			port_stats.eth.u1.bb1.t2047;
1994		p_bb->tx_2048_to_4095_byte_packets +=
1995			port_stats.eth.u1.bb1.t4095;
1996		p_bb->tx_4096_to_9216_byte_packets +=
1997			port_stats.eth.u1.bb1.t9216;
1998		p_bb->tx_9217_to_16383_byte_packets +=
1999			port_stats.eth.u1.bb1.t16383;
2000		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
2001		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
2002	} else {
2003		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
2004
2005		p_ah->rx_1519_to_max_byte_packets +=
2006			port_stats.eth.u0.ah0.r1519_to_max;
2007		p_ah->tx_1519_to_max_byte_packets =
2008		p_ah->tx_1519_to_max_byte_packets +=
2009	}
2010
2011	p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
2012					       p_hwfn->mcp_info->port_addr +
2013					       OFFSETOF(struct public_port,
2014							link_change_count));
2015}
2016
2017void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
2018			     struct ecore_ptt *p_ptt,
2019			     struct ecore_eth_stats *stats,
2020			     u16 statistics_bin, bool b_get_port_stats)
2021{
2022	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
2023	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
2024	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
2025	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
2026
2027#ifndef ASIC_ONLY
2028	/* Avoid getting PORT stats for emulation. */
2029	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
2030		return;
2031#endif
2032
2033	if (b_get_port_stats && p_hwfn->mcp_info)
2034		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
2035}
2036
2037static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
2038				   struct ecore_eth_stats *stats)
2039{
2040	u8 fw_vport = 0;
2041	int i;
2042
2043	OSAL_MEMSET(stats, 0, sizeof(*stats));
2044
2045	for_each_hwfn(p_dev, i) {
2046		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2047		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2048					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2049		bool b_get_port_stats;
2050
2051		if (IS_PF(p_dev)) {
2052			/* The main vport is always relative index 0 */
2053			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
2054				DP_ERR(p_hwfn, "No vport available!\n");
2055				goto out;
2056			}
2057		}
2058
2059		if (IS_PF(p_dev) && !p_ptt) {
2060			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2061			continue;
2062		}
2063
2064		b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
2065		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
2066					b_get_port_stats);
2067
2068out:
2069		if (IS_PF(p_dev) && p_ptt)
2070			ecore_ptt_release(p_hwfn, p_ptt);
2071	}
2072}
2073
2074void ecore_get_vport_stats(struct ecore_dev *p_dev,
2075			   struct ecore_eth_stats *stats)
2076{
2077	u32 i;
2078
2079	if (!p_dev) {
2080		OSAL_MEMSET(stats, 0, sizeof(*stats));
2081		return;
2082	}
2083
2084	_ecore_get_vport_stats(p_dev, stats);
2085
2086	if (!p_dev->reset_stats)
2087		return;
2088
2089	/* Reduce the statistics baseline (element-wise u64 subtraction) */
2090	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2091		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2092}
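
/* Usage sketch (illustrative only): polling device-wide stats; values are
 * cumulative minus the baseline captured by ecore_reset_vport_stats():
 *
 *	struct ecore_eth_stats stats;
 *	u64 rx_pkts;
 *
 *	ecore_get_vport_stats(p_dev, &stats);
 *	rx_pkts = stats.common.rx_ucast_pkts +
 *		  stats.common.rx_mcast_pkts +
 *		  stats.common.rx_bcast_pkts;
 */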
2093
2094/* Zeroes the V-PORT specific portion of stats (Port stats remain untouched) */
2095void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2096{
2097	int i;
2098
2099	for_each_hwfn(p_dev, i) {
2100		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2101		struct eth_mstorm_per_queue_stat mstats;
2102		struct eth_ustorm_per_queue_stat ustats;
2103		struct eth_pstorm_per_queue_stat pstats;
2104		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2105					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2106		u32 addr = 0, len = 0;
2107
2108		if (IS_PF(p_dev) && !p_ptt) {
2109			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2110			continue;
2111		}
2112
2113		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2114		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2115		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2116
2117		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2118		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2119		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2120
2121		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2122		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2123		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2124
2125		if (IS_PF(p_dev))
2126			ecore_ptt_release(p_hwfn, p_ptt);
2127	}
2128
2129	/* PORT statistics are not necessarily reset, so we need to
2130	 * read and create a baseline for future statistics. The link-change
2131	 * stat is maintained by the MFW; zero its baseline so it is reported as-is.
2132	 */
2133	if (!p_dev->reset_stats) {
2134		DP_INFO(p_dev, "Reset stats not allocated\n");
2135	} else {
2136		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2137		p_dev->reset_stats->common.link_change_count = 0;
2138	}
2139}
2140
2141static enum gft_profile_type
2142ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
2143{
2144	if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
2145		return GFT_PROFILE_TYPE_4_TUPLE;
2146	if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
2147		return GFT_PROFILE_TYPE_IP_DST_ADDR;
2148	return GFT_PROFILE_TYPE_L4_DST_PORT;
2149}
2150
2151void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2152			       struct ecore_ptt *p_ptt,
2153			       struct ecore_arfs_config_params *p_cfg_params)
2154{
2155	if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
2156		return;
2157
2158	if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
2159		ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2160				 p_cfg_params->tcp,
2161				 p_cfg_params->udp,
2162				 p_cfg_params->ipv4,
2163				 p_cfg_params->ipv6,
2164				 ecore_arfs_mode_to_hsi(p_cfg_params->mode));
2165		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2166		   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
2167			   p_cfg_params->tcp ? "Enable" : "Disable",
2168			   p_cfg_params->udp ? "Enable" : "Disable",
2169			   p_cfg_params->ipv4 ? "Enable" : "Disable",
2170			   p_cfg_params->ipv6 ? "Enable" : "Disable",
2171			   (u32)p_cfg_params->mode);
2172	} else {
2173		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Disabled Filtering\n");
2174		ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2175	}
2176}
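
/* Usage sketch (illustrative only; values are hypothetical): enabling
 * aRFS classification of TCP/IPv4 flows by 5-tuple:
 *
 *	struct ecore_arfs_config_params cfg;
 *
 *	OSAL_MEMSET(&cfg, 0, sizeof(cfg));
 *	cfg.tcp = true;
 *	cfg.ipv4 = true;
 *	cfg.mode = ECORE_FILTER_CONFIG_MODE_5_TUPLE;
 *	ecore_arfs_mode_configure(p_hwfn, p_ptt, &cfg);
 */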
2177
2178enum _ecore_status_t
2179ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2180				  struct ecore_spq_comp_cb *p_cb,
2181				  struct ecore_ntuple_filter_params *p_params)
2182{
2183	struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2184	struct ecore_spq_entry *p_ent = OSAL_NULL;
2185	struct ecore_sp_init_data init_data;
2186	u16 abs_rx_q_id = 0;
2187	u8 abs_vport_id = 0;
2188	enum _ecore_status_t rc = ECORE_NOTIMPL;
2189
2190	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2191	if (rc != ECORE_SUCCESS)
2192		return rc;
2193
2194	if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) {
2195		rc = ecore_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
2196		if (rc != ECORE_SUCCESS)
2197			return rc;
2198	}
2199
2200	/* Get SPQ entry */
2201	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2202	init_data.cid = ecore_spq_get_cid(p_hwfn);
2203
2204	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2205
2206	if (p_cb) {
2207		init_data.comp_mode = ECORE_SPQ_MODE_CB;
2208		init_data.p_comp_data = p_cb;
2209	} else {
2210		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2211	}
2212
2213	rc = ecore_sp_init_request(p_hwfn, &p_ent,
2214				   ETH_RAMROD_GFT_UPDATE_FILTER,
2215				   PROTOCOLID_ETH, &init_data);
2216	if (rc != ECORE_SUCCESS)
2217		return rc;
2218
2219	p_ramrod = &p_ent->ramrod.rx_update_gft;
2220
2221	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2222	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(p_params->length);
2223
2224	if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) {
2225		p_ramrod->rx_qid_valid = 1;
2226		p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2227	}
2228
2229	p_ramrod->flow_id_valid = 0;
2230	p_ramrod->flow_id = 0;
2231
2232	p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
2233	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2234						     : GFT_DELETE_FILTER;
2235
2236	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2237		   "V[%02x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2238		   abs_vport_id, abs_rx_q_id,
2239		   p_params->b_is_add ? "Adding" : "Removing",
2240		   (unsigned long long)p_params->addr, p_params->length);
2241
2242	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2243}
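
/* Note (inferred from the ramrod fields above): p_params->addr must be
 * the DMA address of a buffer holding the packet header to match on,
 * and p_params->qid may be ECORE_RFS_NTUPLE_QID_RSS to leave the Rx
 * queue unset and let RSS spread the matching flows.
 */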
2244
2245int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
2246			   struct ecore_ptt *p_ptt,
2247			   struct ecore_queue_cid *p_cid,
2248			   u16 *p_rx_coal)
2249{
2250	u32 coalesce, address, is_valid;
2251	struct cau_sb_entry sb_entry;
2252	u8 timer_res;
2253	enum _ecore_status_t rc;
2254
2255	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2256				 p_cid->sb_igu_id * sizeof(u64),
2257				 (u64)(osal_uintptr_t)&sb_entry, 2 /* dwords */,
2258				 OSAL_NULL /* default parameters */);
2259	if (rc != ECORE_SUCCESS) {
2260		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2261		return rc;
2262	}
2263
2264	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2265
2266	address = BAR0_MAP_REG_USDM_RAM +
2267		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2268	coalesce = ecore_rd(p_hwfn, p_ptt, address);
2269
2270	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2271	if (!is_valid)
2272		return ECORE_INVAL;
2273
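	/* The timeset is stored scaled down by the per-SB timer resolution;
	 * shifting it back up recovers the coalescing interval (in usecs,
	 * assuming the scaling applied when the coalescing was configured).
	 */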
2274	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2275	*p_rx_coal = (u16)(coalesce << timer_res);
2276
2277	return ECORE_SUCCESS;
2278}
2279
2280int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
2281			   struct ecore_ptt *p_ptt,
2282			   struct ecore_queue_cid *p_cid,
2283			   u16 *p_tx_coal)
2284{
2285	u32 coalesce, address, is_valid;
2286	struct cau_sb_entry sb_entry;
2287	u8 timer_res;
2288	enum _ecore_status_t rc;
2289
2290	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2291				 p_cid->sb_igu_id * sizeof(u64),
2292				 (u64)(osal_uintptr_t)&sb_entry, 2 /* dwords */,
2293				 OSAL_NULL /* default parameters */);
2294	if (rc != ECORE_SUCCESS) {
2295		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2296		return rc;
2297	}
2298
2299	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2300
2301	address = BAR0_MAP_REG_XSDM_RAM +
2302		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2303	coalesce = ecore_rd(p_hwfn, p_ptt, address);
2304
2305	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2306	if (!is_valid)
2307		return ECORE_INVAL;
2308
2309	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2310	*p_tx_coal = (u16)(coalesce << timer_res);
2311
2312	return ECORE_SUCCESS;
2313}
2314
2315enum _ecore_status_t
2316ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
2317			 void *handle)
2318{
2319	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
2320	enum _ecore_status_t rc = ECORE_SUCCESS;
2321	struct ecore_ptt *p_ptt;
2322
2323#ifdef CONFIG_ECORE_SRIOV
2324	if (IS_VF(p_hwfn->p_dev)) {
2325		rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2326		if (rc != ECORE_SUCCESS)
2327			DP_NOTICE(p_hwfn, false,
2328				  "Unable to read queue coalescing\n");
2329
2330		return rc;
2331	}
2332#endif
2333
2334	p_ptt = ecore_ptt_acquire(p_hwfn);
2335	if (!p_ptt)
2336		return ECORE_AGAIN;
2337
2338	if (p_cid->b_is_rx) {
2339		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2340		if (rc != ECORE_SUCCESS)
2341			goto out;
2342	} else {
2343		rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2344		if (rc != ECORE_SUCCESS)
2345			goto out;
2346	}
2347
2348out:
2349	ecore_ptt_release(p_hwfn, p_ptt);
2350
2351	return rc;
2352}
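
/* Usage sketch (illustrative only): `handle' is the queue-cid pointer
 * returned to the client by the queue-start path:
 *
 *	u16 usecs = 0;
 *
 *	rc = ecore_get_queue_coalesce(p_hwfn, &usecs, handle);
 *	if (rc == ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 *			   "coalescing = %u usecs\n", usecs);
 */
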
2353#ifdef _NTDDK_
2354#pragma warning(pop)
2355#endif
2356