/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
26316485Sdavidcs */ 27316485Sdavidcs 28316485Sdavidcs/* 29316485Sdavidcs * File : ecore_l2.c 30316485Sdavidcs */ 31316485Sdavidcs#include <sys/cdefs.h> 32316485Sdavidcs__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_l2.c 337519 2018-08-09 01:39:47Z davidcs $"); 33316485Sdavidcs 34316485Sdavidcs#include "bcm_osal.h" 35316485Sdavidcs 36316485Sdavidcs#include "ecore.h" 37316485Sdavidcs#include "ecore_status.h" 38316485Sdavidcs#include "ecore_hsi_eth.h" 39316485Sdavidcs#include "ecore_chain.h" 40316485Sdavidcs#include "ecore_spq.h" 41316485Sdavidcs#include "ecore_init_fw_funcs.h" 42316485Sdavidcs#include "ecore_cxt.h" 43316485Sdavidcs#include "ecore_l2.h" 44316485Sdavidcs#include "ecore_sp_commands.h" 45316485Sdavidcs#include "ecore_gtt_reg_addr.h" 46316485Sdavidcs#include "ecore_iro.h" 47316485Sdavidcs#include "reg_addr.h" 48316485Sdavidcs#include "ecore_int.h" 49316485Sdavidcs#include "ecore_hw.h" 50316485Sdavidcs#include "ecore_vf.h" 51316485Sdavidcs#include "ecore_sriov.h" 52316485Sdavidcs#include "ecore_mcp.h" 53316485Sdavidcs 54316485Sdavidcs#define ECORE_MAX_SGES_NUM 16 55316485Sdavidcs#define CRC32_POLY 0x1edc6f41 56316485Sdavidcs 57337519Sdavidcs#ifdef _NTDDK_ 58337519Sdavidcs#pragma warning(push) 59337519Sdavidcs#pragma warning(disable : 28167) 60337519Sdavidcs#pragma warning(disable : 28123) 61337519Sdavidcs#pragma warning(disable : 28121) 62337519Sdavidcs#endif 63337519Sdavidcs 64316485Sdavidcsstruct ecore_l2_info { 65316485Sdavidcs u32 queues; 66316485Sdavidcs unsigned long **pp_qid_usage; 67316485Sdavidcs 68316485Sdavidcs /* The lock is meant to synchronize access to the qid usage */ 69316485Sdavidcs osal_mutex_t lock; 70316485Sdavidcs}; 71316485Sdavidcs 72316485Sdavidcsenum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) 73316485Sdavidcs{ 74316485Sdavidcs struct ecore_l2_info *p_l2_info; 75316485Sdavidcs unsigned long **pp_qids; 76316485Sdavidcs u32 i; 77316485Sdavidcs 78316485Sdavidcs if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 
79316485Sdavidcs return ECORE_SUCCESS; 80316485Sdavidcs 81316485Sdavidcs p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info)); 82316485Sdavidcs if (!p_l2_info) 83316485Sdavidcs return ECORE_NOMEM; 84316485Sdavidcs p_hwfn->p_l2_info = p_l2_info; 85316485Sdavidcs 86316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 87316485Sdavidcs p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 88316485Sdavidcs } else { 89316485Sdavidcs u8 rx = 0, tx = 0; 90316485Sdavidcs 91316485Sdavidcs ecore_vf_get_num_rxqs(p_hwfn, &rx); 92316485Sdavidcs ecore_vf_get_num_txqs(p_hwfn, &tx); 93316485Sdavidcs 94316485Sdavidcs p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx); 95316485Sdavidcs } 96316485Sdavidcs 97316485Sdavidcs pp_qids = OSAL_VZALLOC(p_hwfn->p_dev, 98316485Sdavidcs sizeof(unsigned long *) * 99316485Sdavidcs p_l2_info->queues); 100316485Sdavidcs if (pp_qids == OSAL_NULL) 101316485Sdavidcs return ECORE_NOMEM; 102316485Sdavidcs p_l2_info->pp_qid_usage = pp_qids; 103316485Sdavidcs 104316485Sdavidcs for (i = 0; i < p_l2_info->queues; i++) { 105316485Sdavidcs pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev, 106316485Sdavidcs MAX_QUEUES_PER_QZONE / 8); 107316485Sdavidcs if (pp_qids[i] == OSAL_NULL) 108316485Sdavidcs return ECORE_NOMEM; 109316485Sdavidcs } 110316485Sdavidcs 111316485Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC 112337519Sdavidcs if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock)) 113337519Sdavidcs return ECORE_NOMEM; 114316485Sdavidcs#endif 115316485Sdavidcs 116316485Sdavidcs return ECORE_SUCCESS; 117316485Sdavidcs} 118316485Sdavidcs 119316485Sdavidcsvoid ecore_l2_setup(struct ecore_hwfn *p_hwfn) 120316485Sdavidcs{ 121316485Sdavidcs if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 122316485Sdavidcs return; 123316485Sdavidcs 124316485Sdavidcs OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock); 125316485Sdavidcs} 126316485Sdavidcs 127316485Sdavidcsvoid ecore_l2_free(struct ecore_hwfn *p_hwfn) 128316485Sdavidcs{ 129316485Sdavidcs u32 i; 130316485Sdavidcs 131316485Sdavidcs if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 
132316485Sdavidcs return; 133316485Sdavidcs 134316485Sdavidcs if (p_hwfn->p_l2_info == OSAL_NULL) 135316485Sdavidcs return; 136316485Sdavidcs 137316485Sdavidcs if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL) 138316485Sdavidcs goto out_l2_info; 139316485Sdavidcs 140316485Sdavidcs /* Free until hit first uninitialized entry */ 141316485Sdavidcs for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { 142316485Sdavidcs if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL) 143316485Sdavidcs break; 144316485Sdavidcs OSAL_VFREE(p_hwfn->p_dev, 145316485Sdavidcs p_hwfn->p_l2_info->pp_qid_usage[i]); 146337519Sdavidcs p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL; 147316485Sdavidcs } 148316485Sdavidcs 149316485Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC 150316485Sdavidcs /* Lock is last to initialize, if everything else was */ 151316485Sdavidcs if (i == p_hwfn->p_l2_info->queues) 152316485Sdavidcs OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock); 153316485Sdavidcs#endif 154316485Sdavidcs 155316485Sdavidcs OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); 156337519Sdavidcs p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL; 157316485Sdavidcs 158316485Sdavidcsout_l2_info: 159316485Sdavidcs OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); 160316485Sdavidcs p_hwfn->p_l2_info = OSAL_NULL; 161316485Sdavidcs} 162316485Sdavidcs 163316485Sdavidcs/* TODO - we'll need locking around these... 
*/ 164316485Sdavidcsstatic bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn, 165316485Sdavidcs struct ecore_queue_cid *p_cid) 166316485Sdavidcs{ 167316485Sdavidcs struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info; 168316485Sdavidcs u16 queue_id = p_cid->rel.queue_id; 169316485Sdavidcs bool b_rc = true; 170316485Sdavidcs u8 first; 171316485Sdavidcs 172316485Sdavidcs OSAL_MUTEX_ACQUIRE(&p_l2_info->lock); 173316485Sdavidcs 174316485Sdavidcs if (queue_id > p_l2_info->queues) { 175316485Sdavidcs DP_NOTICE(p_hwfn, true, 176316485Sdavidcs "Requested to increase usage for qzone %04x out of %08x\n", 177316485Sdavidcs queue_id, p_l2_info->queues); 178316485Sdavidcs b_rc = false; 179316485Sdavidcs goto out; 180316485Sdavidcs } 181316485Sdavidcs 182316485Sdavidcs first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id], 183316485Sdavidcs MAX_QUEUES_PER_QZONE); 184316485Sdavidcs if (first >= MAX_QUEUES_PER_QZONE) { 185316485Sdavidcs b_rc = false; 186316485Sdavidcs goto out; 187316485Sdavidcs } 188316485Sdavidcs 189316485Sdavidcs OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]); 190316485Sdavidcs p_cid->qid_usage_idx = first; 191316485Sdavidcs 192316485Sdavidcsout: 193316485Sdavidcs OSAL_MUTEX_RELEASE(&p_l2_info->lock); 194316485Sdavidcs return b_rc; 195316485Sdavidcs} 196316485Sdavidcs 197316485Sdavidcsstatic void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn, 198316485Sdavidcs struct ecore_queue_cid *p_cid) 199316485Sdavidcs{ 200316485Sdavidcs OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock); 201316485Sdavidcs 202316485Sdavidcs OSAL_CLEAR_BIT(p_cid->qid_usage_idx, 203316485Sdavidcs p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); 204316485Sdavidcs 205316485Sdavidcs OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock); 206316485Sdavidcs} 207316485Sdavidcs 208316485Sdavidcsvoid ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, 209316485Sdavidcs struct ecore_queue_cid *p_cid) 210316485Sdavidcs{ 211316485Sdavidcs bool b_legacy_vf = 
!!(p_cid->vf_legacy & 212316485Sdavidcs ECORE_QCID_LEGACY_VF_CID); 213316485Sdavidcs 214316485Sdavidcs /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF. 215316485Sdavidcs * For legacy vf-queues, the CID doesn't go through here. 216316485Sdavidcs */ 217316485Sdavidcs if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 218316485Sdavidcs _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); 219316485Sdavidcs 220316485Sdavidcs /* VFs maintain the index inside queue-zone on their own */ 221316485Sdavidcs if (p_cid->vfid == ECORE_QUEUE_CID_PF) 222316485Sdavidcs ecore_eth_queue_qid_usage_del(p_hwfn, p_cid); 223316485Sdavidcs 224316485Sdavidcs OSAL_VFREE(p_hwfn->p_dev, p_cid); 225316485Sdavidcs} 226316485Sdavidcs 227316485Sdavidcs/* The internal is only meant to be directly called by PFs initializeing CIDs 228316485Sdavidcs * for their VFs. 229316485Sdavidcs */ 230316485Sdavidcsstatic struct ecore_queue_cid * 231316485Sdavidcs_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, 232316485Sdavidcs u16 opaque_fid, u32 cid, 233316485Sdavidcs struct ecore_queue_start_common_params *p_params, 234337519Sdavidcs bool b_is_rx, 235316485Sdavidcs struct ecore_queue_cid_vf_params *p_vf_params) 236316485Sdavidcs{ 237316485Sdavidcs struct ecore_queue_cid *p_cid; 238316485Sdavidcs enum _ecore_status_t rc; 239316485Sdavidcs 240316485Sdavidcs p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid)); 241316485Sdavidcs if (p_cid == OSAL_NULL) 242316485Sdavidcs return OSAL_NULL; 243316485Sdavidcs 244316485Sdavidcs p_cid->opaque_fid = opaque_fid; 245316485Sdavidcs p_cid->cid = cid; 246316485Sdavidcs p_cid->p_owner = p_hwfn; 247316485Sdavidcs 248316485Sdavidcs /* Fill in parameters */ 249316485Sdavidcs p_cid->rel.vport_id = p_params->vport_id; 250316485Sdavidcs p_cid->rel.queue_id = p_params->queue_id; 251316485Sdavidcs p_cid->rel.stats_id = p_params->stats_id; 252316485Sdavidcs p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; 253337519Sdavidcs p_cid->b_is_rx = b_is_rx; 254316485Sdavidcs 
p_cid->sb_idx = p_params->sb_idx; 255316485Sdavidcs 256316485Sdavidcs /* Fill-in bits related to VFs' queues if information was provided */ 257316485Sdavidcs if (p_vf_params != OSAL_NULL) { 258316485Sdavidcs p_cid->vfid = p_vf_params->vfid; 259316485Sdavidcs p_cid->vf_qid = p_vf_params->vf_qid; 260316485Sdavidcs p_cid->vf_legacy = p_vf_params->vf_legacy; 261316485Sdavidcs } else { 262316485Sdavidcs p_cid->vfid = ECORE_QUEUE_CID_PF; 263316485Sdavidcs } 264316485Sdavidcs 265316485Sdavidcs /* Don't try calculating the absolute indices for VFs */ 266316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) { 267316485Sdavidcs p_cid->abs = p_cid->rel; 268316485Sdavidcs 269316485Sdavidcs goto out; 270316485Sdavidcs } 271316485Sdavidcs 272316485Sdavidcs /* Calculate the engine-absolute indices of the resources. 273337519Sdavidcs * This would guarantee they're valid later on. 274316485Sdavidcs * In some cases [SBs] we already have the right values. 275316485Sdavidcs */ 276316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); 277316485Sdavidcs if (rc != ECORE_SUCCESS) 278316485Sdavidcs goto fail; 279316485Sdavidcs 280316485Sdavidcs rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, 281316485Sdavidcs &p_cid->abs.queue_id); 282316485Sdavidcs if (rc != ECORE_SUCCESS) 283316485Sdavidcs goto fail; 284316485Sdavidcs 285316485Sdavidcs /* In case of a PF configuring its VF's queues, the stats-id is already 286316485Sdavidcs * absolute [since there's a single index that's suitable per-VF]. 287316485Sdavidcs */ 288316485Sdavidcs if (p_cid->vfid == ECORE_QUEUE_CID_PF) { 289316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id, 290316485Sdavidcs &p_cid->abs.stats_id); 291316485Sdavidcs if (rc != ECORE_SUCCESS) 292316485Sdavidcs goto fail; 293316485Sdavidcs } else { 294316485Sdavidcs p_cid->abs.stats_id = p_cid->rel.stats_id; 295316485Sdavidcs } 296316485Sdavidcs 297316485Sdavidcsout: 298316485Sdavidcs /* VF-images have provided the qid_usage_idx on their own. 
299316485Sdavidcs * Otherwise, we need to allocate a unique one. 300316485Sdavidcs */ 301316485Sdavidcs if (!p_vf_params) { 302316485Sdavidcs if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid)) 303316485Sdavidcs goto fail; 304316485Sdavidcs } else { 305316485Sdavidcs p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; 306316485Sdavidcs } 307316485Sdavidcs 308316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 309316485Sdavidcs "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", 310316485Sdavidcs p_cid->opaque_fid, p_cid->cid, 311316485Sdavidcs p_cid->rel.vport_id, p_cid->abs.vport_id, 312316485Sdavidcs p_cid->rel.queue_id, p_cid->qid_usage_idx, 313316485Sdavidcs p_cid->abs.queue_id, 314316485Sdavidcs p_cid->rel.stats_id, p_cid->abs.stats_id, 315316485Sdavidcs p_cid->sb_igu_id, p_cid->sb_idx); 316316485Sdavidcs 317316485Sdavidcs return p_cid; 318316485Sdavidcs 319316485Sdavidcsfail: 320316485Sdavidcs OSAL_VFREE(p_hwfn->p_dev, p_cid); 321316485Sdavidcs return OSAL_NULL; 322316485Sdavidcs} 323316485Sdavidcs 324316485Sdavidcsstruct ecore_queue_cid * 325316485Sdavidcsecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 326316485Sdavidcs struct ecore_queue_start_common_params *p_params, 327337519Sdavidcs bool b_is_rx, 328316485Sdavidcs struct ecore_queue_cid_vf_params *p_vf_params) 329316485Sdavidcs{ 330316485Sdavidcs struct ecore_queue_cid *p_cid; 331316485Sdavidcs u8 vfid = ECORE_CXT_PF_CID; 332316485Sdavidcs bool b_legacy_vf = false; 333316485Sdavidcs u32 cid = 0; 334316485Sdavidcs 335316485Sdavidcs /* In case of legacy VFs, The CID can be derived from the additional 336316485Sdavidcs * VF parameters - the VF assumes queue X uses CID X, so we can simply 337316485Sdavidcs * use the vf_qid for this purpose as well. 
338316485Sdavidcs */ 339316485Sdavidcs if (p_vf_params) { 340316485Sdavidcs vfid = p_vf_params->vfid; 341316485Sdavidcs 342316485Sdavidcs if (p_vf_params->vf_legacy & 343316485Sdavidcs ECORE_QCID_LEGACY_VF_CID) { 344316485Sdavidcs b_legacy_vf = true; 345316485Sdavidcs cid = p_vf_params->vf_qid; 346316485Sdavidcs } 347316485Sdavidcs } 348316485Sdavidcs 349316485Sdavidcs /* Get a unique firmware CID for this queue, in case it's a PF. 350316485Sdavidcs * VF's don't need a CID as the queue configuration will be done 351316485Sdavidcs * by PF. 352316485Sdavidcs */ 353316485Sdavidcs if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) { 354316485Sdavidcs if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 355316485Sdavidcs &cid, vfid) != ECORE_SUCCESS) { 356316485Sdavidcs DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); 357316485Sdavidcs return OSAL_NULL; 358316485Sdavidcs } 359316485Sdavidcs } 360316485Sdavidcs 361316485Sdavidcs p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 362337519Sdavidcs p_params, b_is_rx, p_vf_params); 363316485Sdavidcs if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 364316485Sdavidcs _ecore_cxt_release_cid(p_hwfn, cid, vfid); 365316485Sdavidcs 366316485Sdavidcs return p_cid; 367316485Sdavidcs} 368316485Sdavidcs 369316485Sdavidcsstatic struct ecore_queue_cid * 370316485Sdavidcsecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 371337519Sdavidcs bool b_is_rx, 372316485Sdavidcs struct ecore_queue_start_common_params *p_params) 373316485Sdavidcs{ 374337519Sdavidcs return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, 375337519Sdavidcs OSAL_NULL); 376316485Sdavidcs} 377316485Sdavidcs 378316485Sdavidcsenum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, 379316485Sdavidcs struct ecore_sp_vport_start_params *p_params) 380316485Sdavidcs{ 381316485Sdavidcs struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; 382316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 
383316485Sdavidcs struct ecore_sp_init_data init_data; 384337519Sdavidcs struct eth_vport_tpa_param *p_tpa; 385316485Sdavidcs u16 rx_mode = 0, tx_err = 0; 386316485Sdavidcs u8 abs_vport_id = 0; 387316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 388316485Sdavidcs 389316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 390316485Sdavidcs if (rc != ECORE_SUCCESS) 391316485Sdavidcs return rc; 392316485Sdavidcs 393316485Sdavidcs /* Get SPQ entry */ 394316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 395316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 396316485Sdavidcs init_data.opaque_fid = p_params->opaque_fid; 397316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 398316485Sdavidcs 399316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 400316485Sdavidcs ETH_RAMROD_VPORT_START, 401316485Sdavidcs PROTOCOLID_ETH, &init_data); 402316485Sdavidcs if (rc != ECORE_SUCCESS) 403316485Sdavidcs return rc; 404316485Sdavidcs 405316485Sdavidcs p_ramrod = &p_ent->ramrod.vport_start; 406316485Sdavidcs p_ramrod->vport_id = abs_vport_id; 407316485Sdavidcs 408316485Sdavidcs p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); 409337519Sdavidcs p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; 410316485Sdavidcs p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 411316485Sdavidcs p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 412316485Sdavidcs p_ramrod->untagged = p_params->only_untagged; 413316485Sdavidcs p_ramrod->zero_placement_offset = p_params->zero_placement_offset; 414316485Sdavidcs 415316485Sdavidcs SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 416316485Sdavidcs SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 417316485Sdavidcs 418316485Sdavidcs p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); 419316485Sdavidcs 420316485Sdavidcs /* Handle requests for strict behavior on transmission errors */ 421316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE, 422316485Sdavidcs 
p_params->b_err_illegal_vlan_mode ? 423316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 424316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, 425316485Sdavidcs p_params->b_err_small_pkt ? 426316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 427316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, 428316485Sdavidcs p_params->b_err_anti_spoof ? 429316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 430316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS, 431316485Sdavidcs p_params->b_err_illegal_inband_mode ? 432316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 433316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG, 434316485Sdavidcs p_params->b_err_vlan_insert_with_inband ? 435316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 436316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION, 437316485Sdavidcs p_params->b_err_big_pkt ? 438316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 439316485Sdavidcs SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME, 440316485Sdavidcs p_params->b_err_ctrl_frame ? 
441316485Sdavidcs ETH_TX_ERR_ASSERT_MALICIOUS : 0); 442316485Sdavidcs p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err); 443316485Sdavidcs 444316485Sdavidcs /* TPA related fields */ 445337519Sdavidcs p_tpa = &p_ramrod->tpa_param; 446337519Sdavidcs OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param)); 447337519Sdavidcs p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 448316485Sdavidcs 449316485Sdavidcs switch (p_params->tpa_mode) { 450316485Sdavidcs case ECORE_TPA_MODE_GRO: 451337519Sdavidcs p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 452337519Sdavidcs p_tpa->tpa_max_size = (u16)-1; 453337519Sdavidcs p_tpa->tpa_min_size_to_cont = p_params->mtu/2; 454337519Sdavidcs p_tpa->tpa_min_size_to_start = p_params->mtu/2; 455337519Sdavidcs p_tpa->tpa_ipv4_en_flg = 1; 456337519Sdavidcs p_tpa->tpa_ipv6_en_flg = 1; 457337519Sdavidcs p_tpa->tpa_ipv4_tunn_en_flg = 1; 458337519Sdavidcs p_tpa->tpa_ipv6_tunn_en_flg = 1; 459337519Sdavidcs p_tpa->tpa_pkt_split_flg = 1; 460337519Sdavidcs p_tpa->tpa_gro_consistent_flg = 1; 461316485Sdavidcs break; 462316485Sdavidcs default: 463316485Sdavidcs break; 464316485Sdavidcs } 465316485Sdavidcs 466316485Sdavidcs p_ramrod->tx_switching_en = p_params->tx_switching; 467316485Sdavidcs#ifndef ASIC_ONLY 468316485Sdavidcs if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) 469316485Sdavidcs p_ramrod->tx_switching_en = 0; 470316485Sdavidcs#endif 471316485Sdavidcs 472316485Sdavidcs p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; 473316485Sdavidcs p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; 474316485Sdavidcs 475316485Sdavidcs /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ 476320162Sdavidcs p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid); 477316485Sdavidcs 478316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 479316485Sdavidcs} 480316485Sdavidcs 481316485Sdavidcsenum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, 482316485Sdavidcs struct 
ecore_sp_vport_start_params *p_params) 483316485Sdavidcs{ 484316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 485316485Sdavidcs return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id, 486316485Sdavidcs p_params->mtu, 487316485Sdavidcs p_params->remove_inner_vlan, 488316485Sdavidcs p_params->tpa_mode, 489316485Sdavidcs p_params->max_buffers_per_cqe, 490337519Sdavidcs p_params->only_untagged, 491337519Sdavidcs p_params->zero_placement_offset); 492316485Sdavidcs 493316485Sdavidcs return ecore_sp_eth_vport_start(p_hwfn, p_params); 494316485Sdavidcs} 495316485Sdavidcs 496316485Sdavidcsstatic enum _ecore_status_t 497316485Sdavidcsecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn, 498316485Sdavidcs struct vport_update_ramrod_data *p_ramrod, 499316485Sdavidcs struct ecore_rss_params *p_rss) 500316485Sdavidcs{ 501316485Sdavidcs struct eth_vport_rss_config *p_config; 502337519Sdavidcs u16 capabilities = 0; 503316485Sdavidcs int i, table_size; 504316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 505316485Sdavidcs 506316485Sdavidcs if (!p_rss) { 507316485Sdavidcs p_ramrod->common.update_rss_flg = 0; 508316485Sdavidcs return rc; 509316485Sdavidcs } 510316485Sdavidcs p_config = &p_ramrod->rss_config; 511316485Sdavidcs 512316485Sdavidcs OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE != 513316485Sdavidcs ETH_RSS_IND_TABLE_ENTRIES_NUM); 514316485Sdavidcs 515316485Sdavidcs rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, 516316485Sdavidcs &p_config->rss_id); 517316485Sdavidcs if (rc != ECORE_SUCCESS) 518316485Sdavidcs return rc; 519316485Sdavidcs 520316485Sdavidcs p_ramrod->common.update_rss_flg = p_rss->update_rss_config; 521316485Sdavidcs p_config->update_rss_capabilities = p_rss->update_rss_capabilities; 522316485Sdavidcs p_config->update_rss_ind_table = p_rss->update_rss_ind_table; 523316485Sdavidcs p_config->update_rss_key = p_rss->update_rss_key; 524316485Sdavidcs 525316485Sdavidcs p_config->rss_mode = p_rss->rss_enable ? 
526316485Sdavidcs ETH_VPORT_RSS_MODE_REGULAR : 527316485Sdavidcs ETH_VPORT_RSS_MODE_DISABLED; 528316485Sdavidcs 529316485Sdavidcs p_config->capabilities = 0; 530316485Sdavidcs 531337519Sdavidcs SET_FIELD(capabilities, 532316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 533316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV4)); 534337519Sdavidcs SET_FIELD(capabilities, 535316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, 536316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV6)); 537337519Sdavidcs SET_FIELD(capabilities, 538316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 539316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP)); 540337519Sdavidcs SET_FIELD(capabilities, 541316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 542316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP)); 543337519Sdavidcs SET_FIELD(capabilities, 544316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, 545316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP)); 546337519Sdavidcs SET_FIELD(capabilities, 547316485Sdavidcs ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, 548316485Sdavidcs !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP)); 549316485Sdavidcs p_config->tbl_size = p_rss->rss_table_size_log; 550337519Sdavidcs p_config->capabilities = OSAL_CPU_TO_LE16(capabilities); 551316485Sdavidcs 552316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 553316485Sdavidcs "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", 554316485Sdavidcs p_ramrod->common.update_rss_flg, 555316485Sdavidcs p_config->rss_mode, 556316485Sdavidcs p_config->update_rss_capabilities, 557316485Sdavidcs p_config->capabilities, 558316485Sdavidcs p_config->update_rss_ind_table, 559316485Sdavidcs p_config->update_rss_key); 560316485Sdavidcs 561316485Sdavidcs table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE, 562316485Sdavidcs 1 << p_config->tbl_size); 563316485Sdavidcs for (i = 0; i < table_size; i++) { 564316485Sdavidcs struct ecore_queue_cid *p_queue 
= p_rss->rss_ind_table[i]; 565316485Sdavidcs 566316485Sdavidcs if (!p_queue) 567316485Sdavidcs return ECORE_INVAL; 568316485Sdavidcs 569316485Sdavidcs p_config->indirection_table[i] = 570316485Sdavidcs OSAL_CPU_TO_LE16(p_queue->abs.queue_id); 571316485Sdavidcs } 572316485Sdavidcs 573316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 574316485Sdavidcs "Configured RSS indirection table [%d entries]:\n", 575316485Sdavidcs table_size); 576316485Sdavidcs for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) { 577316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, 578316485Sdavidcs "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", 579316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i]), 580316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]), 581316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]), 582316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]), 583316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]), 584316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]), 585316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]), 586316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]), 587316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]), 588316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]), 589316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]), 590316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]), 591316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]), 592316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]), 593316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]), 594316485Sdavidcs OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15])); 595316485Sdavidcs } 596316485Sdavidcs 597316485Sdavidcs for (i = 0; i < 10; i++) 598316485Sdavidcs p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); 599316485Sdavidcs 
600316485Sdavidcs return rc; 601316485Sdavidcs} 602316485Sdavidcs 603316485Sdavidcsstatic void 604316485Sdavidcsecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, 605316485Sdavidcs struct vport_update_ramrod_data *p_ramrod, 606316485Sdavidcs struct ecore_filter_accept_flags accept_flags) 607316485Sdavidcs{ 608316485Sdavidcs p_ramrod->common.update_rx_mode_flg = 609316485Sdavidcs accept_flags.update_rx_mode_config; 610316485Sdavidcs p_ramrod->common.update_tx_mode_flg = 611316485Sdavidcs accept_flags.update_tx_mode_config; 612316485Sdavidcs 613316485Sdavidcs#ifndef ASIC_ONLY 614316485Sdavidcs /* On B0 emulation we cannot enable Tx, since this would cause writes 615316485Sdavidcs * to PVFC HW block which isn't implemented in emulation. 616316485Sdavidcs */ 617316485Sdavidcs if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 618316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 619316485Sdavidcs "Non-Asic - prevent Tx mode in vport update\n"); 620316485Sdavidcs p_ramrod->common.update_tx_mode_flg = 0; 621316485Sdavidcs } 622316485Sdavidcs#endif 623316485Sdavidcs 624316485Sdavidcs /* Set Rx mode accept flags */ 625316485Sdavidcs if (p_ramrod->common.update_rx_mode_flg) { 626316485Sdavidcs u8 accept_filter = accept_flags.rx_accept_filter; 627316485Sdavidcs u16 state = 0; 628316485Sdavidcs 629316485Sdavidcs SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 630316485Sdavidcs !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || 631316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); 632316485Sdavidcs 633316485Sdavidcs SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 634316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); 635316485Sdavidcs 636316485Sdavidcs SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 637316485Sdavidcs !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || 638316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 639316485Sdavidcs 640316485Sdavidcs SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 
641316485Sdavidcs (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 642316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 643316485Sdavidcs 644316485Sdavidcs SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 645316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_BCAST)); 646316485Sdavidcs 647316485Sdavidcs p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); 648316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 649316485Sdavidcs "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", 650316485Sdavidcs p_ramrod->common.vport_id, state); 651316485Sdavidcs } 652316485Sdavidcs 653316485Sdavidcs /* Set Tx mode accept flags */ 654316485Sdavidcs if (p_ramrod->common.update_tx_mode_flg) { 655316485Sdavidcs u8 accept_filter = accept_flags.tx_accept_filter; 656316485Sdavidcs u16 state = 0; 657316485Sdavidcs 658316485Sdavidcs SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 659316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_NONE)); 660316485Sdavidcs 661316485Sdavidcs SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 662316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_NONE)); 663316485Sdavidcs 664316485Sdavidcs SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 665316485Sdavidcs (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 666316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 667316485Sdavidcs 668316485Sdavidcs SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 669316485Sdavidcs !!(accept_filter & ECORE_ACCEPT_BCAST)); 670316485Sdavidcs 671316485Sdavidcs p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); 672316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 673316485Sdavidcs "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", 674316485Sdavidcs p_ramrod->common.vport_id, state); 675316485Sdavidcs } 676316485Sdavidcs} 677316485Sdavidcs 678316485Sdavidcsstatic void 679320162Sdavidcsecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod, 680316485Sdavidcs struct ecore_sge_tpa_params *p_params) 681316485Sdavidcs{ 682316485Sdavidcs struct 
eth_vport_tpa_param *p_tpa; 683337519Sdavidcs u16 val; 684316485Sdavidcs 685316485Sdavidcs if (!p_params) { 686316485Sdavidcs p_ramrod->common.update_tpa_param_flg = 0; 687316485Sdavidcs p_ramrod->common.update_tpa_en_flg = 0; 688316485Sdavidcs p_ramrod->common.update_tpa_param_flg = 0; 689316485Sdavidcs return; 690316485Sdavidcs } 691316485Sdavidcs 692316485Sdavidcs p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; 693316485Sdavidcs p_tpa = &p_ramrod->tpa_param; 694316485Sdavidcs p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; 695316485Sdavidcs p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; 696316485Sdavidcs p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; 697316485Sdavidcs p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; 698316485Sdavidcs 699316485Sdavidcs p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; 700316485Sdavidcs p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 701316485Sdavidcs p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; 702316485Sdavidcs p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; 703316485Sdavidcs p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; 704316485Sdavidcs p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; 705337519Sdavidcs val = p_params->tpa_max_size; 706337519Sdavidcs p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val); 707337519Sdavidcs val = p_params->tpa_min_size_to_start; 708337519Sdavidcs p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val); 709337519Sdavidcs val = p_params->tpa_min_size_to_cont; 710337519Sdavidcs p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val); 711316485Sdavidcs} 712316485Sdavidcs 713316485Sdavidcsstatic void 714320162Sdavidcsecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod, 715316485Sdavidcs struct ecore_sp_vport_update_params *p_params) 716316485Sdavidcs{ 717316485Sdavidcs int i; 718316485Sdavidcs 719316485Sdavidcs OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, 
720316485Sdavidcs sizeof(p_ramrod->approx_mcast.bins)); 721316485Sdavidcs 722316485Sdavidcs if (!p_params->update_approx_mcast_flg) 723316485Sdavidcs return; 724316485Sdavidcs 725316485Sdavidcs p_ramrod->common.update_approx_mcast_flg = 1; 726316485Sdavidcs for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 727337519Sdavidcs u32 *p_bins = p_params->bins; 728316485Sdavidcs 729316485Sdavidcs p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 730316485Sdavidcs } 731316485Sdavidcs} 732316485Sdavidcs 733316485Sdavidcsenum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, 734316485Sdavidcs struct ecore_sp_vport_update_params *p_params, 735316485Sdavidcs enum spq_mode comp_mode, 736316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 737316485Sdavidcs{ 738316485Sdavidcs struct ecore_rss_params *p_rss_params = p_params->rss_params; 739316485Sdavidcs struct vport_update_ramrod_data_cmn *p_cmn; 740316485Sdavidcs struct ecore_sp_init_data init_data; 741316485Sdavidcs struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 742316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 743316485Sdavidcs u8 abs_vport_id = 0, val; 744316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 745316485Sdavidcs 746316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) { 747316485Sdavidcs rc = ecore_vf_pf_vport_update(p_hwfn, p_params); 748316485Sdavidcs return rc; 749316485Sdavidcs } 750316485Sdavidcs 751316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 752316485Sdavidcs if (rc != ECORE_SUCCESS) 753316485Sdavidcs return rc; 754316485Sdavidcs 755316485Sdavidcs /* Get SPQ entry */ 756316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 757316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 758316485Sdavidcs init_data.opaque_fid = p_params->opaque_fid; 759316485Sdavidcs init_data.comp_mode = comp_mode; 760316485Sdavidcs init_data.p_comp_data = p_comp_data; 761316485Sdavidcs 762316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, 
&p_ent, 763316485Sdavidcs ETH_RAMROD_VPORT_UPDATE, 764316485Sdavidcs PROTOCOLID_ETH, &init_data); 765316485Sdavidcs if (rc != ECORE_SUCCESS) 766316485Sdavidcs return rc; 767316485Sdavidcs 768316485Sdavidcs /* Copy input params to ramrod according to FW struct */ 769316485Sdavidcs p_ramrod = &p_ent->ramrod.vport_update; 770316485Sdavidcs p_cmn = &p_ramrod->common; 771316485Sdavidcs 772316485Sdavidcs p_cmn->vport_id = abs_vport_id; 773316485Sdavidcs 774316485Sdavidcs p_cmn->rx_active_flg = p_params->vport_active_rx_flg; 775316485Sdavidcs p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; 776316485Sdavidcs p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 777316485Sdavidcs p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; 778316485Sdavidcs 779316485Sdavidcs p_cmn->accept_any_vlan = p_params->accept_any_vlan; 780316485Sdavidcs val = p_params->update_accept_any_vlan_flg; 781316485Sdavidcs p_cmn->update_accept_any_vlan_flg = val; 782316485Sdavidcs 783316485Sdavidcs p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; 784316485Sdavidcs val = p_params->update_inner_vlan_removal_flg; 785316485Sdavidcs p_cmn->update_inner_vlan_removal_en_flg = val; 786316485Sdavidcs 787316485Sdavidcs p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; 788316485Sdavidcs val = p_params->update_default_vlan_enable_flg; 789316485Sdavidcs p_cmn->update_default_vlan_en_flg = val; 790316485Sdavidcs 791316485Sdavidcs p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan); 792316485Sdavidcs p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; 793316485Sdavidcs 794316485Sdavidcs p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; 795316485Sdavidcs 796316485Sdavidcs p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; 797316485Sdavidcs#ifndef ASIC_ONLY 798316485Sdavidcs if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) 799316485Sdavidcs if (p_ramrod->common.tx_switching_en || 800316485Sdavidcs 
p_ramrod->common.update_tx_switching_en_flg) { 801316485Sdavidcs DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n"); 802316485Sdavidcs p_ramrod->common.tx_switching_en = 0; 803316485Sdavidcs p_ramrod->common.update_tx_switching_en_flg = 1; 804316485Sdavidcs } 805316485Sdavidcs#endif 806316485Sdavidcs p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 807316485Sdavidcs 808316485Sdavidcs p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 809316485Sdavidcs val = p_params->update_anti_spoofing_en_flg; 810316485Sdavidcs p_ramrod->common.update_anti_spoofing_en_flg = val; 811316485Sdavidcs 812316485Sdavidcs rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 813316485Sdavidcs if (rc != ECORE_SUCCESS) { 814316485Sdavidcs /* Return spq entry which is taken in ecore_sp_init_request()*/ 815316485Sdavidcs ecore_spq_return_entry(p_hwfn, p_ent); 816316485Sdavidcs return rc; 817316485Sdavidcs } 818316485Sdavidcs 819316485Sdavidcs /* Update mcast bins for VFs, PF doesn't use this functionality */ 820320162Sdavidcs ecore_sp_update_mcast_bin(p_ramrod, p_params); 821316485Sdavidcs 822316485Sdavidcs ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 823320162Sdavidcs ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params); 824316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 825316485Sdavidcs} 826316485Sdavidcs 827316485Sdavidcsenum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, 828316485Sdavidcs u16 opaque_fid, 829316485Sdavidcs u8 vport_id) 830316485Sdavidcs{ 831316485Sdavidcs struct vport_stop_ramrod_data *p_ramrod; 832316485Sdavidcs struct ecore_sp_init_data init_data; 833316485Sdavidcs struct ecore_spq_entry *p_ent; 834316485Sdavidcs u8 abs_vport_id = 0; 835316485Sdavidcs enum _ecore_status_t rc; 836316485Sdavidcs 837316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 838316485Sdavidcs return ecore_vf_pf_vport_stop(p_hwfn); 839316485Sdavidcs 840316485Sdavidcs rc = 
ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 841316485Sdavidcs if (rc != ECORE_SUCCESS) 842316485Sdavidcs return rc; 843316485Sdavidcs 844316485Sdavidcs /* Get SPQ entry */ 845316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 846316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 847316485Sdavidcs init_data.opaque_fid = opaque_fid; 848316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 849316485Sdavidcs 850316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 851316485Sdavidcs ETH_RAMROD_VPORT_STOP, 852316485Sdavidcs PROTOCOLID_ETH, &init_data); 853316485Sdavidcs if (rc != ECORE_SUCCESS) 854316485Sdavidcs return rc; 855316485Sdavidcs 856316485Sdavidcs p_ramrod = &p_ent->ramrod.vport_stop; 857316485Sdavidcs p_ramrod->vport_id = abs_vport_id; 858316485Sdavidcs 859316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 860316485Sdavidcs} 861316485Sdavidcs 862316485Sdavidcsstatic enum _ecore_status_t 863316485Sdavidcsecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, 864316485Sdavidcs struct ecore_filter_accept_flags *p_accept_flags) 865316485Sdavidcs{ 866316485Sdavidcs struct ecore_sp_vport_update_params s_params; 867316485Sdavidcs 868316485Sdavidcs OSAL_MEMSET(&s_params, 0, sizeof(s_params)); 869316485Sdavidcs OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, 870316485Sdavidcs sizeof(struct ecore_filter_accept_flags)); 871316485Sdavidcs 872316485Sdavidcs return ecore_vf_pf_vport_update(p_hwfn, &s_params); 873316485Sdavidcs} 874316485Sdavidcs 875316485Sdavidcsenum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev, 876316485Sdavidcs u8 vport, 877316485Sdavidcs struct ecore_filter_accept_flags accept_flags, 878316485Sdavidcs u8 update_accept_any_vlan, 879316485Sdavidcs u8 accept_any_vlan, 880316485Sdavidcs enum spq_mode comp_mode, 881316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 882316485Sdavidcs{ 883316485Sdavidcs struct ecore_sp_vport_update_params vport_update_params; 884316485Sdavidcs int i, rc; 
885316485Sdavidcs 886316485Sdavidcs /* Prepare and send the vport rx_mode change */ 887316485Sdavidcs OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); 888316485Sdavidcs vport_update_params.vport_id = vport; 889316485Sdavidcs vport_update_params.accept_flags = accept_flags; 890316485Sdavidcs vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 891316485Sdavidcs vport_update_params.accept_any_vlan = accept_any_vlan; 892316485Sdavidcs 893316485Sdavidcs for_each_hwfn(p_dev, i) { 894316485Sdavidcs struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 895316485Sdavidcs 896316485Sdavidcs vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 897316485Sdavidcs 898316485Sdavidcs if (IS_VF(p_dev)) { 899316485Sdavidcs rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); 900316485Sdavidcs if (rc != ECORE_SUCCESS) 901316485Sdavidcs return rc; 902316485Sdavidcs continue; 903316485Sdavidcs } 904316485Sdavidcs 905316485Sdavidcs rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 906316485Sdavidcs comp_mode, p_comp_data); 907316485Sdavidcs if (rc != ECORE_SUCCESS) { 908316485Sdavidcs DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); 909316485Sdavidcs return rc; 910316485Sdavidcs } 911316485Sdavidcs 912316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 913316485Sdavidcs "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 914316485Sdavidcs accept_flags.rx_accept_filter, 915316485Sdavidcs accept_flags.tx_accept_filter); 916316485Sdavidcs 917316485Sdavidcs if (update_accept_any_vlan) 918316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 919316485Sdavidcs "accept_any_vlan=%d configured\n", 920316485Sdavidcs accept_any_vlan); 921316485Sdavidcs } 922316485Sdavidcs 923316485Sdavidcs return 0; 924316485Sdavidcs} 925316485Sdavidcs 926316485Sdavidcsenum _ecore_status_t 927316485Sdavidcsecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, 928316485Sdavidcs struct ecore_queue_cid *p_cid, 929316485Sdavidcs u16 bd_max_bytes, 930316485Sdavidcs dma_addr_t 
bd_chain_phys_addr, 931316485Sdavidcs dma_addr_t cqe_pbl_addr, 932316485Sdavidcs u16 cqe_pbl_size) 933316485Sdavidcs{ 934316485Sdavidcs struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 935316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 936316485Sdavidcs struct ecore_sp_init_data init_data; 937316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 938316485Sdavidcs 939316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", 940316485Sdavidcs p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id, 941316485Sdavidcs p_cid->abs.vport_id, p_cid->sb_igu_id); 942316485Sdavidcs 943316485Sdavidcs /* Get SPQ entry */ 944316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 945316485Sdavidcs init_data.cid = p_cid->cid; 946316485Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 947316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 948316485Sdavidcs 949316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 950316485Sdavidcs ETH_RAMROD_RX_QUEUE_START, 951316485Sdavidcs PROTOCOLID_ETH, &init_data); 952316485Sdavidcs if (rc != ECORE_SUCCESS) 953316485Sdavidcs return rc; 954316485Sdavidcs 955316485Sdavidcs p_ramrod = &p_ent->ramrod.rx_queue_start; 956316485Sdavidcs 957316485Sdavidcs p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 958316485Sdavidcs p_ramrod->sb_index = p_cid->sb_idx; 959316485Sdavidcs p_ramrod->vport_id = p_cid->abs.vport_id; 960316485Sdavidcs p_ramrod->stats_counter_id = p_cid->abs.stats_id; 961316485Sdavidcs p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 962316485Sdavidcs p_ramrod->complete_cqe_flg = 0; 963316485Sdavidcs p_ramrod->complete_event_flg = 1; 964316485Sdavidcs 965316485Sdavidcs p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes); 966316485Sdavidcs DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); 967316485Sdavidcs 968316485Sdavidcs p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); 969316485Sdavidcs 
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 970316485Sdavidcs 971316485Sdavidcs if (p_cid->vfid != ECORE_QUEUE_CID_PF) { 972316485Sdavidcs bool b_legacy_vf = !!(p_cid->vf_legacy & 973316485Sdavidcs ECORE_QCID_LEGACY_VF_RX_PROD); 974316485Sdavidcs 975316485Sdavidcs p_ramrod->vf_rx_prod_index = p_cid->vf_qid; 976316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", 977316485Sdavidcs b_legacy_vf ? " [legacy]" : "", 978316485Sdavidcs p_cid->vf_qid); 979316485Sdavidcs p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; 980316485Sdavidcs } 981316485Sdavidcs 982316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 983316485Sdavidcs} 984316485Sdavidcs 985316485Sdavidcsstatic enum _ecore_status_t 986316485Sdavidcsecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn, 987316485Sdavidcs struct ecore_queue_cid *p_cid, 988316485Sdavidcs u16 bd_max_bytes, 989316485Sdavidcs dma_addr_t bd_chain_phys_addr, 990316485Sdavidcs dma_addr_t cqe_pbl_addr, 991316485Sdavidcs u16 cqe_pbl_size, 992316485Sdavidcs void OSAL_IOMEM **pp_prod) 993316485Sdavidcs{ 994316485Sdavidcs u32 init_prod_val = 0; 995316485Sdavidcs 996316485Sdavidcs *pp_prod = (u8 OSAL_IOMEM*) 997316485Sdavidcs p_hwfn->regview + 998316485Sdavidcs GTT_BAR0_MAP_REG_MSDM_RAM + 999316485Sdavidcs MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); 1000316485Sdavidcs 1001316485Sdavidcs /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 1002316485Sdavidcs __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 1003316485Sdavidcs (u32 *)(&init_prod_val)); 1004316485Sdavidcs 1005316485Sdavidcs return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, 1006316485Sdavidcs bd_max_bytes, 1007316485Sdavidcs bd_chain_phys_addr, 1008316485Sdavidcs cqe_pbl_addr, cqe_pbl_size); 1009316485Sdavidcs} 1010316485Sdavidcs 1011316485Sdavidcsenum _ecore_status_t 1012316485Sdavidcsecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, 1013316485Sdavidcs u16 opaque_fid, 1014316485Sdavidcs struct 
ecore_queue_start_common_params *p_params, 1015316485Sdavidcs u16 bd_max_bytes, 1016316485Sdavidcs dma_addr_t bd_chain_phys_addr, 1017316485Sdavidcs dma_addr_t cqe_pbl_addr, 1018316485Sdavidcs u16 cqe_pbl_size, 1019316485Sdavidcs struct ecore_rxq_start_ret_params *p_ret_params) 1020316485Sdavidcs{ 1021316485Sdavidcs struct ecore_queue_cid *p_cid; 1022316485Sdavidcs enum _ecore_status_t rc; 1023316485Sdavidcs 1024316485Sdavidcs /* Allocate a CID for the queue */ 1025337519Sdavidcs p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); 1026316485Sdavidcs if (p_cid == OSAL_NULL) 1027316485Sdavidcs return ECORE_NOMEM; 1028316485Sdavidcs 1029316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) 1030316485Sdavidcs rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid, 1031316485Sdavidcs bd_max_bytes, 1032316485Sdavidcs bd_chain_phys_addr, 1033316485Sdavidcs cqe_pbl_addr, cqe_pbl_size, 1034316485Sdavidcs &p_ret_params->p_prod); 1035316485Sdavidcs else 1036316485Sdavidcs rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid, 1037316485Sdavidcs bd_max_bytes, 1038316485Sdavidcs bd_chain_phys_addr, 1039316485Sdavidcs cqe_pbl_addr, 1040316485Sdavidcs cqe_pbl_size, 1041316485Sdavidcs &p_ret_params->p_prod); 1042316485Sdavidcs 1043316485Sdavidcs /* Provide the caller with a reference to as handler */ 1044316485Sdavidcs if (rc != ECORE_SUCCESS) 1045316485Sdavidcs ecore_eth_queue_cid_release(p_hwfn, p_cid); 1046316485Sdavidcs else 1047316485Sdavidcs p_ret_params->p_handle = (void *)p_cid; 1048316485Sdavidcs 1049316485Sdavidcs return rc; 1050316485Sdavidcs} 1051316485Sdavidcs 1052316485Sdavidcsenum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, 1053316485Sdavidcs void **pp_rxq_handles, 1054316485Sdavidcs u8 num_rxqs, 1055316485Sdavidcs u8 complete_cqe_flg, 1056316485Sdavidcs u8 complete_event_flg, 1057316485Sdavidcs enum spq_mode comp_mode, 1058316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1059316485Sdavidcs{ 1060316485Sdavidcs struct rx_queue_update_ramrod_data 
*p_ramrod = OSAL_NULL; 1061316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1062316485Sdavidcs struct ecore_sp_init_data init_data; 1063316485Sdavidcs struct ecore_queue_cid *p_cid; 1064316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 1065316485Sdavidcs u8 i; 1066316485Sdavidcs 1067337519Sdavidcs#ifndef LINUX_REMOVE 1068316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 1069316485Sdavidcs return ecore_vf_pf_rxqs_update(p_hwfn, 1070316485Sdavidcs (struct ecore_queue_cid **) 1071316485Sdavidcs pp_rxq_handles, 1072316485Sdavidcs num_rxqs, 1073316485Sdavidcs complete_cqe_flg, 1074316485Sdavidcs complete_event_flg); 1075337519Sdavidcs#endif 1076316485Sdavidcs 1077316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1078316485Sdavidcs init_data.comp_mode = comp_mode; 1079316485Sdavidcs init_data.p_comp_data = p_comp_data; 1080316485Sdavidcs 1081316485Sdavidcs for (i = 0; i < num_rxqs; i++) { 1082316485Sdavidcs p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i]; 1083316485Sdavidcs 1084316485Sdavidcs /* Get SPQ entry */ 1085316485Sdavidcs init_data.cid = p_cid->cid; 1086316485Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 1087316485Sdavidcs 1088316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1089316485Sdavidcs ETH_RAMROD_RX_QUEUE_UPDATE, 1090316485Sdavidcs PROTOCOLID_ETH, &init_data); 1091316485Sdavidcs if (rc != ECORE_SUCCESS) 1092316485Sdavidcs return rc; 1093316485Sdavidcs 1094316485Sdavidcs p_ramrod = &p_ent->ramrod.rx_queue_update; 1095316485Sdavidcs p_ramrod->vport_id = p_cid->abs.vport_id; 1096316485Sdavidcs 1097316485Sdavidcs p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1098316485Sdavidcs p_ramrod->complete_cqe_flg = complete_cqe_flg; 1099316485Sdavidcs p_ramrod->complete_event_flg = complete_event_flg; 1100316485Sdavidcs 1101316485Sdavidcs rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1102316485Sdavidcs if (rc != ECORE_SUCCESS) 1103316485Sdavidcs return rc; 1104316485Sdavidcs } 1105316485Sdavidcs 
1106316485Sdavidcs return rc; 1107316485Sdavidcs} 1108316485Sdavidcs 1109337519Sdavidcsenum _ecore_status_t 1110337519Sdavidcsecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn, 1111337519Sdavidcs void *p_rxq_handler, 1112337519Sdavidcs enum spq_mode comp_mode, 1113337519Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1114337519Sdavidcs{ 1115337519Sdavidcs struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; 1116337519Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1117337519Sdavidcs struct ecore_sp_init_data init_data; 1118337519Sdavidcs struct ecore_queue_cid *p_cid; 1119337519Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1120337519Sdavidcs 1121337519Sdavidcs if (IS_VF(p_hwfn->p_dev)) 1122337519Sdavidcs return ECORE_NOTIMPL; 1123337519Sdavidcs 1124337519Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1125337519Sdavidcs init_data.comp_mode = comp_mode; 1126337519Sdavidcs init_data.p_comp_data = p_comp_data; 1127337519Sdavidcs 1128337519Sdavidcs p_cid = (struct ecore_queue_cid *)p_rxq_handler; 1129337519Sdavidcs 1130337519Sdavidcs /* Get SPQ entry */ 1131337519Sdavidcs init_data.cid = p_cid->cid; 1132337519Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 1133337519Sdavidcs 1134337519Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1135337519Sdavidcs ETH_RAMROD_RX_QUEUE_UPDATE, 1136337519Sdavidcs PROTOCOLID_ETH, &init_data); 1137337519Sdavidcs if (rc != ECORE_SUCCESS) 1138337519Sdavidcs return rc; 1139337519Sdavidcs 1140337519Sdavidcs p_ramrod = &p_ent->ramrod.rx_queue_update; 1141337519Sdavidcs p_ramrod->vport_id = p_cid->abs.vport_id; 1142337519Sdavidcs 1143337519Sdavidcs p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1144337519Sdavidcs p_ramrod->complete_cqe_flg = 0; 1145337519Sdavidcs p_ramrod->complete_event_flg = 1; 1146337519Sdavidcs p_ramrod->set_default_rss_queue = 1; 1147337519Sdavidcs 1148337519Sdavidcs rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1149337519Sdavidcs 1150337519Sdavidcs return rc; 
1151337519Sdavidcs} 1152337519Sdavidcs 1153316485Sdavidcsstatic enum _ecore_status_t 1154316485Sdavidcsecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1155316485Sdavidcs struct ecore_queue_cid *p_cid, 1156316485Sdavidcs bool b_eq_completion_only, 1157316485Sdavidcs bool b_cqe_completion) 1158316485Sdavidcs{ 1159316485Sdavidcs struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; 1160316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1161316485Sdavidcs struct ecore_sp_init_data init_data; 1162316485Sdavidcs enum _ecore_status_t rc; 1163316485Sdavidcs 1164316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1165316485Sdavidcs init_data.cid = p_cid->cid; 1166316485Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 1167316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1168316485Sdavidcs 1169316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1170316485Sdavidcs ETH_RAMROD_RX_QUEUE_STOP, 1171316485Sdavidcs PROTOCOLID_ETH, &init_data); 1172316485Sdavidcs if (rc != ECORE_SUCCESS) 1173316485Sdavidcs return rc; 1174316485Sdavidcs 1175316485Sdavidcs p_ramrod = &p_ent->ramrod.rx_queue_stop; 1176316485Sdavidcs p_ramrod->vport_id = p_cid->abs.vport_id; 1177316485Sdavidcs p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1178316485Sdavidcs 1179316485Sdavidcs /* Cleaning the queue requires the completion to arrive there. 1180316485Sdavidcs * In addition, VFs require the answer to come as eqe to PF. 
1181316485Sdavidcs */ 1182316485Sdavidcs p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) && 1183316485Sdavidcs !b_eq_completion_only) || 1184316485Sdavidcs b_cqe_completion; 1185316485Sdavidcs p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) || 1186316485Sdavidcs b_eq_completion_only; 1187316485Sdavidcs 1188316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1189316485Sdavidcs} 1190316485Sdavidcs 1191316485Sdavidcsenum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1192316485Sdavidcs void *p_rxq, 1193316485Sdavidcs bool eq_completion_only, 1194316485Sdavidcs bool cqe_completion) 1195316485Sdavidcs{ 1196316485Sdavidcs struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq; 1197316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 1198316485Sdavidcs 1199316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) 1200316485Sdavidcs rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid, 1201316485Sdavidcs eq_completion_only, 1202316485Sdavidcs cqe_completion); 1203316485Sdavidcs else 1204316485Sdavidcs rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); 1205316485Sdavidcs 1206316485Sdavidcs if (rc == ECORE_SUCCESS) 1207316485Sdavidcs ecore_eth_queue_cid_release(p_hwfn, p_cid); 1208316485Sdavidcs return rc; 1209316485Sdavidcs} 1210316485Sdavidcs 1211316485Sdavidcsenum _ecore_status_t 1212316485Sdavidcsecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, 1213316485Sdavidcs struct ecore_queue_cid *p_cid, 1214316485Sdavidcs dma_addr_t pbl_addr, u16 pbl_size, 1215316485Sdavidcs u16 pq_id) 1216316485Sdavidcs{ 1217316485Sdavidcs struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 1218316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1219316485Sdavidcs struct ecore_sp_init_data init_data; 1220316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 1221316485Sdavidcs 1222316485Sdavidcs /* Get SPQ entry */ 1223316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1224316485Sdavidcs init_data.cid = 
p_cid->cid; 1225316485Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 1226316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1227316485Sdavidcs 1228316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1229316485Sdavidcs ETH_RAMROD_TX_QUEUE_START, 1230316485Sdavidcs PROTOCOLID_ETH, &init_data); 1231316485Sdavidcs if (rc != ECORE_SUCCESS) 1232316485Sdavidcs return rc; 1233316485Sdavidcs 1234316485Sdavidcs p_ramrod = &p_ent->ramrod.tx_queue_start; 1235316485Sdavidcs p_ramrod->vport_id = p_cid->abs.vport_id; 1236316485Sdavidcs 1237316485Sdavidcs p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 1238316485Sdavidcs p_ramrod->sb_index = p_cid->sb_idx; 1239316485Sdavidcs p_ramrod->stats_counter_id = p_cid->abs.stats_id; 1240316485Sdavidcs 1241316485Sdavidcs p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1242316485Sdavidcs p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1243316485Sdavidcs 1244316485Sdavidcs p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); 1245316485Sdavidcs DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 1246316485Sdavidcs 1247316485Sdavidcs p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); 1248316485Sdavidcs 1249316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1250316485Sdavidcs} 1251316485Sdavidcs 1252316485Sdavidcsstatic enum _ecore_status_t 1253316485Sdavidcsecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, 1254316485Sdavidcs struct ecore_queue_cid *p_cid, 1255316485Sdavidcs u8 tc, 1256316485Sdavidcs dma_addr_t pbl_addr, u16 pbl_size, 1257316485Sdavidcs void OSAL_IOMEM **pp_doorbell) 1258316485Sdavidcs{ 1259316485Sdavidcs enum _ecore_status_t rc; 1260316485Sdavidcs 1261316485Sdavidcs /* TODO - set tc in the pq_params for multi-cos */ 1262316485Sdavidcs rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, 1263316485Sdavidcs pbl_addr, pbl_size, 1264316485Sdavidcs ecore_get_cm_pq_idx_mcos(p_hwfn, tc)); 1265316485Sdavidcs if (rc != ECORE_SUCCESS) 1266316485Sdavidcs return rc; 
1267316485Sdavidcs 1268316485Sdavidcs /* Provide the caller with the necessary return values */ 1269316485Sdavidcs *pp_doorbell = (u8 OSAL_IOMEM *) 1270316485Sdavidcs p_hwfn->doorbells + 1271316485Sdavidcs DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY); 1272316485Sdavidcs 1273316485Sdavidcs return ECORE_SUCCESS; 1274316485Sdavidcs} 1275316485Sdavidcs 1276316485Sdavidcsenum _ecore_status_t 1277316485Sdavidcsecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 1278316485Sdavidcs struct ecore_queue_start_common_params *p_params, 1279316485Sdavidcs u8 tc, 1280316485Sdavidcs dma_addr_t pbl_addr, u16 pbl_size, 1281316485Sdavidcs struct ecore_txq_start_ret_params *p_ret_params) 1282316485Sdavidcs{ 1283316485Sdavidcs struct ecore_queue_cid *p_cid; 1284316485Sdavidcs enum _ecore_status_t rc; 1285316485Sdavidcs 1286337519Sdavidcs p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); 1287316485Sdavidcs if (p_cid == OSAL_NULL) 1288316485Sdavidcs return ECORE_INVAL; 1289316485Sdavidcs 1290316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) 1291316485Sdavidcs rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, 1292316485Sdavidcs pbl_addr, pbl_size, 1293316485Sdavidcs &p_ret_params->p_doorbell); 1294316485Sdavidcs else 1295316485Sdavidcs rc = ecore_vf_pf_txq_start(p_hwfn, p_cid, 1296316485Sdavidcs pbl_addr, pbl_size, 1297316485Sdavidcs &p_ret_params->p_doorbell); 1298316485Sdavidcs 1299316485Sdavidcs if (rc != ECORE_SUCCESS) 1300316485Sdavidcs ecore_eth_queue_cid_release(p_hwfn, p_cid); 1301316485Sdavidcs else 1302316485Sdavidcs p_ret_params->p_handle = (void *)p_cid; 1303316485Sdavidcs 1304316485Sdavidcs return rc; 1305316485Sdavidcs} 1306316485Sdavidcs 1307316485Sdavidcsstatic enum _ecore_status_t 1308316485Sdavidcsecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1309316485Sdavidcs struct ecore_queue_cid *p_cid) 1310316485Sdavidcs{ 1311316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1312316485Sdavidcs struct ecore_sp_init_data init_data; 
1313316485Sdavidcs enum _ecore_status_t rc; 1314316485Sdavidcs 1315316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1316316485Sdavidcs init_data.cid = p_cid->cid; 1317316485Sdavidcs init_data.opaque_fid = p_cid->opaque_fid; 1318316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1319316485Sdavidcs 1320316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1321316485Sdavidcs ETH_RAMROD_TX_QUEUE_STOP, 1322316485Sdavidcs PROTOCOLID_ETH, &init_data); 1323316485Sdavidcs if (rc != ECORE_SUCCESS) 1324316485Sdavidcs return rc; 1325316485Sdavidcs 1326316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1327316485Sdavidcs} 1328316485Sdavidcs 1329316485Sdavidcsenum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1330316485Sdavidcs void *p_handle) 1331316485Sdavidcs{ 1332316485Sdavidcs struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 1333316485Sdavidcs enum _ecore_status_t rc; 1334316485Sdavidcs 1335316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) 1336316485Sdavidcs rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid); 1337316485Sdavidcs else 1338316485Sdavidcs rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid); 1339316485Sdavidcs 1340316485Sdavidcs if (rc == ECORE_SUCCESS) 1341316485Sdavidcs ecore_eth_queue_cid_release(p_hwfn, p_cid); 1342316485Sdavidcs return rc; 1343316485Sdavidcs} 1344316485Sdavidcs 1345316485Sdavidcsstatic enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode) 1346316485Sdavidcs{ 1347316485Sdavidcs enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 1348316485Sdavidcs 1349316485Sdavidcs switch (opcode) { 1350316485Sdavidcs case ECORE_FILTER_ADD: 1351316485Sdavidcs action = ETH_FILTER_ACTION_ADD; 1352316485Sdavidcs break; 1353316485Sdavidcs case ECORE_FILTER_REMOVE: 1354316485Sdavidcs action = ETH_FILTER_ACTION_REMOVE; 1355316485Sdavidcs break; 1356316485Sdavidcs case ECORE_FILTER_FLUSH: 1357316485Sdavidcs action = ETH_FILTER_ACTION_REMOVE_ALL; 1358316485Sdavidcs break; 
1359316485Sdavidcs default: 1360316485Sdavidcs action = MAX_ETH_FILTER_ACTION; 1361316485Sdavidcs } 1362316485Sdavidcs 1363316485Sdavidcs return action; 1364316485Sdavidcs} 1365316485Sdavidcs 1366316485Sdavidcsstatic enum _ecore_status_t 1367316485Sdavidcsecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, 1368316485Sdavidcs u16 opaque_fid, 1369316485Sdavidcs struct ecore_filter_ucast *p_filter_cmd, 1370316485Sdavidcs struct vport_filter_update_ramrod_data **pp_ramrod, 1371316485Sdavidcs struct ecore_spq_entry **pp_ent, 1372316485Sdavidcs enum spq_mode comp_mode, 1373316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1374316485Sdavidcs{ 1375316485Sdavidcs u8 vport_to_add_to = 0, vport_to_remove_from = 0; 1376316485Sdavidcs struct vport_filter_update_ramrod_data *p_ramrod; 1377316485Sdavidcs struct eth_filter_cmd *p_first_filter; 1378316485Sdavidcs struct eth_filter_cmd *p_second_filter; 1379316485Sdavidcs struct ecore_sp_init_data init_data; 1380316485Sdavidcs enum eth_filter_action action; 1381316485Sdavidcs enum _ecore_status_t rc; 1382316485Sdavidcs 1383316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1384316485Sdavidcs &vport_to_remove_from); 1385316485Sdavidcs if (rc != ECORE_SUCCESS) 1386316485Sdavidcs return rc; 1387316485Sdavidcs 1388316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1389316485Sdavidcs &vport_to_add_to); 1390316485Sdavidcs if (rc != ECORE_SUCCESS) 1391316485Sdavidcs return rc; 1392316485Sdavidcs 1393316485Sdavidcs /* Get SPQ entry */ 1394316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1395316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 1396316485Sdavidcs init_data.opaque_fid = opaque_fid; 1397316485Sdavidcs init_data.comp_mode = comp_mode; 1398316485Sdavidcs init_data.p_comp_data = p_comp_data; 1399316485Sdavidcs 1400316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, pp_ent, 1401316485Sdavidcs ETH_RAMROD_FILTERS_UPDATE, 1402316485Sdavidcs PROTOCOLID_ETH, 
&init_data); 1403316485Sdavidcs if (rc != ECORE_SUCCESS) 1404316485Sdavidcs return rc; 1405316485Sdavidcs 1406316485Sdavidcs *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 1407316485Sdavidcs p_ramrod = *pp_ramrod; 1408316485Sdavidcs p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 1409316485Sdavidcs p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; 1410316485Sdavidcs 1411316485Sdavidcs#ifndef ASIC_ONLY 1412316485Sdavidcs if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1413316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1414316485Sdavidcs "Non-Asic - prevent Tx filters\n"); 1415316485Sdavidcs p_ramrod->filter_cmd_hdr.tx = 0; 1416316485Sdavidcs } 1417316485Sdavidcs 1418316485Sdavidcs#endif 1419316485Sdavidcs 1420316485Sdavidcs switch (p_filter_cmd->opcode) { 1421316485Sdavidcs case ECORE_FILTER_REPLACE: 1422316485Sdavidcs case ECORE_FILTER_MOVE: 1423316485Sdavidcs p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; 1424316485Sdavidcs default: 1425316485Sdavidcs p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; 1426316485Sdavidcs } 1427316485Sdavidcs 1428316485Sdavidcs p_first_filter = &p_ramrod->filter_cmds[0]; 1429316485Sdavidcs p_second_filter = &p_ramrod->filter_cmds[1]; 1430316485Sdavidcs 1431316485Sdavidcs switch (p_filter_cmd->type) { 1432316485Sdavidcs case ECORE_FILTER_MAC: 1433316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_MAC; break; 1434316485Sdavidcs case ECORE_FILTER_VLAN: 1435316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; 1436316485Sdavidcs case ECORE_FILTER_MAC_VLAN: 1437316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; 1438316485Sdavidcs case ECORE_FILTER_INNER_MAC: 1439316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; 1440316485Sdavidcs case ECORE_FILTER_INNER_VLAN: 1441316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; 1442316485Sdavidcs case ECORE_FILTER_INNER_PAIR: 1443316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; 
1444316485Sdavidcs case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1445316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1446316485Sdavidcs break; 1447316485Sdavidcs case ECORE_FILTER_MAC_VNI_PAIR: 1448316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; 1449316485Sdavidcs case ECORE_FILTER_VNI: 1450316485Sdavidcs p_first_filter->type = ETH_FILTER_TYPE_VNI; break; 1451316485Sdavidcs } 1452316485Sdavidcs 1453316485Sdavidcs if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1454316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1455316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1456316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1457316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1458316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) 1459316485Sdavidcs ecore_set_fw_mac_addr(&p_first_filter->mac_msb, 1460316485Sdavidcs &p_first_filter->mac_mid, 1461316485Sdavidcs &p_first_filter->mac_lsb, 1462316485Sdavidcs (u8 *)p_filter_cmd->mac); 1463316485Sdavidcs 1464316485Sdavidcs if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1465316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1466316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1467316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1468316485Sdavidcs p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); 1469316485Sdavidcs 1470316485Sdavidcs if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1471316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1472316485Sdavidcs (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1473316485Sdavidcs p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); 1474316485Sdavidcs 1475316485Sdavidcs if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { 1476316485Sdavidcs p_second_filter->type = p_first_filter->type; 1477316485Sdavidcs 
p_second_filter->mac_msb = p_first_filter->mac_msb; 1478316485Sdavidcs p_second_filter->mac_mid = p_first_filter->mac_mid; 1479316485Sdavidcs p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1480316485Sdavidcs p_second_filter->vlan_id = p_first_filter->vlan_id; 1481316485Sdavidcs p_second_filter->vni = p_first_filter->vni; 1482316485Sdavidcs 1483316485Sdavidcs p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1484316485Sdavidcs 1485316485Sdavidcs p_first_filter->vport_id = vport_to_remove_from; 1486316485Sdavidcs 1487316485Sdavidcs p_second_filter->action = ETH_FILTER_ACTION_ADD; 1488316485Sdavidcs p_second_filter->vport_id = vport_to_add_to; 1489316485Sdavidcs } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { 1490316485Sdavidcs p_first_filter->vport_id = vport_to_add_to; 1491316485Sdavidcs OSAL_MEMCPY(p_second_filter, p_first_filter, 1492316485Sdavidcs sizeof(*p_second_filter)); 1493316485Sdavidcs p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1494316485Sdavidcs p_second_filter->action = ETH_FILTER_ACTION_ADD; 1495316485Sdavidcs } else { 1496316485Sdavidcs action = ecore_filter_action(p_filter_cmd->opcode); 1497316485Sdavidcs 1498316485Sdavidcs if (action == MAX_ETH_FILTER_ACTION) { 1499316485Sdavidcs DP_NOTICE(p_hwfn, true, 1500316485Sdavidcs "%d is not supported yet\n", 1501316485Sdavidcs p_filter_cmd->opcode); 1502316485Sdavidcs return ECORE_NOTIMPL; 1503316485Sdavidcs } 1504316485Sdavidcs 1505316485Sdavidcs p_first_filter->action = action; 1506316485Sdavidcs p_first_filter->vport_id = 1507316485Sdavidcs (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 
1508316485Sdavidcs vport_to_remove_from : vport_to_add_to; 1509316485Sdavidcs } 1510316485Sdavidcs 1511316485Sdavidcs return ECORE_SUCCESS; 1512316485Sdavidcs} 1513316485Sdavidcs 1514316485Sdavidcsenum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, 1515316485Sdavidcs u16 opaque_fid, 1516316485Sdavidcs struct ecore_filter_ucast *p_filter_cmd, 1517316485Sdavidcs enum spq_mode comp_mode, 1518316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1519316485Sdavidcs{ 1520316485Sdavidcs struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; 1521316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1522316485Sdavidcs struct eth_filter_cmd_header *p_header; 1523316485Sdavidcs enum _ecore_status_t rc; 1524316485Sdavidcs 1525316485Sdavidcs rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1526316485Sdavidcs &p_ramrod, &p_ent, 1527316485Sdavidcs comp_mode, p_comp_data); 1528316485Sdavidcs if (rc != ECORE_SUCCESS) { 1529316485Sdavidcs DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); 1530316485Sdavidcs return rc; 1531316485Sdavidcs } 1532316485Sdavidcs p_header = &p_ramrod->filter_cmd_hdr; 1533316485Sdavidcs p_header->assert_on_error = p_filter_cmd->assert_on_error; 1534316485Sdavidcs 1535316485Sdavidcs rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1536316485Sdavidcs if (rc != ECORE_SUCCESS) { 1537316485Sdavidcs DP_ERR(p_hwfn, 1538316485Sdavidcs "Unicast filter ADD command failed %d\n", 1539316485Sdavidcs rc); 1540316485Sdavidcs return rc; 1541316485Sdavidcs } 1542316485Sdavidcs 1543316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1544316485Sdavidcs "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1545316485Sdavidcs (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : 1546316485Sdavidcs ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1547316485Sdavidcs "REMOVE" : 1548316485Sdavidcs ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? 
1549316485Sdavidcs "MOVE" : "REPLACE")), 1550316485Sdavidcs (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : 1551316485Sdavidcs ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 1552316485Sdavidcs "VLAN" : "MAC & VLAN"), 1553316485Sdavidcs p_ramrod->filter_cmd_hdr.cmd_cnt, 1554316485Sdavidcs p_filter_cmd->is_rx_filter, 1555316485Sdavidcs p_filter_cmd->is_tx_filter); 1556316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1557316485Sdavidcs "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1558316485Sdavidcs p_filter_cmd->vport_to_add_to, 1559316485Sdavidcs p_filter_cmd->vport_to_remove_from, 1560316485Sdavidcs p_filter_cmd->mac[0], p_filter_cmd->mac[1], 1561316485Sdavidcs p_filter_cmd->mac[2], p_filter_cmd->mac[3], 1562316485Sdavidcs p_filter_cmd->mac[4], p_filter_cmd->mac[5], 1563316485Sdavidcs p_filter_cmd->vlan); 1564316485Sdavidcs 1565316485Sdavidcs return ECORE_SUCCESS; 1566316485Sdavidcs} 1567316485Sdavidcs 1568316485Sdavidcs/******************************************************************************* 1569316485Sdavidcs * Description: 1570316485Sdavidcs * Calculates crc 32 on a buffer 1571316485Sdavidcs * Note: crc32_length MUST be aligned to 8 1572316485Sdavidcs * Return: 1573316485Sdavidcs ******************************************************************************/ 1574320162Sdavidcsstatic u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed) 1575316485Sdavidcs{ 1576316485Sdavidcs u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1577316485Sdavidcs u8 msb = 0, current_byte = 0; 1578316485Sdavidcs 1579316485Sdavidcs if ((crc32_packet == OSAL_NULL) || 1580316485Sdavidcs (crc32_length == 0) || 1581316485Sdavidcs ((crc32_length % 8) != 0)) { 1582316485Sdavidcs return crc32_result; 1583316485Sdavidcs } 1584316485Sdavidcs 1585316485Sdavidcs for (byte = 0; byte < crc32_length; byte++) { 1586316485Sdavidcs current_byte = crc32_packet[byte]; 1587316485Sdavidcs for (bit = 0; bit < 8; bit++) { 
1588316485Sdavidcs msb = (u8)(crc32_result >> 31); 1589316485Sdavidcs crc32_result = crc32_result << 1; 1590316485Sdavidcs if (msb != (0x1 & (current_byte >> bit))) { 1591316485Sdavidcs crc32_result = crc32_result ^ CRC32_POLY; 1592316485Sdavidcs crc32_result |= 1; /*crc32_result[0] = 1;*/ 1593316485Sdavidcs } 1594316485Sdavidcs } 1595316485Sdavidcs } 1596316485Sdavidcs 1597316485Sdavidcs return crc32_result; 1598316485Sdavidcs} 1599316485Sdavidcs 1600320162Sdavidcsstatic u32 ecore_crc32c_le(u32 seed, u8 *mac) 1601316485Sdavidcs{ 1602316485Sdavidcs u32 packet_buf[2] = {0}; 1603316485Sdavidcs 1604316485Sdavidcs OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); 1605320162Sdavidcs return ecore_calc_crc32c((u8 *)packet_buf, 8, seed); 1606316485Sdavidcs} 1607316485Sdavidcs 1608316485Sdavidcsu8 ecore_mcast_bin_from_mac(u8 *mac) 1609316485Sdavidcs{ 1610320162Sdavidcs u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac); 1611316485Sdavidcs 1612316485Sdavidcs return crc & 0xff; 1613316485Sdavidcs} 1614316485Sdavidcs 1615316485Sdavidcsstatic enum _ecore_status_t 1616316485Sdavidcsecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, 1617316485Sdavidcs struct ecore_filter_mcast *p_filter_cmd, 1618316485Sdavidcs enum spq_mode comp_mode, 1619316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1620316485Sdavidcs{ 1621316485Sdavidcs struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 1622337519Sdavidcs u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1623316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 1624316485Sdavidcs struct ecore_sp_init_data init_data; 1625316485Sdavidcs u8 abs_vport_id = 0; 1626316485Sdavidcs enum _ecore_status_t rc; 1627316485Sdavidcs int i; 1628316485Sdavidcs 1629316485Sdavidcs if (p_filter_cmd->opcode == ECORE_FILTER_ADD) 1630316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1631316485Sdavidcs &abs_vport_id); 1632316485Sdavidcs else 1633316485Sdavidcs rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 
1634316485Sdavidcs &abs_vport_id); 1635316485Sdavidcs if (rc != ECORE_SUCCESS) 1636316485Sdavidcs return rc; 1637316485Sdavidcs 1638316485Sdavidcs /* Get SPQ entry */ 1639316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1640316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 1641316485Sdavidcs init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1642316485Sdavidcs init_data.comp_mode = comp_mode; 1643316485Sdavidcs init_data.p_comp_data = p_comp_data; 1644316485Sdavidcs 1645316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 1646316485Sdavidcs ETH_RAMROD_VPORT_UPDATE, 1647316485Sdavidcs PROTOCOLID_ETH, &init_data); 1648316485Sdavidcs if (rc != ECORE_SUCCESS) { 1649316485Sdavidcs DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1650316485Sdavidcs return rc; 1651316485Sdavidcs } 1652316485Sdavidcs 1653316485Sdavidcs p_ramrod = &p_ent->ramrod.vport_update; 1654316485Sdavidcs p_ramrod->common.update_approx_mcast_flg = 1; 1655316485Sdavidcs 1656316485Sdavidcs /* explicitly clear out the entire vector */ 1657316485Sdavidcs OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 1658316485Sdavidcs 0, sizeof(p_ramrod->approx_mcast.bins)); 1659337519Sdavidcs OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1660316485Sdavidcs /* filter ADD op is explicit set op and it removes 1661316485Sdavidcs * any existing filters for the vport. 
1662316485Sdavidcs */ 1663316485Sdavidcs if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1664316485Sdavidcs for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1665316485Sdavidcs u32 bit; 1666316485Sdavidcs 1667316485Sdavidcs bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1668337519Sdavidcs bins[bit / 32] |= 1 << (bit % 32); 1669316485Sdavidcs } 1670316485Sdavidcs 1671316485Sdavidcs /* Convert to correct endianity */ 1672316485Sdavidcs for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1673316485Sdavidcs struct vport_update_ramrod_mcast *p_ramrod_bins; 1674316485Sdavidcs 1675316485Sdavidcs p_ramrod_bins = &p_ramrod->approx_mcast; 1676337519Sdavidcs p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); 1677316485Sdavidcs } 1678316485Sdavidcs } 1679316485Sdavidcs 1680316485Sdavidcs p_ramrod->common.vport_id = abs_vport_id; 1681316485Sdavidcs 1682316485Sdavidcs rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1683316485Sdavidcs if (rc != ECORE_SUCCESS) 1684316485Sdavidcs DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); 1685316485Sdavidcs 1686316485Sdavidcs return rc; 1687316485Sdavidcs} 1688316485Sdavidcs 1689316485Sdavidcsenum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev, 1690316485Sdavidcs struct ecore_filter_mcast *p_filter_cmd, 1691316485Sdavidcs enum spq_mode comp_mode, 1692316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1693316485Sdavidcs{ 1694316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1695316485Sdavidcs int i; 1696316485Sdavidcs 1697316485Sdavidcs /* only ADD and REMOVE operations are supported for multi-cast */ 1698316485Sdavidcs if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && 1699316485Sdavidcs (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || 1700316485Sdavidcs (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { 1701316485Sdavidcs return ECORE_INVAL; 1702316485Sdavidcs } 1703316485Sdavidcs 1704316485Sdavidcs for_each_hwfn(p_dev, i) { 1705316485Sdavidcs struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 
1706316485Sdavidcs 1707316485Sdavidcs if (IS_VF(p_dev)) { 1708316485Sdavidcs ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1709316485Sdavidcs continue; 1710316485Sdavidcs } 1711316485Sdavidcs 1712316485Sdavidcs rc = ecore_sp_eth_filter_mcast(p_hwfn, 1713316485Sdavidcs p_filter_cmd, 1714316485Sdavidcs comp_mode, 1715316485Sdavidcs p_comp_data); 1716316485Sdavidcs if (rc != ECORE_SUCCESS) 1717316485Sdavidcs break; 1718316485Sdavidcs } 1719316485Sdavidcs 1720316485Sdavidcs return rc; 1721316485Sdavidcs} 1722316485Sdavidcs 1723316485Sdavidcsenum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev, 1724316485Sdavidcs struct ecore_filter_ucast *p_filter_cmd, 1725316485Sdavidcs enum spq_mode comp_mode, 1726316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data) 1727316485Sdavidcs{ 1728316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1729316485Sdavidcs int i; 1730316485Sdavidcs 1731316485Sdavidcs for_each_hwfn(p_dev, i) { 1732316485Sdavidcs struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1733316485Sdavidcs u16 opaque_fid; 1734316485Sdavidcs 1735316485Sdavidcs if (IS_VF(p_dev)) { 1736316485Sdavidcs rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1737316485Sdavidcs continue; 1738316485Sdavidcs } 1739316485Sdavidcs 1740316485Sdavidcs opaque_fid = p_hwfn->hw_info.opaque_fid; 1741316485Sdavidcs rc = ecore_sp_eth_filter_ucast(p_hwfn, 1742316485Sdavidcs opaque_fid, 1743316485Sdavidcs p_filter_cmd, 1744316485Sdavidcs comp_mode, 1745316485Sdavidcs p_comp_data); 1746316485Sdavidcs if (rc != ECORE_SUCCESS) 1747316485Sdavidcs break; 1748316485Sdavidcs } 1749316485Sdavidcs 1750316485Sdavidcs return rc; 1751316485Sdavidcs} 1752316485Sdavidcs 1753316485Sdavidcs/* Statistics related code */ 1754316485Sdavidcsstatic void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, 1755316485Sdavidcs u32 *p_addr, u32 *p_len, 1756316485Sdavidcs u16 statistics_bin) 1757316485Sdavidcs{ 1758316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 1759316485Sdavidcs *p_addr = 
BAR0_MAP_REG_PSDM_RAM + 1760316485Sdavidcs PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1761316485Sdavidcs *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1762316485Sdavidcs } else { 1763316485Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1764316485Sdavidcs struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1765316485Sdavidcs 1766316485Sdavidcs *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1767316485Sdavidcs *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1768316485Sdavidcs } 1769316485Sdavidcs} 1770316485Sdavidcs 1771316485Sdavidcsstatic void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, 1772316485Sdavidcs struct ecore_ptt *p_ptt, 1773316485Sdavidcs struct ecore_eth_stats *p_stats, 1774316485Sdavidcs u16 statistics_bin) 1775316485Sdavidcs{ 1776316485Sdavidcs struct eth_pstorm_per_queue_stat pstats; 1777316485Sdavidcs u32 pstats_addr = 0, pstats_len = 0; 1778316485Sdavidcs 1779316485Sdavidcs __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1780316485Sdavidcs statistics_bin); 1781316485Sdavidcs 1782316485Sdavidcs OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 1783316485Sdavidcs ecore_memcpy_from(p_hwfn, p_ptt, &pstats, 1784316485Sdavidcs pstats_addr, pstats_len); 1785316485Sdavidcs 1786316485Sdavidcs p_stats->common.tx_ucast_bytes += 1787316485Sdavidcs HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1788316485Sdavidcs p_stats->common.tx_mcast_bytes += 1789316485Sdavidcs HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1790316485Sdavidcs p_stats->common.tx_bcast_bytes += 1791316485Sdavidcs HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1792316485Sdavidcs p_stats->common.tx_ucast_pkts += 1793316485Sdavidcs HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1794316485Sdavidcs p_stats->common.tx_mcast_pkts += 1795316485Sdavidcs HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1796316485Sdavidcs p_stats->common.tx_bcast_pkts += 1797316485Sdavidcs HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1798316485Sdavidcs p_stats->common.tx_err_drop_pkts += 
1799316485Sdavidcs HILO_64_REGPAIR(pstats.error_drop_pkts); 1800316485Sdavidcs} 1801316485Sdavidcs 1802316485Sdavidcsstatic void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, 1803316485Sdavidcs struct ecore_ptt *p_ptt, 1804320162Sdavidcs struct ecore_eth_stats *p_stats) 1805316485Sdavidcs{ 1806316485Sdavidcs struct tstorm_per_port_stat tstats; 1807316485Sdavidcs u32 tstats_addr, tstats_len; 1808316485Sdavidcs 1809316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 1810316485Sdavidcs tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1811316485Sdavidcs TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1812316485Sdavidcs tstats_len = sizeof(struct tstorm_per_port_stat); 1813316485Sdavidcs } else { 1814316485Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1815316485Sdavidcs struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1816316485Sdavidcs 1817316485Sdavidcs tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1818316485Sdavidcs tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1819316485Sdavidcs } 1820316485Sdavidcs 1821316485Sdavidcs OSAL_MEMSET(&tstats, 0, sizeof(tstats)); 1822316485Sdavidcs ecore_memcpy_from(p_hwfn, p_ptt, &tstats, 1823316485Sdavidcs tstats_addr, tstats_len); 1824316485Sdavidcs 1825316485Sdavidcs p_stats->common.mftag_filter_discards += 1826316485Sdavidcs HILO_64_REGPAIR(tstats.mftag_filter_discard); 1827316485Sdavidcs p_stats->common.mac_filter_discards += 1828316485Sdavidcs HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1829316485Sdavidcs} 1830316485Sdavidcs 1831316485Sdavidcsstatic void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, 1832316485Sdavidcs u32 *p_addr, u32 *p_len, 1833316485Sdavidcs u16 statistics_bin) 1834316485Sdavidcs{ 1835316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 1836316485Sdavidcs *p_addr = BAR0_MAP_REG_USDM_RAM + 1837316485Sdavidcs USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1838316485Sdavidcs *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1839316485Sdavidcs } else { 1840316485Sdavidcs struct 
ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1841316485Sdavidcs struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1842316485Sdavidcs 1843316485Sdavidcs *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1844316485Sdavidcs *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1845316485Sdavidcs } 1846316485Sdavidcs} 1847316485Sdavidcs 1848316485Sdavidcsstatic void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, 1849316485Sdavidcs struct ecore_ptt *p_ptt, 1850316485Sdavidcs struct ecore_eth_stats *p_stats, 1851316485Sdavidcs u16 statistics_bin) 1852316485Sdavidcs{ 1853316485Sdavidcs struct eth_ustorm_per_queue_stat ustats; 1854316485Sdavidcs u32 ustats_addr = 0, ustats_len = 0; 1855316485Sdavidcs 1856316485Sdavidcs __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1857316485Sdavidcs statistics_bin); 1858316485Sdavidcs 1859316485Sdavidcs OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 1860316485Sdavidcs ecore_memcpy_from(p_hwfn, p_ptt, &ustats, 1861316485Sdavidcs ustats_addr, ustats_len); 1862316485Sdavidcs 1863316485Sdavidcs p_stats->common.rx_ucast_bytes += 1864316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1865316485Sdavidcs p_stats->common.rx_mcast_bytes += 1866316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1867316485Sdavidcs p_stats->common.rx_bcast_bytes += 1868316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1869316485Sdavidcs p_stats->common.rx_ucast_pkts += 1870316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1871316485Sdavidcs p_stats->common.rx_mcast_pkts += 1872316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1873316485Sdavidcs p_stats->common.rx_bcast_pkts += 1874316485Sdavidcs HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1875316485Sdavidcs} 1876316485Sdavidcs 1877316485Sdavidcsstatic void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, 1878316485Sdavidcs u32 *p_addr, u32 *p_len, 1879316485Sdavidcs u16 statistics_bin) 1880316485Sdavidcs{ 1881316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 
1882316485Sdavidcs *p_addr = BAR0_MAP_REG_MSDM_RAM + 1883316485Sdavidcs MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1884316485Sdavidcs *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1885316485Sdavidcs } else { 1886316485Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1887316485Sdavidcs struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1888316485Sdavidcs 1889316485Sdavidcs *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1890316485Sdavidcs *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1891316485Sdavidcs } 1892316485Sdavidcs} 1893316485Sdavidcs 1894316485Sdavidcsstatic void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, 1895316485Sdavidcs struct ecore_ptt *p_ptt, 1896316485Sdavidcs struct ecore_eth_stats *p_stats, 1897316485Sdavidcs u16 statistics_bin) 1898316485Sdavidcs{ 1899316485Sdavidcs struct eth_mstorm_per_queue_stat mstats; 1900316485Sdavidcs u32 mstats_addr = 0, mstats_len = 0; 1901316485Sdavidcs 1902316485Sdavidcs __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1903316485Sdavidcs statistics_bin); 1904316485Sdavidcs 1905316485Sdavidcs OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 1906316485Sdavidcs ecore_memcpy_from(p_hwfn, p_ptt, &mstats, 1907316485Sdavidcs mstats_addr, mstats_len); 1908316485Sdavidcs 1909316485Sdavidcs p_stats->common.no_buff_discards += 1910316485Sdavidcs HILO_64_REGPAIR(mstats.no_buff_discard); 1911316485Sdavidcs p_stats->common.packet_too_big_discard += 1912316485Sdavidcs HILO_64_REGPAIR(mstats.packet_too_big_discard); 1913316485Sdavidcs p_stats->common.ttl0_discard += 1914316485Sdavidcs HILO_64_REGPAIR(mstats.ttl0_discard); 1915316485Sdavidcs p_stats->common.tpa_coalesced_pkts += 1916316485Sdavidcs HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1917316485Sdavidcs p_stats->common.tpa_coalesced_events += 1918316485Sdavidcs HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1919316485Sdavidcs p_stats->common.tpa_aborts_num += 1920316485Sdavidcs HILO_64_REGPAIR(mstats.tpa_aborts_num); 
1921316485Sdavidcs p_stats->common.tpa_coalesced_bytes += 1922316485Sdavidcs HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1923316485Sdavidcs} 1924316485Sdavidcs 1925316485Sdavidcsstatic void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, 1926316485Sdavidcs struct ecore_ptt *p_ptt, 1927316485Sdavidcs struct ecore_eth_stats *p_stats) 1928316485Sdavidcs{ 1929316485Sdavidcs struct ecore_eth_stats_common *p_common = &p_stats->common; 1930316485Sdavidcs struct port_stats port_stats; 1931316485Sdavidcs int j; 1932316485Sdavidcs 1933316485Sdavidcs OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); 1934316485Sdavidcs 1935316485Sdavidcs ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, 1936316485Sdavidcs p_hwfn->mcp_info->port_addr + 1937316485Sdavidcs OFFSETOF(struct public_port, stats), 1938316485Sdavidcs sizeof(port_stats)); 1939316485Sdavidcs 1940316485Sdavidcs p_common->rx_64_byte_packets += port_stats.eth.r64; 1941316485Sdavidcs p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; 1942316485Sdavidcs p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; 1943316485Sdavidcs p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; 1944316485Sdavidcs p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1945316485Sdavidcs p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1946316485Sdavidcs p_common->rx_crc_errors += port_stats.eth.rfcs; 1947316485Sdavidcs p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; 1948316485Sdavidcs p_common->rx_pause_frames += port_stats.eth.rxpf; 1949316485Sdavidcs p_common->rx_pfc_frames += port_stats.eth.rxpp; 1950316485Sdavidcs p_common->rx_align_errors += port_stats.eth.raln; 1951316485Sdavidcs p_common->rx_carrier_errors += port_stats.eth.rfcr; 1952316485Sdavidcs p_common->rx_oversize_packets += port_stats.eth.rovr; 1953316485Sdavidcs p_common->rx_jabbers += port_stats.eth.rjbr; 1954316485Sdavidcs p_common->rx_undersize_packets += port_stats.eth.rund; 1955316485Sdavidcs p_common->rx_fragments += 
port_stats.eth.rfrg; 1956316485Sdavidcs p_common->tx_64_byte_packets += port_stats.eth.t64; 1957316485Sdavidcs p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; 1958316485Sdavidcs p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; 1959316485Sdavidcs p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; 1960316485Sdavidcs p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1961316485Sdavidcs p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1962316485Sdavidcs p_common->tx_pause_frames += port_stats.eth.txpf; 1963316485Sdavidcs p_common->tx_pfc_frames += port_stats.eth.txpp; 1964316485Sdavidcs p_common->rx_mac_bytes += port_stats.eth.rbyte; 1965316485Sdavidcs p_common->rx_mac_uc_packets += port_stats.eth.rxuca; 1966316485Sdavidcs p_common->rx_mac_mc_packets += port_stats.eth.rxmca; 1967316485Sdavidcs p_common->rx_mac_bc_packets += port_stats.eth.rxbca; 1968316485Sdavidcs p_common->rx_mac_frames_ok += port_stats.eth.rxpok; 1969316485Sdavidcs p_common->tx_mac_bytes += port_stats.eth.tbyte; 1970316485Sdavidcs p_common->tx_mac_uc_packets += port_stats.eth.txuca; 1971316485Sdavidcs p_common->tx_mac_mc_packets += port_stats.eth.txmca; 1972316485Sdavidcs p_common->tx_mac_bc_packets += port_stats.eth.txbca; 1973316485Sdavidcs p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; 1974316485Sdavidcs for (j = 0; j < 8; j++) { 1975316485Sdavidcs p_common->brb_truncates += port_stats.brb.brb_truncate[j]; 1976316485Sdavidcs p_common->brb_discards += port_stats.brb.brb_discard[j]; 1977316485Sdavidcs } 1978316485Sdavidcs 1979316485Sdavidcs if (ECORE_IS_BB(p_hwfn->p_dev)) { 1980316485Sdavidcs struct ecore_eth_stats_bb *p_bb = &p_stats->bb; 1981316485Sdavidcs 1982316485Sdavidcs p_bb->rx_1519_to_1522_byte_packets += 1983316485Sdavidcs port_stats.eth.u0.bb0.r1522; 1984316485Sdavidcs p_bb->rx_1519_to_2047_byte_packets += 1985316485Sdavidcs port_stats.eth.u0.bb0.r2047; 1986316485Sdavidcs p_bb->rx_2048_to_4095_byte_packets += 
1987316485Sdavidcs port_stats.eth.u0.bb0.r4095; 1988316485Sdavidcs p_bb->rx_4096_to_9216_byte_packets += 1989316485Sdavidcs port_stats.eth.u0.bb0.r9216; 1990316485Sdavidcs p_bb->rx_9217_to_16383_byte_packets += 1991316485Sdavidcs port_stats.eth.u0.bb0.r16383; 1992316485Sdavidcs p_bb->tx_1519_to_2047_byte_packets += 1993316485Sdavidcs port_stats.eth.u1.bb1.t2047; 1994316485Sdavidcs p_bb->tx_2048_to_4095_byte_packets += 1995316485Sdavidcs port_stats.eth.u1.bb1.t4095; 1996316485Sdavidcs p_bb->tx_4096_to_9216_byte_packets += 1997316485Sdavidcs port_stats.eth.u1.bb1.t9216; 1998316485Sdavidcs p_bb->tx_9217_to_16383_byte_packets += 1999316485Sdavidcs port_stats.eth.u1.bb1.t16383; 2000316485Sdavidcs p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; 2001316485Sdavidcs p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; 2002316485Sdavidcs } else { 2003316485Sdavidcs struct ecore_eth_stats_ah *p_ah = &p_stats->ah; 2004316485Sdavidcs 2005316485Sdavidcs p_ah->rx_1519_to_max_byte_packets += 2006316485Sdavidcs port_stats.eth.u0.ah0.r1519_to_max; 2007316485Sdavidcs p_ah->tx_1519_to_max_byte_packets = 2008316485Sdavidcs port_stats.eth.u1.ah1.t1519_to_max; 2009316485Sdavidcs } 2010337519Sdavidcs 2011337519Sdavidcs p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, 2012337519Sdavidcs p_hwfn->mcp_info->port_addr + 2013337519Sdavidcs OFFSETOF(struct public_port, 2014337519Sdavidcs link_change_count)); 2015316485Sdavidcs} 2016316485Sdavidcs 2017316485Sdavidcsvoid __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, 2018316485Sdavidcs struct ecore_ptt *p_ptt, 2019316485Sdavidcs struct ecore_eth_stats *stats, 2020316485Sdavidcs u16 statistics_bin, bool b_get_port_stats) 2021316485Sdavidcs{ 2022316485Sdavidcs __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 2023316485Sdavidcs __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 2024320162Sdavidcs __ecore_get_vport_tstats(p_hwfn, p_ptt, stats); 2025316485Sdavidcs __ecore_get_vport_pstats(p_hwfn, 
p_ptt, stats, statistics_bin); 2026316485Sdavidcs 2027316485Sdavidcs#ifndef ASIC_ONLY 2028316485Sdavidcs /* Avoid getting PORT stats for emulation.*/ 2029316485Sdavidcs if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) 2030316485Sdavidcs return; 2031316485Sdavidcs#endif 2032316485Sdavidcs 2033316485Sdavidcs if (b_get_port_stats && p_hwfn->mcp_info) 2034316485Sdavidcs __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); 2035316485Sdavidcs} 2036316485Sdavidcs 2037316485Sdavidcsstatic void _ecore_get_vport_stats(struct ecore_dev *p_dev, 2038316485Sdavidcs struct ecore_eth_stats *stats) 2039316485Sdavidcs{ 2040316485Sdavidcs u8 fw_vport = 0; 2041316485Sdavidcs int i; 2042316485Sdavidcs 2043316485Sdavidcs OSAL_MEMSET(stats, 0, sizeof(*stats)); 2044316485Sdavidcs 2045316485Sdavidcs for_each_hwfn(p_dev, i) { 2046316485Sdavidcs struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2047316485Sdavidcs struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 2048316485Sdavidcs ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 2049337519Sdavidcs bool b_get_port_stats; 2050316485Sdavidcs 2051316485Sdavidcs if (IS_PF(p_dev)) { 2052316485Sdavidcs /* The main vport index is relative first */ 2053316485Sdavidcs if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { 2054316485Sdavidcs DP_ERR(p_hwfn, "No vport available!\n"); 2055316485Sdavidcs goto out; 2056316485Sdavidcs } 2057316485Sdavidcs } 2058316485Sdavidcs 2059316485Sdavidcs if (IS_PF(p_dev) && !p_ptt) { 2060316485Sdavidcs DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2061316485Sdavidcs continue; 2062316485Sdavidcs } 2063316485Sdavidcs 2064337519Sdavidcs b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn); 2065316485Sdavidcs __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 2066337519Sdavidcs b_get_port_stats); 2067316485Sdavidcs 2068316485Sdavidcsout: 2069316485Sdavidcs if (IS_PF(p_dev) && p_ptt) 2070316485Sdavidcs ecore_ptt_release(p_hwfn, p_ptt); 2071316485Sdavidcs } 2072316485Sdavidcs} 2073316485Sdavidcs 2074316485Sdavidcsvoid ecore_get_vport_stats(struct ecore_dev *p_dev, 
2075316485Sdavidcs struct ecore_eth_stats *stats) 2076316485Sdavidcs{ 2077316485Sdavidcs u32 i; 2078316485Sdavidcs 2079316485Sdavidcs if (!p_dev) { 2080316485Sdavidcs OSAL_MEMSET(stats, 0, sizeof(*stats)); 2081316485Sdavidcs return; 2082316485Sdavidcs } 2083316485Sdavidcs 2084316485Sdavidcs _ecore_get_vport_stats(p_dev, stats); 2085316485Sdavidcs 2086316485Sdavidcs if (!p_dev->reset_stats) 2087316485Sdavidcs return; 2088316485Sdavidcs 2089316485Sdavidcs /* Reduce the statistics baseline */ 2090316485Sdavidcs for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) 2091316485Sdavidcs ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; 2092316485Sdavidcs} 2093316485Sdavidcs 2094316485Sdavidcs/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 2095316485Sdavidcsvoid ecore_reset_vport_stats(struct ecore_dev *p_dev) 2096316485Sdavidcs{ 2097316485Sdavidcs int i; 2098316485Sdavidcs 2099316485Sdavidcs for_each_hwfn(p_dev, i) { 2100316485Sdavidcs struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2101316485Sdavidcs struct eth_mstorm_per_queue_stat mstats; 2102316485Sdavidcs struct eth_ustorm_per_queue_stat ustats; 2103316485Sdavidcs struct eth_pstorm_per_queue_stat pstats; 2104316485Sdavidcs struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 
			ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		/* Clear the M/U/P-storm per-queue counters of queue 0 by
		 * writing zeroed structures over their storm-RAM locations.
		 */
		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(p_dev))
			ecore_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 * Link change stat is maintained by MFW, return its value as is.
	 */
	if (!p_dev->reset_stats)
		DP_INFO(p_dev, "Reset stats not allocated\n");
	else {
		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
		p_dev->reset_stats->common.link_change_count = 0;
	}
}

/* Map an ecore aRFS filtering mode onto the HSI GFT profile type.
 * Any mode other than 5-tuple / IP-destination falls back to the
 * L4-destination-port profile.
 */
static enum gft_profile_type
ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
{
	if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
		return GFT_PROFILE_TYPE_4_TUPLE;
	if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
		return GFT_PROFILE_TYPE_IP_DST_ADDR;
	return GFT_PROFILE_TYPE_L4_DST_PORT;
}

/* Enable or disable GFT-based aRFS filtering according to @p_cfg_params.
 * A no-op when the multi-function configuration disallows aRFS.
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params)
{
	if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
		return;

	if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
		ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
				 p_cfg_params->tcp,
				 p_cfg_params->udp,
				 p_cfg_params->ipv4,
				 p_cfg_params->ipv6,
				 ecore_arfs_mode_to_hsi(p_cfg_params->mode));
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   /* NOTE(review): expression continues in the next chunk. */
			   p_cfg_params->ipv6 ?
"Enable" : "Disable", 2171337519Sdavidcs (u32)p_cfg_params->mode); 2172316485Sdavidcs } else { 2173337519Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Disabled Filtering\n"); 2174337519Sdavidcs ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2175316485Sdavidcs } 2176316485Sdavidcs} 2177316485Sdavidcs 2178316485Sdavidcsenum _ecore_status_t 2179316485Sdavidcsecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 2180316485Sdavidcs struct ecore_spq_comp_cb *p_cb, 2181337519Sdavidcs struct ecore_ntuple_filter_params *p_params) 2182316485Sdavidcs{ 2183316485Sdavidcs struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; 2184316485Sdavidcs struct ecore_spq_entry *p_ent = OSAL_NULL; 2185316485Sdavidcs struct ecore_sp_init_data init_data; 2186316485Sdavidcs u16 abs_rx_q_id = 0; 2187316485Sdavidcs u8 abs_vport_id = 0; 2188316485Sdavidcs enum _ecore_status_t rc = ECORE_NOTIMPL; 2189316485Sdavidcs 2190337519Sdavidcs rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 2191316485Sdavidcs if (rc != ECORE_SUCCESS) 2192316485Sdavidcs return rc; 2193316485Sdavidcs 2194337519Sdavidcs if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) { 2195337519Sdavidcs rc = ecore_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); 2196337519Sdavidcs if (rc != ECORE_SUCCESS) 2197337519Sdavidcs return rc; 2198337519Sdavidcs } 2199316485Sdavidcs 2200316485Sdavidcs /* Get SPQ entry */ 2201316485Sdavidcs OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 2202316485Sdavidcs init_data.cid = ecore_spq_get_cid(p_hwfn); 2203316485Sdavidcs 2204316485Sdavidcs init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2205316485Sdavidcs 2206316485Sdavidcs if (p_cb) { 2207316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_CB; 2208316485Sdavidcs init_data.p_comp_data = p_cb; 2209316485Sdavidcs } else { 2210316485Sdavidcs init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 2211316485Sdavidcs } 2212316485Sdavidcs 2213316485Sdavidcs rc = ecore_sp_init_request(p_hwfn, &p_ent, 2214316485Sdavidcs 
ETH_RAMROD_GFT_UPDATE_FILTER, 2215316485Sdavidcs PROTOCOLID_ETH, &init_data); 2216316485Sdavidcs if (rc != ECORE_SUCCESS) 2217316485Sdavidcs return rc; 2218316485Sdavidcs 2219316485Sdavidcs p_ramrod = &p_ent->ramrod.rx_update_gft; 2220316485Sdavidcs 2221337519Sdavidcs DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); 2222337519Sdavidcs p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(p_params->length); 2223316485Sdavidcs 2224337519Sdavidcs if (p_params->qid != ECORE_RFS_NTUPLE_QID_RSS) { 2225337519Sdavidcs p_ramrod->rx_qid_valid = 1; 2226337519Sdavidcs p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id); 2227337519Sdavidcs } 2228337519Sdavidcs 2229337519Sdavidcs p_ramrod->flow_id_valid = 0; 2230337519Sdavidcs p_ramrod->flow_id = 0; 2231337519Sdavidcs 2232337519Sdavidcs p_ramrod->vport_id = OSAL_CPU_TO_LE16 ((u16)abs_vport_id); 2233337519Sdavidcs p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER 2234337519Sdavidcs : GFT_DELETE_FILTER; 2235337519Sdavidcs 2236316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2237316485Sdavidcs "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", 2238316485Sdavidcs abs_vport_id, abs_rx_q_id, 2239337519Sdavidcs p_params->b_is_add ? 
"Adding" : "Removing", 2240337519Sdavidcs (unsigned long long)p_params->addr, p_params->length); 2241316485Sdavidcs 2242316485Sdavidcs return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 2243316485Sdavidcs} 2244337519Sdavidcs 2245337519Sdavidcsint ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn, 2246337519Sdavidcs struct ecore_ptt *p_ptt, 2247337519Sdavidcs struct ecore_queue_cid *p_cid, 2248337519Sdavidcs u16 *p_rx_coal) 2249337519Sdavidcs{ 2250337519Sdavidcs u32 coalesce, address, is_valid; 2251337519Sdavidcs struct cau_sb_entry sb_entry; 2252337519Sdavidcs u8 timer_res; 2253337519Sdavidcs enum _ecore_status_t rc; 2254337519Sdavidcs 2255337519Sdavidcs rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2256337519Sdavidcs p_cid->sb_igu_id * sizeof(u64), 2257337519Sdavidcs (u64)(osal_uintptr_t)&sb_entry, 2, 2258337519Sdavidcs OSAL_NULL /* default parameters */); 2259337519Sdavidcs if (rc != ECORE_SUCCESS) { 2260337519Sdavidcs DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2261337519Sdavidcs return rc; 2262337519Sdavidcs } 2263337519Sdavidcs 2264337519Sdavidcs timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); 2265337519Sdavidcs 2266337519Sdavidcs address = BAR0_MAP_REG_USDM_RAM + 2267337519Sdavidcs USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2268337519Sdavidcs coalesce = ecore_rd(p_hwfn, p_ptt, address); 2269337519Sdavidcs 2270337519Sdavidcs is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2271337519Sdavidcs if (!is_valid) 2272337519Sdavidcs return ECORE_INVAL; 2273337519Sdavidcs 2274337519Sdavidcs coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2275337519Sdavidcs *p_rx_coal = (u16)(coalesce << timer_res); 2276337519Sdavidcs 2277337519Sdavidcs return ECORE_SUCCESS; 2278337519Sdavidcs} 2279337519Sdavidcs 2280337519Sdavidcsint ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn, 2281337519Sdavidcs struct ecore_ptt *p_ptt, 2282337519Sdavidcs struct ecore_queue_cid *p_cid, 2283337519Sdavidcs u16 *p_tx_coal) 
2284337519Sdavidcs{ 2285337519Sdavidcs u32 coalesce, address, is_valid; 2286337519Sdavidcs struct cau_sb_entry sb_entry; 2287337519Sdavidcs u8 timer_res; 2288337519Sdavidcs enum _ecore_status_t rc; 2289337519Sdavidcs 2290337519Sdavidcs rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2291337519Sdavidcs p_cid->sb_igu_id * sizeof(u64), 2292337519Sdavidcs (u64)(osal_uintptr_t)&sb_entry, 2, 2293337519Sdavidcs OSAL_NULL /* default parameters */); 2294337519Sdavidcs if (rc != ECORE_SUCCESS) { 2295337519Sdavidcs DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2296337519Sdavidcs return rc; 2297337519Sdavidcs } 2298337519Sdavidcs 2299337519Sdavidcs timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); 2300337519Sdavidcs 2301337519Sdavidcs address = BAR0_MAP_REG_XSDM_RAM + 2302337519Sdavidcs XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2303337519Sdavidcs coalesce = ecore_rd(p_hwfn, p_ptt, address); 2304337519Sdavidcs 2305337519Sdavidcs is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2306337519Sdavidcs if (!is_valid) 2307337519Sdavidcs return ECORE_INVAL; 2308337519Sdavidcs 2309337519Sdavidcs coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2310337519Sdavidcs *p_tx_coal = (u16)(coalesce << timer_res); 2311337519Sdavidcs 2312337519Sdavidcs return ECORE_SUCCESS; 2313337519Sdavidcs} 2314337519Sdavidcs 2315337519Sdavidcsenum _ecore_status_t 2316337519Sdavidcsecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal, 2317337519Sdavidcs void *handle) 2318337519Sdavidcs{ 2319337519Sdavidcs struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle; 2320337519Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 2321337519Sdavidcs struct ecore_ptt *p_ptt; 2322337519Sdavidcs 2323337519Sdavidcs#ifdef CONFIG_ECORE_SRIOV 2324337519Sdavidcs if (IS_VF(p_hwfn->p_dev)) { 2325337519Sdavidcs rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); 2326337519Sdavidcs if (rc != ECORE_SUCCESS) 2327337519Sdavidcs DP_NOTICE(p_hwfn, false, 
2328337519Sdavidcs "Unable to read queue calescing\n"); 2329337519Sdavidcs 2330337519Sdavidcs return rc; 2331337519Sdavidcs } 2332337519Sdavidcs#endif 2333337519Sdavidcs 2334337519Sdavidcs p_ptt = ecore_ptt_acquire(p_hwfn); 2335337519Sdavidcs if (!p_ptt) 2336337519Sdavidcs return ECORE_AGAIN; 2337337519Sdavidcs 2338337519Sdavidcs if (p_cid->b_is_rx) { 2339337519Sdavidcs rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2340337519Sdavidcs if (rc != ECORE_SUCCESS) 2341337519Sdavidcs goto out; 2342337519Sdavidcs } else { 2343337519Sdavidcs rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2344337519Sdavidcs if (rc != ECORE_SUCCESS) 2345337519Sdavidcs goto out; 2346337519Sdavidcs } 2347337519Sdavidcs 2348337519Sdavidcsout: 2349337519Sdavidcs ecore_ptt_release(p_hwfn, p_ptt); 2350337519Sdavidcs 2351337519Sdavidcs return rc; 2352337519Sdavidcs} 2353337519Sdavidcs#ifdef _NTDDK_ 2354337519Sdavidcs#pragma warning(pop) 2355337519Sdavidcs#endif 2356