/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
26316485Sdavidcs * 27316485Sdavidcs * $FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_l2_api.h 337517 2018-08-09 01:17:35Z davidcs $ 28316485Sdavidcs * 29316485Sdavidcs */ 30316485Sdavidcs 31316485Sdavidcs#ifndef __ECORE_L2_API_H__ 32316485Sdavidcs#define __ECORE_L2_API_H__ 33316485Sdavidcs 34316485Sdavidcs#include "ecore_status.h" 35316485Sdavidcs#include "ecore_sp_api.h" 36316485Sdavidcs#include "ecore_int_api.h" 37316485Sdavidcs 38337517Sdavidcs#ifndef __EXTRACT__LINUX__ 39316485Sdavidcsenum ecore_rss_caps { 40316485Sdavidcs ECORE_RSS_IPV4 = 0x1, 41316485Sdavidcs ECORE_RSS_IPV6 = 0x2, 42316485Sdavidcs ECORE_RSS_IPV4_TCP = 0x4, 43316485Sdavidcs ECORE_RSS_IPV6_TCP = 0x8, 44316485Sdavidcs ECORE_RSS_IPV4_UDP = 0x10, 45316485Sdavidcs ECORE_RSS_IPV6_UDP = 0x20, 46316485Sdavidcs}; 47316485Sdavidcs 48316485Sdavidcs/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ 49316485Sdavidcs#define ECORE_RSS_IND_TABLE_SIZE 128 50316485Sdavidcs#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */ 51316485Sdavidcs 52316485Sdavidcs#define ECORE_MAX_PHC_DRIFT_PPB 291666666 53316485Sdavidcs 54316485Sdavidcsenum ecore_ptp_filter_type { 55320164Sdavidcs ECORE_PTP_FILTER_NONE, 56320164Sdavidcs ECORE_PTP_FILTER_ALL, 57320164Sdavidcs ECORE_PTP_FILTER_V1_L4_EVENT, 58320164Sdavidcs ECORE_PTP_FILTER_V1_L4_GEN, 59320164Sdavidcs ECORE_PTP_FILTER_V2_L4_EVENT, 60320164Sdavidcs ECORE_PTP_FILTER_V2_L4_GEN, 61320164Sdavidcs ECORE_PTP_FILTER_V2_L2_EVENT, 62320164Sdavidcs ECORE_PTP_FILTER_V2_L2_GEN, 63320164Sdavidcs ECORE_PTP_FILTER_V2_EVENT, 64320164Sdavidcs ECORE_PTP_FILTER_V2_GEN 65316485Sdavidcs}; 66316485Sdavidcs 67320164Sdavidcsenum ecore_ptp_hwtstamp_tx_type { 68320164Sdavidcs ECORE_PTP_HWTSTAMP_TX_OFF, 69320164Sdavidcs ECORE_PTP_HWTSTAMP_TX_ON, 70320164Sdavidcs}; 71337517Sdavidcs#endif 72320164Sdavidcs 73337517Sdavidcs#ifndef __EXTRACT__LINUX__ 74316485Sdavidcsstruct ecore_queue_start_common_params { 75316485Sdavidcs /* Should always be relative to entity sending this. 
*/ 76316485Sdavidcs u8 vport_id; 77316485Sdavidcs u16 queue_id; 78316485Sdavidcs 79316485Sdavidcs /* Relative, but relevant only for PFs */ 80316485Sdavidcs u8 stats_id; 81316485Sdavidcs 82316485Sdavidcs struct ecore_sb_info *p_sb; 83316485Sdavidcs u8 sb_idx; 84337517Sdavidcs 85337517Sdavidcs u8 tc; 86316485Sdavidcs}; 87316485Sdavidcs 88316485Sdavidcsstruct ecore_rxq_start_ret_params { 89316485Sdavidcs void OSAL_IOMEM *p_prod; 90316485Sdavidcs void *p_handle; 91316485Sdavidcs}; 92316485Sdavidcs 93316485Sdavidcsstruct ecore_txq_start_ret_params { 94316485Sdavidcs void OSAL_IOMEM *p_doorbell; 95316485Sdavidcs void *p_handle; 96316485Sdavidcs}; 97337517Sdavidcs#endif 98316485Sdavidcs 99316485Sdavidcsstruct ecore_rss_params { 100316485Sdavidcs u8 update_rss_config; 101316485Sdavidcs u8 rss_enable; 102316485Sdavidcs u8 rss_eng_id; 103316485Sdavidcs u8 update_rss_capabilities; 104316485Sdavidcs u8 update_rss_ind_table; 105316485Sdavidcs u8 update_rss_key; 106316485Sdavidcs u8 rss_caps; 107316485Sdavidcs u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ 108316485Sdavidcs 109316485Sdavidcs /* Indirection table consist of rx queue handles */ 110316485Sdavidcs void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE]; 111316485Sdavidcs u32 rss_key[ECORE_RSS_KEY_SIZE]; 112316485Sdavidcs}; 113316485Sdavidcs 114316485Sdavidcsstruct ecore_sge_tpa_params { 115316485Sdavidcs u8 max_buffers_per_cqe; 116316485Sdavidcs 117316485Sdavidcs u8 update_tpa_en_flg; 118316485Sdavidcs u8 tpa_ipv4_en_flg; 119316485Sdavidcs u8 tpa_ipv6_en_flg; 120316485Sdavidcs u8 tpa_ipv4_tunn_en_flg; 121316485Sdavidcs u8 tpa_ipv6_tunn_en_flg; 122316485Sdavidcs 123316485Sdavidcs u8 update_tpa_param_flg; 124316485Sdavidcs u8 tpa_pkt_split_flg; 125316485Sdavidcs u8 tpa_hdr_data_split_flg; 126316485Sdavidcs u8 tpa_gro_consistent_flg; 127316485Sdavidcs u8 tpa_max_aggs_num; 128316485Sdavidcs u16 tpa_max_size; 129316485Sdavidcs u16 tpa_min_size_to_start; 130316485Sdavidcs u16 tpa_min_size_to_cont; 
131316485Sdavidcs}; 132316485Sdavidcs 133316485Sdavidcsenum ecore_filter_opcode { 134316485Sdavidcs ECORE_FILTER_ADD, 135316485Sdavidcs ECORE_FILTER_REMOVE, 136316485Sdavidcs ECORE_FILTER_MOVE, 137316485Sdavidcs ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */ 138316485Sdavidcs ECORE_FILTER_FLUSH, /* Removes all filters */ 139316485Sdavidcs}; 140316485Sdavidcs 141316485Sdavidcsenum ecore_filter_ucast_type { 142316485Sdavidcs ECORE_FILTER_MAC, 143316485Sdavidcs ECORE_FILTER_VLAN, 144316485Sdavidcs ECORE_FILTER_MAC_VLAN, 145316485Sdavidcs ECORE_FILTER_INNER_MAC, 146316485Sdavidcs ECORE_FILTER_INNER_VLAN, 147316485Sdavidcs ECORE_FILTER_INNER_PAIR, 148316485Sdavidcs ECORE_FILTER_INNER_MAC_VNI_PAIR, 149316485Sdavidcs ECORE_FILTER_MAC_VNI_PAIR, 150316485Sdavidcs ECORE_FILTER_VNI, 151316485Sdavidcs}; 152316485Sdavidcs 153316485Sdavidcsstruct ecore_filter_ucast { 154316485Sdavidcs enum ecore_filter_opcode opcode; 155316485Sdavidcs enum ecore_filter_ucast_type type; 156316485Sdavidcs u8 is_rx_filter; 157316485Sdavidcs u8 is_tx_filter; 158316485Sdavidcs u8 vport_to_add_to; 159316485Sdavidcs u8 vport_to_remove_from; 160316485Sdavidcs unsigned char mac[ETH_ALEN]; 161316485Sdavidcs u8 assert_on_error; 162316485Sdavidcs u16 vlan; 163316485Sdavidcs u32 vni; 164316485Sdavidcs}; 165316485Sdavidcs 166316485Sdavidcsstruct ecore_filter_mcast { 167316485Sdavidcs /* MOVE is not supported for multicast */ 168316485Sdavidcs enum ecore_filter_opcode opcode; 169316485Sdavidcs u8 vport_to_add_to; 170316485Sdavidcs u8 vport_to_remove_from; 171316485Sdavidcs u8 num_mc_addrs; 172316485Sdavidcs#define ECORE_MAX_MC_ADDRS 64 173316485Sdavidcs unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN]; 174316485Sdavidcs}; 175316485Sdavidcs 176316485Sdavidcsstruct ecore_filter_accept_flags { 177316485Sdavidcs u8 update_rx_mode_config; 178316485Sdavidcs u8 update_tx_mode_config; 179316485Sdavidcs u8 rx_accept_filter; 180316485Sdavidcs u8 tx_accept_filter; 181316485Sdavidcs#define 
ECORE_ACCEPT_NONE 0x01 182316485Sdavidcs#define ECORE_ACCEPT_UCAST_MATCHED 0x02 183316485Sdavidcs#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04 184316485Sdavidcs#define ECORE_ACCEPT_MCAST_MATCHED 0x08 185316485Sdavidcs#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10 186316485Sdavidcs#define ECORE_ACCEPT_BCAST 0x20 187316485Sdavidcs}; 188316485Sdavidcs 189337517Sdavidcs#ifndef __EXTRACT__LINUX__ 190337517Sdavidcsenum ecore_filter_config_mode { 191337517Sdavidcs ECORE_FILTER_CONFIG_MODE_DISABLE, 192337517Sdavidcs ECORE_FILTER_CONFIG_MODE_5_TUPLE, 193337517Sdavidcs ECORE_FILTER_CONFIG_MODE_L4_PORT, 194337517Sdavidcs ECORE_FILTER_CONFIG_MODE_IP_DEST, 195337517Sdavidcs}; 196337517Sdavidcs#endif 197337517Sdavidcs 198316485Sdavidcsstruct ecore_arfs_config_params { 199316485Sdavidcs bool tcp; 200316485Sdavidcs bool udp; 201316485Sdavidcs bool ipv4; 202316485Sdavidcs bool ipv6; 203337517Sdavidcs enum ecore_filter_config_mode mode; 204316485Sdavidcs}; 205316485Sdavidcs 206316485Sdavidcs/* Add / remove / move / remove-all unicast MAC-VLAN filters. 207316485Sdavidcs * FW will assert in the following cases, so driver should take care...: 208316485Sdavidcs * 1. Adding a filter to a full table. 209316485Sdavidcs * 2. Adding a filter which already exists on that vport. 210316485Sdavidcs * 3. Removing a filter which doesn't exist. 211316485Sdavidcs */ 212316485Sdavidcs 213316485Sdavidcsenum _ecore_status_t 214316485Sdavidcsecore_filter_ucast_cmd(struct ecore_dev *p_dev, 215316485Sdavidcs struct ecore_filter_ucast *p_filter_cmd, 216316485Sdavidcs enum spq_mode comp_mode, 217316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 218316485Sdavidcs 219316485Sdavidcs/* Add / remove / move multicast MAC filters. 
*/ 220316485Sdavidcsenum _ecore_status_t 221316485Sdavidcsecore_filter_mcast_cmd(struct ecore_dev *p_dev, 222316485Sdavidcs struct ecore_filter_mcast *p_filter_cmd, 223316485Sdavidcs enum spq_mode comp_mode, 224316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 225316485Sdavidcs 226316485Sdavidcs/* Set "accept" filters */ 227316485Sdavidcsenum _ecore_status_t 228316485Sdavidcsecore_filter_accept_cmd( 229316485Sdavidcs struct ecore_dev *p_dev, 230316485Sdavidcs u8 vport, 231316485Sdavidcs struct ecore_filter_accept_flags accept_flags, 232316485Sdavidcs u8 update_accept_any_vlan, 233316485Sdavidcs u8 accept_any_vlan, 234316485Sdavidcs enum spq_mode comp_mode, 235316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 236316485Sdavidcs 237316485Sdavidcs/** 238316485Sdavidcs * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod 239316485Sdavidcs * 240316485Sdavidcs * This ramrod initializes an RX Queue for a VPort. An Assert is generated if 241316485Sdavidcs * the VPort ID is not currently initialized. 242316485Sdavidcs * 243316485Sdavidcs * @param p_hwfn 244316485Sdavidcs * @param opaque_fid 245316485Sdavidcs * @p_params Inputs; Relative for PF [SB being an exception] 246316485Sdavidcs * @param bd_max_bytes Maximum bytes that can be placed on a BD 247316485Sdavidcs * @param bd_chain_phys_addr Physical address of BDs for receive. 248316485Sdavidcs * @param cqe_pbl_addr Physical address of the CQE PBL Table. 249316485Sdavidcs * @param cqe_pbl_size Size of the CQE PBL Table 250316485Sdavidcs * @param p_ret_params Pointed struct to be filled with outputs. 
251316485Sdavidcs * 252316485Sdavidcs * @return enum _ecore_status_t 253316485Sdavidcs */ 254316485Sdavidcsenum _ecore_status_t 255316485Sdavidcsecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, 256316485Sdavidcs u16 opaque_fid, 257316485Sdavidcs struct ecore_queue_start_common_params *p_params, 258316485Sdavidcs u16 bd_max_bytes, 259316485Sdavidcs dma_addr_t bd_chain_phys_addr, 260316485Sdavidcs dma_addr_t cqe_pbl_addr, 261316485Sdavidcs u16 cqe_pbl_size, 262316485Sdavidcs struct ecore_rxq_start_ret_params *p_ret_params); 263316485Sdavidcs 264316485Sdavidcs/** 265316485Sdavidcs * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue 266316485Sdavidcs * 267316485Sdavidcs * @param p_hwfn 268316485Sdavidcs * @param p_rxq Handler of queue to close 269316485Sdavidcs * @param eq_completion_only If True completion will be on 270316485Sdavidcs * EQe, if False completion will be 271316485Sdavidcs * on EQe if p_hwfn opaque 272316485Sdavidcs * different from the RXQ opaque 273316485Sdavidcs * otherwise on CQe. 274316485Sdavidcs * @param cqe_completion If True completion will be 275320164Sdavidcs * recieve on CQe. 276316485Sdavidcs * @return enum _ecore_status_t 277316485Sdavidcs */ 278316485Sdavidcsenum _ecore_status_t 279316485Sdavidcsecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, 280316485Sdavidcs void *p_rxq, 281316485Sdavidcs bool eq_completion_only, 282316485Sdavidcs bool cqe_completion); 283316485Sdavidcs 284316485Sdavidcs/** 285316485Sdavidcs * @brief - TX Queue Start Ramrod 286316485Sdavidcs * 287316485Sdavidcs * This ramrod initializes a TX Queue for a VPort. An Assert is generated if 288316485Sdavidcs * the VPort is not currently initialized. 
289316485Sdavidcs * 290316485Sdavidcs * @param p_hwfn 291316485Sdavidcs * @param opaque_fid 292316485Sdavidcs * @p_params 293316485Sdavidcs * @param tc traffic class to use with this L2 txq 294316485Sdavidcs * @param pbl_addr address of the pbl array 295316485Sdavidcs * @param pbl_size number of entries in pbl 296316485Sdavidcs * @oaram p_ret_params Pointer to fill the return parameters in. 297316485Sdavidcs * 298316485Sdavidcs * @return enum _ecore_status_t 299316485Sdavidcs */ 300316485Sdavidcsenum _ecore_status_t 301316485Sdavidcsecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, 302316485Sdavidcs u16 opaque_fid, 303316485Sdavidcs struct ecore_queue_start_common_params *p_params, 304316485Sdavidcs u8 tc, 305316485Sdavidcs dma_addr_t pbl_addr, 306316485Sdavidcs u16 pbl_size, 307316485Sdavidcs struct ecore_txq_start_ret_params *p_ret_params); 308316485Sdavidcs 309316485Sdavidcs/** 310316485Sdavidcs * @brief ecore_eth_tx_queue_stop - closes a Tx queue 311316485Sdavidcs * 312316485Sdavidcs * @param p_hwfn 313316485Sdavidcs * @param p_txq - handle to Tx queue needed to be closed 314316485Sdavidcs * 315316485Sdavidcs * @return enum _ecore_status_t 316316485Sdavidcs */ 317316485Sdavidcsenum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, 318316485Sdavidcs void *p_txq); 319316485Sdavidcs 320316485Sdavidcsenum ecore_tpa_mode { 321316485Sdavidcs ECORE_TPA_MODE_NONE, 322316485Sdavidcs ECORE_TPA_MODE_RSC, 323316485Sdavidcs ECORE_TPA_MODE_GRO, 324316485Sdavidcs ECORE_TPA_MODE_MAX 325316485Sdavidcs}; 326316485Sdavidcs 327316485Sdavidcsstruct ecore_sp_vport_start_params { 328316485Sdavidcs enum ecore_tpa_mode tpa_mode; 329316485Sdavidcs bool remove_inner_vlan; /* Inner VLAN removal is enabled */ 330316485Sdavidcs bool tx_switching; /* Vport supports tx-switching */ 331316485Sdavidcs bool handle_ptp_pkts; /* Handle PTP packets */ 332316485Sdavidcs bool only_untagged; /* Untagged pkt control */ 333316485Sdavidcs bool drop_ttl0; /* Drop packets with TTL = 0 */ 
334316485Sdavidcs u8 max_buffers_per_cqe; 335316485Sdavidcs u32 concrete_fid; 336316485Sdavidcs u16 opaque_fid; 337316485Sdavidcs u8 vport_id; /* VPORT ID */ 338316485Sdavidcs u16 mtu; /* VPORT MTU */ 339316485Sdavidcs bool zero_placement_offset; 340316485Sdavidcs bool check_mac; 341316485Sdavidcs bool check_ethtype; 342316485Sdavidcs 343316485Sdavidcs /* Strict behavior on transmission errors */ 344316485Sdavidcs bool b_err_illegal_vlan_mode; 345316485Sdavidcs bool b_err_illegal_inband_mode; 346316485Sdavidcs bool b_err_vlan_insert_with_inband; 347316485Sdavidcs bool b_err_small_pkt; 348316485Sdavidcs bool b_err_big_pkt; 349316485Sdavidcs bool b_err_anti_spoof; 350316485Sdavidcs bool b_err_ctrl_frame; 351316485Sdavidcs}; 352316485Sdavidcs 353316485Sdavidcs/** 354316485Sdavidcs * @brief ecore_sp_vport_start - 355316485Sdavidcs * 356316485Sdavidcs * This ramrod initializes a VPort. An Assert if generated if the Function ID 357316485Sdavidcs * of the VPort is not enabled. 358316485Sdavidcs * 359316485Sdavidcs * @param p_hwfn 360316485Sdavidcs * @param p_params VPORT start params 361316485Sdavidcs * 362316485Sdavidcs * @return enum _ecore_status_t 363316485Sdavidcs */ 364316485Sdavidcsenum _ecore_status_t 365316485Sdavidcsecore_sp_vport_start(struct ecore_hwfn *p_hwfn, 366316485Sdavidcs struct ecore_sp_vport_start_params *p_params); 367316485Sdavidcs 368316485Sdavidcsstruct ecore_sp_vport_update_params { 369316485Sdavidcs u16 opaque_fid; 370316485Sdavidcs u8 vport_id; 371316485Sdavidcs u8 update_vport_active_rx_flg; 372316485Sdavidcs u8 vport_active_rx_flg; 373316485Sdavidcs u8 update_vport_active_tx_flg; 374316485Sdavidcs u8 vport_active_tx_flg; 375316485Sdavidcs u8 update_inner_vlan_removal_flg; 376316485Sdavidcs u8 inner_vlan_removal_flg; 377316485Sdavidcs u8 silent_vlan_removal_flg; 378316485Sdavidcs u8 update_default_vlan_enable_flg; 379316485Sdavidcs u8 default_vlan_enable_flg; 380316485Sdavidcs u8 update_default_vlan_flg; 381316485Sdavidcs u16 default_vlan; 
382316485Sdavidcs u8 update_tx_switching_flg; 383316485Sdavidcs u8 tx_switching_flg; 384316485Sdavidcs u8 update_approx_mcast_flg; 385316485Sdavidcs u8 update_anti_spoofing_en_flg; 386316485Sdavidcs u8 anti_spoofing_en; 387316485Sdavidcs u8 update_accept_any_vlan_flg; 388316485Sdavidcs u8 accept_any_vlan; 389337517Sdavidcs u32 bins[8]; 390316485Sdavidcs struct ecore_rss_params *rss_params; 391316485Sdavidcs struct ecore_filter_accept_flags accept_flags; 392316485Sdavidcs struct ecore_sge_tpa_params *sge_tpa_params; 393316485Sdavidcs}; 394316485Sdavidcs 395316485Sdavidcs/** 396316485Sdavidcs * @brief ecore_sp_vport_update - 397316485Sdavidcs * 398316485Sdavidcs * This ramrod updates the parameters of the VPort. Every field can be updated 399316485Sdavidcs * independently, according to flags. 400316485Sdavidcs * 401316485Sdavidcs * This ramrod is also used to set the VPort state to active after creation. 402316485Sdavidcs * An Assert is generated if the VPort does not contain an RX queue. 403316485Sdavidcs * 404316485Sdavidcs * @param p_hwfn 405316485Sdavidcs * @param p_params 406316485Sdavidcs * 407316485Sdavidcs * @return enum _ecore_status_t 408316485Sdavidcs */ 409316485Sdavidcsenum _ecore_status_t 410316485Sdavidcsecore_sp_vport_update(struct ecore_hwfn *p_hwfn, 411316485Sdavidcs struct ecore_sp_vport_update_params *p_params, 412316485Sdavidcs enum spq_mode comp_mode, 413316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 414316485Sdavidcs/** 415316485Sdavidcs * @brief ecore_sp_vport_stop - 416316485Sdavidcs * 417316485Sdavidcs * This ramrod closes a VPort after all its RX and TX queues are terminated. 418316485Sdavidcs * An Assert is generated if any queues are left open. 
419316485Sdavidcs * 420316485Sdavidcs * @param p_hwfn 421316485Sdavidcs * @param opaque_fid 422316485Sdavidcs * @param vport_id VPort ID 423316485Sdavidcs * 424316485Sdavidcs * @return enum _ecore_status_t 425316485Sdavidcs */ 426316485Sdavidcsenum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, 427316485Sdavidcs u16 opaque_fid, 428316485Sdavidcs u8 vport_id); 429316485Sdavidcs 430316485Sdavidcsenum _ecore_status_t 431316485Sdavidcsecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, 432316485Sdavidcs u16 opaque_fid, 433316485Sdavidcs struct ecore_filter_ucast *p_filter_cmd, 434316485Sdavidcs enum spq_mode comp_mode, 435316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 436316485Sdavidcs 437316485Sdavidcs/** 438316485Sdavidcs * @brief ecore_sp_rx_eth_queues_update - 439316485Sdavidcs * 440316485Sdavidcs * This ramrod updates an RX queue. It is used for setting the active state 441316485Sdavidcs * of the queue and updating the TPA and SGE parameters. 442316485Sdavidcs * 443316485Sdavidcs * @note Final phase API. 444316485Sdavidcs * 445316485Sdavidcs * @param p_hwfn 446316485Sdavidcs * @param pp_rxq_handlers An array of queue handlers to be updated. 447316485Sdavidcs * @param num_rxqs number of queues to update. 
448316485Sdavidcs * @param complete_cqe_flg Post completion to the CQE Ring if set 449316485Sdavidcs * @param complete_event_flg Post completion to the Event Ring if set 450316485Sdavidcs * @param comp_mode 451316485Sdavidcs * @param p_comp_data 452316485Sdavidcs * 453316485Sdavidcs * @return enum _ecore_status_t 454316485Sdavidcs */ 455316485Sdavidcs 456316485Sdavidcsenum _ecore_status_t 457316485Sdavidcsecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, 458316485Sdavidcs void **pp_rxq_handlers, 459316485Sdavidcs u8 num_rxqs, 460316485Sdavidcs u8 complete_cqe_flg, 461316485Sdavidcs u8 complete_event_flg, 462316485Sdavidcs enum spq_mode comp_mode, 463316485Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 464316485Sdavidcs 465337517Sdavidcs/** 466337517Sdavidcs * @brief ecore_sp_eth_rx_queues_set_default - 467337517Sdavidcs * 468337517Sdavidcs * This ramrod sets RSS RX queue as default one. 469337517Sdavidcs * 470337517Sdavidcs * @note Final phase API. 471337517Sdavidcs * 472337517Sdavidcs * @param p_hwfn 473337517Sdavidcs * @param p_rxq_handlers queue handlers to be updated. 
474337517Sdavidcs * @param comp_mode 475337517Sdavidcs * @param p_comp_data 476337517Sdavidcs * 477337517Sdavidcs * @return enum _ecore_status_t 478337517Sdavidcs */ 479337517Sdavidcs 480337517Sdavidcsenum _ecore_status_t 481337517Sdavidcsecore_sp_eth_rx_queues_set_default(struct ecore_hwfn *p_hwfn, 482337517Sdavidcs void *p_rxq_handler, 483337517Sdavidcs enum spq_mode comp_mode, 484337517Sdavidcs struct ecore_spq_comp_cb *p_comp_data); 485337517Sdavidcs 486316485Sdavidcsvoid __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, 487316485Sdavidcs struct ecore_ptt *p_ptt, 488316485Sdavidcs struct ecore_eth_stats *stats, 489316485Sdavidcs u16 statistics_bin, bool b_get_port_stats); 490316485Sdavidcs 491316485Sdavidcsvoid ecore_get_vport_stats(struct ecore_dev *p_dev, 492316485Sdavidcs struct ecore_eth_stats *stats); 493316485Sdavidcs 494316485Sdavidcsvoid ecore_reset_vport_stats(struct ecore_dev *p_dev); 495316485Sdavidcs 496316485Sdavidcs/** 497316485Sdavidcs *@brief ecore_arfs_mode_configure - 498316485Sdavidcs * 499316485Sdavidcs *Enable or disable rfs mode. It must accept atleast one of tcp or udp true 500316485Sdavidcs *and atleast one of ipv4 or ipv6 true to enable rfs mode. 501316485Sdavidcs * 502316485Sdavidcs *@param p_hwfn 503316485Sdavidcs *@param p_ptt 504316485Sdavidcs *@param p_cfg_params arfs mode configuration parameters. 505316485Sdavidcs * 506316485Sdavidcs */ 507316485Sdavidcsvoid ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, 508316485Sdavidcs struct ecore_ptt *p_ptt, 509316485Sdavidcs struct ecore_arfs_config_params *p_cfg_params); 510320164Sdavidcs 511337517Sdavidcs#ifndef __EXTRACT__LINUX__ 512337517Sdavidcsstruct ecore_ntuple_filter_params { 513337517Sdavidcs /* Physically mapped address containing header of buffer to be used 514337517Sdavidcs * as filter. 
515337517Sdavidcs */ 516337517Sdavidcs dma_addr_t addr; 517337517Sdavidcs 518337517Sdavidcs /* Length of header in bytes */ 519337517Sdavidcs u16 length; 520337517Sdavidcs 521337517Sdavidcs /* Relative queue-id to receive classified packet */ 522337517Sdavidcs#define ECORE_RFS_NTUPLE_QID_RSS ((u16)-1) 523337517Sdavidcs u16 qid; 524337517Sdavidcs 525337517Sdavidcs /* Identifier can either be according to vport-id or vfid */ 526337517Sdavidcs bool b_is_vf; 527337517Sdavidcs u8 vport_id; 528337517Sdavidcs u8 vf_id; 529337517Sdavidcs 530337517Sdavidcs /* true iff this filter is to be added. Else to be removed */ 531337517Sdavidcs bool b_is_add; 532337517Sdavidcs}; 533337517Sdavidcs#endif 534337517Sdavidcs 535320164Sdavidcs/** 536320164Sdavidcs * @brief - ecore_configure_rfs_ntuple_filter 537320164Sdavidcs * 538320164Sdavidcs * This ramrod should be used to add or remove arfs hw filter 539320164Sdavidcs * 540320164Sdavidcs * @params p_hwfn 541320164Sdavidcs * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize 542320164Sdavidcs * it with cookie and callback function address, if not 543320164Sdavidcs * using this mode then client must pass NULL. 544337517Sdavidcs * @params p_params 545320164Sdavidcs */ 546320164Sdavidcsenum _ecore_status_t 547320164Sdavidcsecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 548320164Sdavidcs struct ecore_spq_comp_cb *p_cb, 549337517Sdavidcs struct ecore_ntuple_filter_params *p_params); 550316485Sdavidcs#endif 551