/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
26336695Sdavidcs */ 27336695Sdavidcs 28336695Sdavidcs 29336695Sdavidcs#include <sys/cdefs.h> 30336695Sdavidcs__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/ecore_vf.c 337519 2018-08-09 01:39:47Z davidcs $"); 31336695Sdavidcs 32336695Sdavidcs#include "bcm_osal.h" 33336695Sdavidcs#include "ecore.h" 34336695Sdavidcs#include "ecore_hsi_eth.h" 35336695Sdavidcs#include "ecore_sriov.h" 36336695Sdavidcs#include "ecore_l2_api.h" 37336695Sdavidcs#include "ecore_vf.h" 38336695Sdavidcs#include "ecore_vfpf_if.h" 39336695Sdavidcs#include "ecore_status.h" 40336695Sdavidcs#include "reg_addr.h" 41336695Sdavidcs#include "ecore_int.h" 42336695Sdavidcs#include "ecore_l2.h" 43336695Sdavidcs#include "ecore_mcp_api.h" 44336695Sdavidcs#include "ecore_vf_api.h" 45336695Sdavidcs 46336695Sdavidcs#ifdef _NTDDK_ 47336695Sdavidcs#pragma warning(push) 48336695Sdavidcs#pragma warning(disable : 28167) 49336695Sdavidcs#pragma warning(disable : 28123) 50336695Sdavidcs#pragma warning(disable : 28121) 51336695Sdavidcs#endif 52336695Sdavidcs 53336695Sdavidcsstatic void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, 54336695Sdavidcs u16 type, u16 length) 55336695Sdavidcs{ 56336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 57336695Sdavidcs void *p_tlv; 58336695Sdavidcs 59336695Sdavidcs /* This lock is released when we receive PF's response 60336695Sdavidcs * in ecore_send_msg2pf(). 61336695Sdavidcs * So, ecore_vf_pf_prep() and ecore_send_msg2pf() 62336695Sdavidcs * must come in sequence. 
63336695Sdavidcs */ 64336695Sdavidcs OSAL_MUTEX_ACQUIRE(&(p_iov->mutex)); 65336695Sdavidcs 66336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "preparing to send %s tlv over vf pf channel\n", 67336695Sdavidcs ecore_channel_tlvs_string[type]); 68336695Sdavidcs 69336695Sdavidcs /* Reset Requst offset */ 70336695Sdavidcs p_iov->offset = (u8 *)p_iov->vf2pf_request; 71336695Sdavidcs 72336695Sdavidcs /* Clear mailbox - both request and reply */ 73336695Sdavidcs OSAL_MEMSET(p_iov->vf2pf_request, 0, 74336695Sdavidcs sizeof(union vfpf_tlvs)); 75336695Sdavidcs OSAL_MEMSET(p_iov->pf2vf_reply, 0, 76336695Sdavidcs sizeof(union pfvf_tlvs)); 77336695Sdavidcs 78336695Sdavidcs /* Init type and length */ 79336695Sdavidcs p_tlv = ecore_add_tlv(&p_iov->offset, type, length); 80336695Sdavidcs 81336695Sdavidcs /* Init first tlv header */ 82336695Sdavidcs ((struct vfpf_first_tlv *)p_tlv)->reply_address = 83336695Sdavidcs (u64)p_iov->pf2vf_reply_phys; 84336695Sdavidcs 85336695Sdavidcs return p_tlv; 86336695Sdavidcs} 87336695Sdavidcs 88336695Sdavidcsstatic void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn, 89336695Sdavidcs enum _ecore_status_t req_status) 90336695Sdavidcs{ 91336695Sdavidcs union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; 92336695Sdavidcs 93336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 94336695Sdavidcs "VF request status = 0x%x, PF reply status = 0x%x\n", 95336695Sdavidcs req_status, resp->default_resp.hdr.status); 96336695Sdavidcs 97336695Sdavidcs OSAL_MUTEX_RELEASE(&(p_hwfn->vf_iov_info->mutex)); 98336695Sdavidcs} 99336695Sdavidcs 100336695Sdavidcs#ifdef CONFIG_ECORE_SW_CHANNEL 101336695Sdavidcs/* The SW channel implementation of Windows needs to know the 'exact' 102336695Sdavidcs * response size of any given message. That means that for future 103336695Sdavidcs * messages we'd be unable to send TLVs to PF if he'll be unable to 104336695Sdavidcs * answer them if the |response| != |default response|. 
105336695Sdavidcs * We'd need to handshake in acquire capabilities for any such. 106336695Sdavidcs */ 107336695Sdavidcs#endif 108336695Sdavidcsstatic enum _ecore_status_t 109336695Sdavidcsecore_send_msg2pf(struct ecore_hwfn *p_hwfn, 110336695Sdavidcs u8 *done, u32 resp_size) 111336695Sdavidcs{ 112336695Sdavidcs union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; 113336695Sdavidcs struct ustorm_trigger_vf_zone trigger; 114336695Sdavidcs struct ustorm_vf_zone *zone_data; 115336695Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 116336695Sdavidcs int time = 100; 117336695Sdavidcs 118336695Sdavidcs zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; 119336695Sdavidcs 120336695Sdavidcs /* output tlvs list */ 121336695Sdavidcs ecore_dp_tlv_list(p_hwfn, p_req); 122336695Sdavidcs 123336695Sdavidcs /* need to add the END TLV to the message size */ 124336695Sdavidcs resp_size += sizeof(struct channel_list_end_tlv); 125336695Sdavidcs 126336695Sdavidcs#ifdef CONFIG_ECORE_SW_CHANNEL 127336695Sdavidcs if (!p_hwfn->vf_iov_info->b_hw_channel) { 128336695Sdavidcs rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev, 129336695Sdavidcs done, 130336695Sdavidcs p_req, 131336695Sdavidcs p_hwfn->vf_iov_info->pf2vf_reply, 132336695Sdavidcs sizeof(union vfpf_tlvs), 133336695Sdavidcs resp_size); 134336695Sdavidcs /* TODO - no prints about message ? 
*/ 135336695Sdavidcs return rc; 136336695Sdavidcs } 137336695Sdavidcs#endif 138336695Sdavidcs 139336695Sdavidcs /* Send TLVs over HW channel */ 140336695Sdavidcs OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); 141336695Sdavidcs trigger.vf_pf_msg_valid = 1; 142336695Sdavidcs 143336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 144336695Sdavidcs "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", 145336695Sdavidcs GET_FIELD(p_hwfn->hw_info.concrete_fid, 146336695Sdavidcs PXP_CONCRETE_FID_PFID), 147336695Sdavidcs U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys), 148336695Sdavidcs U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys), 149336695Sdavidcs &zone_data->non_trigger.vf_pf_msg_addr, 150336695Sdavidcs *((u32 *)&trigger), 151336695Sdavidcs &zone_data->trigger); 152336695Sdavidcs 153336695Sdavidcs REG_WR(p_hwfn, 154336695Sdavidcs (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, 155336695Sdavidcs U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys)); 156336695Sdavidcs 157336695Sdavidcs REG_WR(p_hwfn, 158336695Sdavidcs (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, 159336695Sdavidcs U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys)); 160336695Sdavidcs 161336695Sdavidcs /* The message data must be written first, to prevent trigger before 162336695Sdavidcs * data is written. 163336695Sdavidcs */ 164336695Sdavidcs OSAL_WMB(p_hwfn->p_dev); 165336695Sdavidcs 166336695Sdavidcs REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); 167336695Sdavidcs 168336695Sdavidcs /* When PF would be done with the response, it would write back to the 169336695Sdavidcs * `done' address. Poll until then. 
170336695Sdavidcs */ 171336695Sdavidcs while ((!*done) && time) { 172336695Sdavidcs OSAL_MSLEEP(25); 173336695Sdavidcs time--; 174336695Sdavidcs } 175336695Sdavidcs 176336695Sdavidcs if (!*done) { 177336695Sdavidcs DP_NOTICE(p_hwfn, true, 178336695Sdavidcs "VF <-- PF Timeout [Type %d]\n", 179336695Sdavidcs p_req->first_tlv.tl.type); 180336695Sdavidcs rc = ECORE_TIMEOUT; 181336695Sdavidcs } else { 182336695Sdavidcs if ((*done != PFVF_STATUS_SUCCESS) && 183336695Sdavidcs (*done != PFVF_STATUS_NO_RESOURCE)) 184336695Sdavidcs DP_NOTICE(p_hwfn, false, 185336695Sdavidcs "PF response: %d [Type %d]\n", 186336695Sdavidcs *done, p_req->first_tlv.tl.type); 187336695Sdavidcs else 188336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 189336695Sdavidcs "PF response: %d [Type %d]\n", 190336695Sdavidcs *done, p_req->first_tlv.tl.type); 191336695Sdavidcs } 192336695Sdavidcs 193336695Sdavidcs return rc; 194336695Sdavidcs} 195336695Sdavidcs 196336695Sdavidcsstatic void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn, 197336695Sdavidcs struct ecore_queue_cid *p_cid) 198336695Sdavidcs{ 199336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 200336695Sdavidcs struct vfpf_qid_tlv *p_qid_tlv; 201336695Sdavidcs 202336695Sdavidcs /* Only add QIDs for the queue if it was negotiated with PF */ 203336695Sdavidcs if (!(p_iov->acquire_resp.pfdev_info.capabilities & 204336695Sdavidcs PFVF_ACQUIRE_CAP_QUEUE_QIDS)) 205336695Sdavidcs return; 206336695Sdavidcs 207336695Sdavidcs p_qid_tlv = ecore_add_tlv(&p_iov->offset, 208336695Sdavidcs CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); 209336695Sdavidcs p_qid_tlv->qid = p_cid->qid_usage_idx; 210336695Sdavidcs} 211336695Sdavidcs 212336695Sdavidcsstatic enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn, 213336695Sdavidcs bool b_final) 214336695Sdavidcs{ 215336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 216336695Sdavidcs struct pfvf_def_resp_tlv *resp; 217336695Sdavidcs struct vfpf_first_tlv *req; 218336695Sdavidcs u32 
size; 219336695Sdavidcs enum _ecore_status_t rc; 220336695Sdavidcs 221336695Sdavidcs /* clear mailbox and prep first tlv */ 222336695Sdavidcs req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); 223336695Sdavidcs 224336695Sdavidcs /* add list termination tlv */ 225336695Sdavidcs ecore_add_tlv(&p_iov->offset, 226336695Sdavidcs CHANNEL_TLV_LIST_END, 227336695Sdavidcs sizeof(struct channel_list_end_tlv)); 228336695Sdavidcs 229336695Sdavidcs resp = &p_iov->pf2vf_reply->default_resp; 230336695Sdavidcs rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 231336695Sdavidcs 232336695Sdavidcs if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS) 233336695Sdavidcs rc = ECORE_AGAIN; 234336695Sdavidcs 235336695Sdavidcs ecore_vf_pf_req_end(p_hwfn, rc); 236336695Sdavidcs if (!b_final) 237336695Sdavidcs return rc; 238336695Sdavidcs 239336695Sdavidcs p_hwfn->b_int_enabled = 0; 240336695Sdavidcs 241336695Sdavidcs if (p_iov->vf2pf_request) 242336695Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 243336695Sdavidcs p_iov->vf2pf_request, 244336695Sdavidcs p_iov->vf2pf_request_phys, 245336695Sdavidcs sizeof(union vfpf_tlvs)); 246336695Sdavidcs if (p_iov->pf2vf_reply) 247336695Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 248336695Sdavidcs p_iov->pf2vf_reply, 249336695Sdavidcs p_iov->pf2vf_reply_phys, 250336695Sdavidcs sizeof(union pfvf_tlvs)); 251336695Sdavidcs 252336695Sdavidcs if (p_iov->bulletin.p_virt) { 253336695Sdavidcs size = sizeof(struct ecore_bulletin_content); 254336695Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 255336695Sdavidcs p_iov->bulletin.p_virt, 256336695Sdavidcs p_iov->bulletin.phys, 257336695Sdavidcs size); 258336695Sdavidcs } 259336695Sdavidcs 260336695Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC 261336695Sdavidcs OSAL_MUTEX_DEALLOC(&p_iov->mutex); 262336695Sdavidcs#endif 263336695Sdavidcs 264336695Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info); 265336695Sdavidcs p_hwfn->vf_iov_info = OSAL_NULL; 266336695Sdavidcs 
267336695Sdavidcs return rc; 268336695Sdavidcs} 269336695Sdavidcs 270336695Sdavidcsenum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn) 271336695Sdavidcs{ 272336695Sdavidcs return _ecore_vf_pf_release(p_hwfn, true); 273336695Sdavidcs} 274336695Sdavidcs 275336695Sdavidcs#define VF_ACQUIRE_THRESH 3 276336695Sdavidcsstatic void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn, 277336695Sdavidcs struct vf_pf_resc_request *p_req, 278336695Sdavidcs struct pf_vf_resc *p_resp) 279336695Sdavidcs{ 280336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 281336695Sdavidcs "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n", 282336695Sdavidcs p_req->num_rxqs, p_resp->num_rxqs, 283336695Sdavidcs p_req->num_rxqs, p_resp->num_txqs, 284336695Sdavidcs p_req->num_sbs, p_resp->num_sbs, 285336695Sdavidcs p_req->num_mac_filters, p_resp->num_mac_filters, 286336695Sdavidcs p_req->num_vlan_filters, p_resp->num_vlan_filters, 287336695Sdavidcs p_req->num_mc_filters, p_resp->num_mc_filters, 288336695Sdavidcs p_req->num_cids, p_resp->num_cids); 289336695Sdavidcs 290336695Sdavidcs /* humble our request */ 291336695Sdavidcs p_req->num_txqs = p_resp->num_txqs; 292336695Sdavidcs p_req->num_rxqs = p_resp->num_rxqs; 293336695Sdavidcs p_req->num_sbs = p_resp->num_sbs; 294336695Sdavidcs p_req->num_mac_filters = p_resp->num_mac_filters; 295336695Sdavidcs p_req->num_vlan_filters = p_resp->num_vlan_filters; 296336695Sdavidcs p_req->num_mc_filters = p_resp->num_mc_filters; 297336695Sdavidcs p_req->num_cids = p_resp->num_cids; 298336695Sdavidcs} 299336695Sdavidcs 300336695Sdavidcsstatic enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn) 301336695Sdavidcs{ 302336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 303336695Sdavidcs struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; 304336695Sdavidcs struct 
pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 305336695Sdavidcs struct ecore_vf_acquire_sw_info vf_sw_info; 306336695Sdavidcs struct vf_pf_resc_request *p_resc; 307336695Sdavidcs bool resources_acquired = false; 308336695Sdavidcs struct vfpf_acquire_tlv *req; 309336695Sdavidcs int attempts = 0; 310336695Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 311336695Sdavidcs int eth_hsi_minor_ver; 312336695Sdavidcs 313336695Sdavidcs /* clear mailbox and prep first tlv */ 314336695Sdavidcs req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); 315336695Sdavidcs p_resc = &req->resc_request; 316336695Sdavidcs 317336695Sdavidcs /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */ 318336695Sdavidcs req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; 319336695Sdavidcs 320336695Sdavidcs p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF; 321336695Sdavidcs p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF; 322336695Sdavidcs p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF; 323336695Sdavidcs p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS; 324336695Sdavidcs p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS; 325336695Sdavidcs p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS; 326336695Sdavidcs 327336695Sdavidcs OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info)); 328336695Sdavidcs OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info); 329336695Sdavidcs 330336695Sdavidcs req->vfdev_info.os_type = vf_sw_info.os_type; 331336695Sdavidcs req->vfdev_info.driver_version = vf_sw_info.driver_version; 332336695Sdavidcs req->vfdev_info.fw_major = FW_MAJOR_VERSION; 333336695Sdavidcs req->vfdev_info.fw_minor = FW_MINOR_VERSION; 334336695Sdavidcs req->vfdev_info.fw_revision = FW_REVISION_VERSION; 335336695Sdavidcs req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; 336336695Sdavidcs req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; 337336695Sdavidcs req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; 338336695Sdavidcs 339336695Sdavidcs /* Fill 
capability field with any non-deprecated config we support */ 340336695Sdavidcs req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; 341336695Sdavidcs 342336695Sdavidcs /* If we've mapped the doorbell bar, try using queue qids */ 343336695Sdavidcs if (p_iov->b_doorbell_bar) { 344336695Sdavidcs req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | 345336695Sdavidcs VFPF_ACQUIRE_CAP_QUEUE_QIDS; 346336695Sdavidcs p_resc->num_cids = ECORE_ETH_VF_MAX_NUM_CIDS; 347336695Sdavidcs } 348336695Sdavidcs 349336695Sdavidcs /* pf 2 vf bulletin board address */ 350336695Sdavidcs req->bulletin_addr = p_iov->bulletin.phys; 351336695Sdavidcs req->bulletin_size = p_iov->bulletin.size; 352336695Sdavidcs 353336695Sdavidcs /* add list termination tlv */ 354336695Sdavidcs ecore_add_tlv(&p_iov->offset, 355336695Sdavidcs CHANNEL_TLV_LIST_END, 356336695Sdavidcs sizeof(struct channel_list_end_tlv)); 357336695Sdavidcs 358336695Sdavidcs while (!resources_acquired) { 359336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "attempting to acquire resources\n"); 360336695Sdavidcs 361336695Sdavidcs /* Clear response buffer, as this might be a re-send */ 362336695Sdavidcs OSAL_MEMSET(p_iov->pf2vf_reply, 0, 363336695Sdavidcs sizeof(union pfvf_tlvs)); 364336695Sdavidcs 365336695Sdavidcs /* send acquire request */ 366336695Sdavidcs rc = ecore_send_msg2pf(p_hwfn, 367336695Sdavidcs &resp->hdr.status, 368336695Sdavidcs sizeof(*resp)); 369336695Sdavidcs if (rc != ECORE_SUCCESS) 370336695Sdavidcs goto exit; 371336695Sdavidcs 372336695Sdavidcs /* copy acquire response from buffer to p_hwfn */ 373336695Sdavidcs OSAL_MEMCPY(&p_iov->acquire_resp, 374336695Sdavidcs resp, 375336695Sdavidcs sizeof(p_iov->acquire_resp)); 376336695Sdavidcs 377336695Sdavidcs attempts++; 378336695Sdavidcs 379336695Sdavidcs if (resp->hdr.status == PFVF_STATUS_SUCCESS) { 380336695Sdavidcs /* PF agrees to allocate our resources */ 381336695Sdavidcs if (!(resp->pfdev_info.capabilities & 382336695Sdavidcs 
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { 383336695Sdavidcs /* It's possible legacy PF mistakenly accepted; 384336695Sdavidcs * but we don't care - simply mark it as 385336695Sdavidcs * legacy and continue. 386336695Sdavidcs */ 387336695Sdavidcs req->vfdev_info.capabilities |= 388336695Sdavidcs VFPF_ACQUIRE_CAP_PRE_FP_HSI; 389336695Sdavidcs } 390336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "resources acquired\n"); 391336695Sdavidcs resources_acquired = true; 392336695Sdavidcs } /* PF refuses to allocate our resources */ 393336695Sdavidcs else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && 394336695Sdavidcs attempts < VF_ACQUIRE_THRESH) { 395336695Sdavidcs ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, 396336695Sdavidcs &resp->resc); 397336695Sdavidcs 398336695Sdavidcs } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { 399336695Sdavidcs if (pfdev_info->major_fp_hsi && 400336695Sdavidcs (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { 401336695Sdavidcs DP_NOTICE(p_hwfn, false, 402336695Sdavidcs "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", 403336695Sdavidcs pfdev_info->major_fp_hsi, 404336695Sdavidcs pfdev_info->minor_fp_hsi, 405336695Sdavidcs ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR, 406336695Sdavidcs pfdev_info->major_fp_hsi); 407336695Sdavidcs rc = ECORE_INVAL; 408336695Sdavidcs goto exit; 409336695Sdavidcs } 410336695Sdavidcs 411336695Sdavidcs if (!pfdev_info->major_fp_hsi) { 412336695Sdavidcs if (req->vfdev_info.capabilities & 413336695Sdavidcs VFPF_ACQUIRE_CAP_PRE_FP_HSI) { 414336695Sdavidcs DP_NOTICE(p_hwfn, false, 415336695Sdavidcs "PF uses very old drivers. 
Please change to a VF driver using no later than 8.8.x.x.\n"); 416336695Sdavidcs rc = ECORE_INVAL; 417336695Sdavidcs goto exit; 418336695Sdavidcs } else { 419336695Sdavidcs DP_INFO(p_hwfn, 420336695Sdavidcs "PF is old - try re-acquire to see if it supports FW-version override\n"); 421336695Sdavidcs req->vfdev_info.capabilities |= 422336695Sdavidcs VFPF_ACQUIRE_CAP_PRE_FP_HSI; 423336695Sdavidcs continue; 424336695Sdavidcs } 425336695Sdavidcs } 426336695Sdavidcs 427336695Sdavidcs /* If PF/VF are using same Major, PF must have had 428336695Sdavidcs * it's reasons. Simply fail. 429336695Sdavidcs */ 430336695Sdavidcs DP_NOTICE(p_hwfn, false, 431336695Sdavidcs "PF rejected acquisition by VF\n"); 432336695Sdavidcs rc = ECORE_INVAL; 433336695Sdavidcs goto exit; 434336695Sdavidcs } else { 435336695Sdavidcs DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", 436336695Sdavidcs resp->hdr.status); 437336695Sdavidcs rc = ECORE_AGAIN; 438336695Sdavidcs goto exit; 439336695Sdavidcs } 440336695Sdavidcs } 441336695Sdavidcs 442336695Sdavidcs /* Mark the PF as legacy, if needed */ 443336695Sdavidcs if (req->vfdev_info.capabilities & 444336695Sdavidcs VFPF_ACQUIRE_CAP_PRE_FP_HSI) 445336695Sdavidcs p_iov->b_pre_fp_hsi = true; 446336695Sdavidcs 447336695Sdavidcs /* In case PF doesn't support multi-queue Tx, update the number of 448336695Sdavidcs * CIDs to reflect the number of queues [older PFs didn't fill that 449336695Sdavidcs * field]. 
450336695Sdavidcs */ 451336695Sdavidcs if (!(resp->pfdev_info.capabilities & 452336695Sdavidcs PFVF_ACQUIRE_CAP_QUEUE_QIDS)) 453336695Sdavidcs resp->resc.num_cids = resp->resc.num_rxqs + 454336695Sdavidcs resp->resc.num_txqs; 455336695Sdavidcs 456336695Sdavidcs#ifndef LINUX_REMOVE 457336695Sdavidcs rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc); 458336695Sdavidcs if (rc) { 459336695Sdavidcs DP_NOTICE(p_hwfn, true, 460336695Sdavidcs "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n", 461336695Sdavidcs rc); 462336695Sdavidcs rc = ECORE_AGAIN; 463336695Sdavidcs goto exit; 464336695Sdavidcs } 465336695Sdavidcs#endif 466336695Sdavidcs 467336695Sdavidcs /* Update bulletin board size with response from PF */ 468336695Sdavidcs p_iov->bulletin.size = resp->bulletin_size; 469336695Sdavidcs 470336695Sdavidcs /* get HW info */ 471336695Sdavidcs p_hwfn->p_dev->type = resp->pfdev_info.dev_type; 472336695Sdavidcs p_hwfn->p_dev->chip_rev = (u8) resp->pfdev_info.chip_rev; 473336695Sdavidcs 474336695Sdavidcs DP_INFO(p_hwfn, "Chip details - %s%d\n", 475336695Sdavidcs ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH", 476336695Sdavidcs CHIP_REV_IS_A0(p_hwfn->p_dev) ? 
0 : 1); 477336695Sdavidcs 478336695Sdavidcs p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff; 479336695Sdavidcs 480336695Sdavidcs /* Learn of the possibility of CMT */ 481336695Sdavidcs if (IS_LEAD_HWFN(p_hwfn)) { 482336695Sdavidcs if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { 483336695Sdavidcs DP_NOTICE(p_hwfn, false, "100g VF\n"); 484336695Sdavidcs p_hwfn->p_dev->num_hwfns = 2; 485336695Sdavidcs } 486336695Sdavidcs } 487336695Sdavidcs 488336695Sdavidcs eth_hsi_minor_ver = ETH_HSI_VER_MINOR; 489336695Sdavidcs 490336695Sdavidcs if (!p_iov->b_pre_fp_hsi && 491336695Sdavidcs (eth_hsi_minor_ver) && 492336695Sdavidcs (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { 493336695Sdavidcs DP_INFO(p_hwfn, 494336695Sdavidcs "PF is using older fastpath HSI; %02x.%02x is configured\n", 495336695Sdavidcs ETH_HSI_VER_MAJOR, 496336695Sdavidcs resp->pfdev_info.minor_fp_hsi); 497336695Sdavidcs } 498336695Sdavidcs 499336695Sdavidcsexit: 500336695Sdavidcs ecore_vf_pf_req_end(p_hwfn, rc); 501336695Sdavidcs 502336695Sdavidcs return rc; 503336695Sdavidcs} 504336695Sdavidcs 505336695Sdavidcsu32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, 506336695Sdavidcs enum BAR_ID bar_id) 507336695Sdavidcs{ 508336695Sdavidcs u32 bar_size; 509336695Sdavidcs 510336695Sdavidcs /* Regview size is fixed */ 511336695Sdavidcs if (bar_id == BAR_ID_0) 512336695Sdavidcs return 1 << 17; 513336695Sdavidcs 514336695Sdavidcs /* Doorbell is received from PF */ 515336695Sdavidcs bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size; 516336695Sdavidcs if (bar_size) 517336695Sdavidcs return 1 << bar_size; 518336695Sdavidcs return 0; 519336695Sdavidcs} 520336695Sdavidcs 521336695Sdavidcsenum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn) 522336695Sdavidcs{ 523336695Sdavidcs struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev); 524336695Sdavidcs struct ecore_vf_iov *p_iov; 525336695Sdavidcs u32 reg; 526336695Sdavidcs enum _ecore_status_t rc; 
527336695Sdavidcs 528336695Sdavidcs /* Set number of hwfns - might be overriden once leading hwfn learns 529336695Sdavidcs * actual configuration from PF. 530336695Sdavidcs */ 531336695Sdavidcs if (IS_LEAD_HWFN(p_hwfn)) 532336695Sdavidcs p_hwfn->p_dev->num_hwfns = 1; 533336695Sdavidcs 534336695Sdavidcs reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; 535336695Sdavidcs p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); 536336695Sdavidcs 537336695Sdavidcs reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; 538336695Sdavidcs p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); 539336695Sdavidcs 540336695Sdavidcs /* Allocate vf sriov info */ 541336695Sdavidcs p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov)); 542336695Sdavidcs if (!p_iov) { 543336695Sdavidcs DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sriov'\n"); 544336695Sdavidcs return ECORE_NOMEM; 545336695Sdavidcs } 546336695Sdavidcs 547336695Sdavidcs /* Doorbells are tricky; Upper-layer has alreday set the hwfn doorbell 548336695Sdavidcs * value, but there are several incompatibily scenarios where that 549336695Sdavidcs * would be incorrect and we'd need to override it. 550336695Sdavidcs */ 551336695Sdavidcs if (p_hwfn->doorbells == OSAL_NULL) { 552336695Sdavidcs p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview + 553336695Sdavidcs PXP_VF_BAR0_START_DQ; 554336695Sdavidcs#ifndef LINUX_REMOVE 555336695Sdavidcs p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - 556336695Sdavidcs (u8 *)p_hwfn->p_dev->doorbells; 557336695Sdavidcs#endif 558336695Sdavidcs 559336695Sdavidcs } else if (p_hwfn == p_lead) { 560336695Sdavidcs /* For leading hw-function, value is always correct, but need 561336695Sdavidcs * to handle scenario where legacy PF would not support 100g 562336695Sdavidcs * mapped bars later. 
563336695Sdavidcs */ 564336695Sdavidcs p_iov->b_doorbell_bar = true; 565336695Sdavidcs } else { 566336695Sdavidcs /* here, value would be correct ONLY if the leading hwfn 567336695Sdavidcs * received indication that mapped-bars are supported. 568336695Sdavidcs */ 569336695Sdavidcs if (p_lead->vf_iov_info->b_doorbell_bar) 570336695Sdavidcs p_iov->b_doorbell_bar = true; 571336695Sdavidcs#ifdef LINUX_REMOVE 572336695Sdavidcs else 573336695Sdavidcs p_hwfn->doorbells = (u8 OSAL_IOMEM*) 574336695Sdavidcs p_hwfn->regview + 575336695Sdavidcs PXP_VF_BAR0_START_DQ; 576336695Sdavidcs#else 577336695Sdavidcs else { 578336695Sdavidcs p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview + 579336695Sdavidcs PXP_VF_BAR0_START_DQ; 580336695Sdavidcs p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - 581336695Sdavidcs (u8 *)p_hwfn->p_dev->doorbells; 582336695Sdavidcs } 583336695Sdavidcs#endif 584336695Sdavidcs 585336695Sdavidcs } 586336695Sdavidcs 587336695Sdavidcs /* Allocate vf2pf msg */ 588336695Sdavidcs p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 589336695Sdavidcs &p_iov->vf2pf_request_phys, 590336695Sdavidcs sizeof(union vfpf_tlvs)); 591336695Sdavidcs if (!p_iov->vf2pf_request) { 592336695Sdavidcs DP_NOTICE(p_hwfn, true, "Failed to allocate `vf2pf_request' DMA memory\n"); 593336695Sdavidcs goto free_p_iov; 594336695Sdavidcs } 595336695Sdavidcs 596336695Sdavidcs p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 597336695Sdavidcs &p_iov->pf2vf_reply_phys, 598336695Sdavidcs sizeof(union pfvf_tlvs)); 599336695Sdavidcs if (!p_iov->pf2vf_reply) { 600336695Sdavidcs DP_NOTICE(p_hwfn, true, "Failed to allocate `pf2vf_reply' DMA memory\n"); 601336695Sdavidcs goto free_vf2pf_request; 602336695Sdavidcs } 603336695Sdavidcs 604336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 605336695Sdavidcs "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", 606336695Sdavidcs p_iov->vf2pf_request, 607336695Sdavidcs (unsigned long 
long)p_iov->vf2pf_request_phys, 608336695Sdavidcs p_iov->pf2vf_reply, 609336695Sdavidcs (unsigned long long)p_iov->pf2vf_reply_phys); 610336695Sdavidcs 611336695Sdavidcs /* Allocate Bulletin board */ 612336695Sdavidcs p_iov->bulletin.size = sizeof(struct ecore_bulletin_content); 613336695Sdavidcs p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 614336695Sdavidcs &p_iov->bulletin.phys, 615336695Sdavidcs p_iov->bulletin.size); 616336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 617336695Sdavidcs "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", 618336695Sdavidcs p_iov->bulletin.p_virt, 619336695Sdavidcs (unsigned long long)p_iov->bulletin.phys, 620336695Sdavidcs p_iov->bulletin.size); 621336695Sdavidcs 622336695Sdavidcs#ifdef CONFIG_ECORE_LOCK_ALLOC 623336695Sdavidcs OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex); 624336695Sdavidcs#endif 625336695Sdavidcs OSAL_MUTEX_INIT(&p_iov->mutex); 626336695Sdavidcs 627336695Sdavidcs p_hwfn->vf_iov_info = p_iov; 628336695Sdavidcs 629336695Sdavidcs p_hwfn->hw_info.personality = ECORE_PCI_ETH; 630336695Sdavidcs 631336695Sdavidcs rc = ecore_vf_pf_acquire(p_hwfn); 632336695Sdavidcs 633336695Sdavidcs /* If VF is 100g using a mapped bar and PF is too old to support that, 634336695Sdavidcs * acquisition would succeed - but the VF would have no way knowing 635336695Sdavidcs * the size of the doorbell bar configured in HW and thus will not 636336695Sdavidcs * know how to split it for 2nd hw-function. 637336695Sdavidcs * In this case we re-try without the indication of the mapped 638336695Sdavidcs * doorbell. 
639336695Sdavidcs */ 640336695Sdavidcs if (rc == ECORE_SUCCESS && 641336695Sdavidcs p_iov->b_doorbell_bar && 642336695Sdavidcs !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) && 643336695Sdavidcs ECORE_IS_CMT(p_hwfn->p_dev)) { 644336695Sdavidcs rc = _ecore_vf_pf_release(p_hwfn, false); 645336695Sdavidcs if (rc != ECORE_SUCCESS) 646336695Sdavidcs return rc; 647336695Sdavidcs 648336695Sdavidcs p_iov->b_doorbell_bar = false; 649336695Sdavidcs p_hwfn->doorbells = (u8 OSAL_IOMEM*)p_hwfn->regview + 650336695Sdavidcs PXP_VF_BAR0_START_DQ; 651336695Sdavidcs#ifndef LINUX_REMOVE 652336695Sdavidcs p_hwfn->db_offset = (u8 *)p_hwfn->doorbells - 653336695Sdavidcs (u8 *)p_hwfn->p_dev->doorbells; 654336695Sdavidcs#endif 655336695Sdavidcs rc = ecore_vf_pf_acquire(p_hwfn); 656336695Sdavidcs } 657336695Sdavidcs 658336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 659336695Sdavidcs "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", 660336695Sdavidcs p_hwfn->regview, p_hwfn->doorbells, 661336695Sdavidcs p_hwfn->p_dev->doorbells); 662336695Sdavidcs 663336695Sdavidcs return rc; 664336695Sdavidcs 665336695Sdavidcsfree_vf2pf_request: 666336695Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request, 667336695Sdavidcs p_iov->vf2pf_request_phys, 668336695Sdavidcs sizeof(union vfpf_tlvs)); 669336695Sdavidcsfree_p_iov: 670336695Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_iov); 671336695Sdavidcs 672336695Sdavidcs return ECORE_NOMEM; 673336695Sdavidcs} 674336695Sdavidcs 675336695Sdavidcs#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A 676336695Sdavidcs#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ 677336695Sdavidcs (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) 678336695Sdavidcs 679336695Sdavidcsstatic void 680336695Sdavidcs__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 681336695Sdavidcs struct ecore_tunn_update_type *p_src, 682336695Sdavidcs enum ecore_tunn_clss mask, u8 *p_cls) 683336695Sdavidcs{ 684336695Sdavidcs if (p_src->b_update_mode) { 
		p_req->tun_mode_update_mask |= (1 << mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= (1 << mask);
	}

	*p_cls = p_src->tun_cls;
}

/* Prepare the tunnel-update request fields for a tunnel type that also
 * carries a UDP port (vxlan/geneve): latch the new port number when an
 * update was requested, then fill mode/class via the common helper.
 */
static void
ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct ecore_tunn_update_type *p_src,
			   enum ecore_tunn_clss mask, u8 *p_cls,
			   struct ecore_tunn_update_udp_port *p_port,
			   u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

/* Mark every currently-enabled tunnel mode - plus the rx/tx classification
 * flags - for update, so a following UPDATE_TUNN_PARAM request re-negotiates
 * all of them with the PF.
 */
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

/* Adopt the PF's verdict for a single tunnel type: the type is enabled
 * only if the PF set this type's bit in the response feature mask.
 */
static void
__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
			     u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
			     enum ecore_tunn_mode val)
{

	if (feature_mask & (1 << val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

/* Fold the PF's UPDATE_TUNN_PARAM response into the device-wide tunnel
 * state (modes, classifications and UDP ports).
 */
static void
ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
			   struct ecore_tunnel_info *p_tun,
			   struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				     p_resp->vxlan_mode, p_resp->vxlan_clss,
				     ECORE_MODE_VXLAN_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				     p_resp->l2geneve_mode,
				     p_resp->l2geneve_clss,
				     ECORE_MODE_L2GENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				     p_resp->ipgeneve_mode,
				     p_resp->ipgeneve_clss,
				     ECORE_MODE_IPGENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				     p_resp->l2gre_mode, p_resp->l2gre_clss,
				     ECORE_MODE_L2GRE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				     p_resp->ipgre_mode, p_resp->ipgre_clss,
				     ECORE_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled,
		   p_tun->ip_gre.b_mode_enabled);
}

/* Send an UPDATE_TUNN_PARAM request built from 'p_src' to the PF over the
 * VF->PF channel, and adopt the tunnel configuration the PF reports back.
 *
 * Returns ECORE_SUCCESS, a channel error from ecore_send_msg2pf(), or
 * ECORE_INVAL when the PF reports a failure status.
 */
enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	enum _ecore_status_t rc;

	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
				 sizeof(*p_req));

	/* Classification update is only requested when both directions ask
	 * for it.
	 */
	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	/* vxlan/geneve carry UDP ports; the GRE/ip-geneve types use the
	 * port-less helper.
	 */
	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, (enum ecore_tunn_clss)ECORE_MODE_VXLAN_TUNN,
				   &p_req->vxlan_clss, &p_src->vxlan_port,
				   &p_req->update_vxlan_port,
				   &p_req->vxlan_port);
	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				   (enum ecore_tunn_clss)ECORE_MODE_L2GENEVE_TUNN,
				   &p_req->l2geneve_clss, &p_src->geneve_port,
				   &p_req->update_geneve_port,
				   &p_req->geneve_port);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				     (enum ecore_tunn_clss)ECORE_MODE_IPGENEVE_TUNN,
				     &p_req->ipgeneve_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				     (enum ecore_tunn_clss)ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				     (enum ecore_tunn_clss)ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list
termination tlv */ 809336695Sdavidcs ecore_add_tlv(&p_iov->offset, 810336695Sdavidcs CHANNEL_TLV_LIST_END, 811336695Sdavidcs sizeof(struct channel_list_end_tlv)); 812336695Sdavidcs 813336695Sdavidcs p_resp = &p_iov->pf2vf_reply->tunn_param_resp; 814336695Sdavidcs rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); 815336695Sdavidcs 816336695Sdavidcs if (rc) 817336695Sdavidcs goto exit; 818336695Sdavidcs 819336695Sdavidcs if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { 820336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 821336695Sdavidcs "Failed to update tunnel parameters\n"); 822336695Sdavidcs rc = ECORE_INVAL; 823336695Sdavidcs } 824336695Sdavidcs 825336695Sdavidcs ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp); 826336695Sdavidcsexit: 827336695Sdavidcs ecore_vf_pf_req_end(p_hwfn, rc); 828336695Sdavidcs return rc; 829336695Sdavidcs} 830336695Sdavidcs 831336695Sdavidcsenum _ecore_status_t 832336695Sdavidcsecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, 833336695Sdavidcs struct ecore_queue_cid *p_cid, 834336695Sdavidcs u16 bd_max_bytes, 835336695Sdavidcs dma_addr_t bd_chain_phys_addr, 836336695Sdavidcs dma_addr_t cqe_pbl_addr, 837336695Sdavidcs u16 cqe_pbl_size, 838336695Sdavidcs void OSAL_IOMEM **pp_prod) 839336695Sdavidcs{ 840336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 841336695Sdavidcs struct pfvf_start_queue_resp_tlv *resp; 842336695Sdavidcs struct vfpf_start_rxq_tlv *req; 843336695Sdavidcs u16 rx_qid = p_cid->rel.queue_id; 844336695Sdavidcs enum _ecore_status_t rc; 845336695Sdavidcs 846336695Sdavidcs /* clear mailbox and prep first tlv */ 847336695Sdavidcs req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); 848336695Sdavidcs 849336695Sdavidcs req->rx_qid = rx_qid; 850336695Sdavidcs req->cqe_pbl_addr = cqe_pbl_addr; 851336695Sdavidcs req->cqe_pbl_size = cqe_pbl_size; 852336695Sdavidcs req->rxq_addr = bd_chain_phys_addr; 853336695Sdavidcs req->hw_sb = p_cid->sb_igu_id; 854336695Sdavidcs req->sb_index = 
p_cid->sb_idx; 855336695Sdavidcs req->bd_max_bytes = bd_max_bytes; 856336695Sdavidcs req->stat_id = -1; /* Keep initialized, for future compatibility */ 857336695Sdavidcs 858336695Sdavidcs /* If PF is legacy, we'll need to calculate producers ourselves 859336695Sdavidcs * as well as clean them. 860336695Sdavidcs */ 861336695Sdavidcs if (p_iov->b_pre_fp_hsi) { 862336695Sdavidcs u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; 863336695Sdavidcs u32 init_prod_val = 0; 864336695Sdavidcs 865336695Sdavidcs *pp_prod = (u8 OSAL_IOMEM*) 866336695Sdavidcs p_hwfn->regview + 867336695Sdavidcs MSTORM_QZONE_START(p_hwfn->p_dev) + 868336695Sdavidcs hw_qid * MSTORM_QZONE_SIZE; 869336695Sdavidcs 870336695Sdavidcs /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 871336695Sdavidcs __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 872336695Sdavidcs (u32 *)(&init_prod_val)); 873336695Sdavidcs } 874336695Sdavidcs 875336695Sdavidcs ecore_vf_pf_add_qid(p_hwfn, p_cid); 876336695Sdavidcs 877336695Sdavidcs /* add list termination tlv */ 878336695Sdavidcs ecore_add_tlv(&p_iov->offset, 879336695Sdavidcs CHANNEL_TLV_LIST_END, 880336695Sdavidcs sizeof(struct channel_list_end_tlv)); 881336695Sdavidcs 882336695Sdavidcs resp = &p_iov->pf2vf_reply->queue_start; 883336695Sdavidcs rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 884336695Sdavidcs if (rc) 885336695Sdavidcs goto exit; 886336695Sdavidcs 887336695Sdavidcs if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 888336695Sdavidcs rc = ECORE_INVAL; 889336695Sdavidcs goto exit; 890336695Sdavidcs } 891336695Sdavidcs 892336695Sdavidcs /* Learn the address of the producer from the response */ 893336695Sdavidcs if (!p_iov->b_pre_fp_hsi) { 894336695Sdavidcs u32 init_prod_val = 0; 895336695Sdavidcs 896336695Sdavidcs *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset; 897336695Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 898336695Sdavidcs "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", 899336695Sdavidcs rx_qid, 
*pp_prod, resp->offset); 900336695Sdavidcs 901336695Sdavidcs /* Init the rcq, rx bd and rx sge (if valid) producers to 0. 902336695Sdavidcs * It was actually the PF's responsibility, but since some 903336695Sdavidcs * old PFs might fail to do so, we do this as well. 904336695Sdavidcs */ 905336695Sdavidcs OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3); 906336695Sdavidcs __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 907336695Sdavidcs (u32 *)&init_prod_val); 908336695Sdavidcs } 909336695Sdavidcs 910336695Sdavidcsexit: 911336695Sdavidcs ecore_vf_pf_req_end(p_hwfn, rc); 912336695Sdavidcs 913336695Sdavidcs return rc; 914336695Sdavidcs} 915336695Sdavidcs 916336695Sdavidcsenum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, 917336695Sdavidcs struct ecore_queue_cid *p_cid, 918336695Sdavidcs bool cqe_completion) 919336695Sdavidcs{ 920336695Sdavidcs struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 921336695Sdavidcs struct vfpf_stop_rxqs_tlv *req; 922336695Sdavidcs struct pfvf_def_resp_tlv *resp; 923336695Sdavidcs enum _ecore_status_t rc; 924336695Sdavidcs 925336695Sdavidcs /* clear mailbox and prep first tlv */ 926336695Sdavidcs req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); 927336695Sdavidcs 928336695Sdavidcs req->rx_qid = p_cid->rel.queue_id; 929336695Sdavidcs req->num_rxqs = 1; 930336695Sdavidcs req->cqe_completion = cqe_completion; 931336695Sdavidcs 932336695Sdavidcs ecore_vf_pf_add_qid(p_hwfn, p_cid); 933336695Sdavidcs 934336695Sdavidcs /* add list termination tlv */ 935336695Sdavidcs ecore_add_tlv(&p_iov->offset, 936336695Sdavidcs CHANNEL_TLV_LIST_END, 937336695Sdavidcs sizeof(struct channel_list_end_tlv)); 938336695Sdavidcs 939336695Sdavidcs resp = &p_iov->pf2vf_reply->default_resp; 940336695Sdavidcs rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 941336695Sdavidcs if (rc) 942336695Sdavidcs goto exit; 943336695Sdavidcs 944336695Sdavidcs if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 945336695Sdavidcs rc = 
ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* VF-side START_TXQ: ask the PF to start the Tx queue described by 'p_cid'
 * and learn the doorbell address, returned through 'pp_doorbell'.
 * Modern PFs return a doorbell offset; legacy (pre-fast-path-HSI) PFs only
 * return a queue id, so the VF derives the doorbell address from the CID.
 */
enum _ecore_status_t
ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
		      struct ecore_queue_cid *p_cid,
		      dma_addr_t pbl_addr, u16 pbl_size,
		      void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells +
			       resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 OSAL_IOMEM*)p_hwfn->doorbells +
			       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* VF-side STOP_TXQS for a single Tx queue. */
enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

#ifndef LINUX_REMOVE
/* VF-side UPDATE_RXQ: update CQE/event completion flags of one Rx queue.
 * Despite the plural signature, only num_rxqs == 1 is supported (see the
 * guard below).
 */
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     struct ecore_queue_cid **pp_cid,
					     u8 num_rxqs,
					     u8 comp_cqe_flg,
					     u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	enum _ecore_status_t rc;

	/* Starting with CHANNEL_TLV_QID and the need for additional queue
	 * information, this API stopped supporting multiple rxqs.
	 * TODO - remove this and change the API to accept a single queue-cid
	 * in a follow-up patch.
	 */
	if (num_rxqs != 1) {
		DP_NOTICE(p_hwfn, true,
			  "VFs can no longer update more than a single queue\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = (*pp_cid)->rel.queue_id;
	req->num_rxqs = 1;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	ecore_vf_pf_add_qid(p_hwfn, *pp_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
#endif

/* VF-side VPORT_START: request creation of the VF's vport with the given
 * MTU/TPA/vlan settings, passing the physical addresses of all acquired
 * status blocks so the PF can initialize them.
 */
enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
					     u8 vport_id,
					     u16 mtu,
					     u8 inner_vlan_removal,
					     enum ecore_tpa_mode tpa_mode,
					     u8 max_buffers_per_cqe,
					     u8 only_untagged,
					     u8 zero_placement_offset)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;
	int i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;
	req->zero_placement_offset = zero_placement_offset;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* VF-side VPORT_TEARDOWN: ask the PF to tear the VF's vport down; the
 * request carries no payload beyond the first tlv.
 */
enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Return true when the vport-update parameters imply that TLV 'tlv' was
 * placed in the request - used to know which response TLVs to search for.
 */
static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

/* Walk every vport-update TLV that was part of the request and log the
 * per-TLV status found in the PF's reply.
 */
static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						    tlv);
		/* NOTE(review): the guard below makes the "failed" arm of
		 * the ternary unreachable - logging only fires for non-zero
		 * status.
		 */
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

/* VF-side VPORT_UPDATE: build a request out of the extended TLVs implied
 * by 'p_params' (activate, vlan-strip, tx-switch, mcast, accept-params,
 * RSS, accept-any-vlan, SGE-TPA), send it to the PF and report the per-TLV
 * outcome. 'resp_size' accumulates one default-resp per TLV added.
 */
enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
					      struct ecore_sp_vport_update_params *p_params)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	enum _ecore_status_t rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = ecore_add_tlv(&p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					  size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

#ifndef ECORE_UPSTREAM
	if (p_params->update_inner_vlan_removal_flg) {
		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
		p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
					   size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
	}
#endif

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
						tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
					    size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
			    sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

update_rx = p_params->accept_flags.update_rx_mode_config; 1339336695Sdavidcs update_tx = p_params->accept_flags.update_tx_mode_config; 1340336695Sdavidcs 1341336695Sdavidcs if (update_rx || update_tx) { 1342336695Sdavidcs struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1343336695Sdavidcs 1344336695Sdavidcs tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1345336695Sdavidcs size = sizeof(struct vfpf_vport_update_accept_param_tlv); 1346336695Sdavidcs p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1347336695Sdavidcs resp_size += sizeof(struct pfvf_def_resp_tlv); 1348336695Sdavidcs 1349336695Sdavidcs if (update_rx) { 1350336695Sdavidcs p_accept_tlv->update_rx_mode = update_rx; 1351336695Sdavidcs p_accept_tlv->rx_accept_filter = 1352336695Sdavidcs p_params->accept_flags.rx_accept_filter; 1353336695Sdavidcs } 1354336695Sdavidcs 1355336695Sdavidcs if (update_tx) { 1356336695Sdavidcs p_accept_tlv->update_tx_mode = update_tx; 1357336695Sdavidcs p_accept_tlv->tx_accept_filter = 1358336695Sdavidcs p_params->accept_flags.tx_accept_filter; 1359336695Sdavidcs } 1360336695Sdavidcs } 1361336695Sdavidcs 1362336695Sdavidcs if (p_params->rss_params) { 1363336695Sdavidcs struct ecore_rss_params *rss_params = p_params->rss_params; 1364336695Sdavidcs struct vfpf_vport_update_rss_tlv *p_rss_tlv; 1365336695Sdavidcs int i, table_size; 1366336695Sdavidcs 1367336695Sdavidcs size = sizeof(struct vfpf_vport_update_rss_tlv); 1368336695Sdavidcs p_rss_tlv = ecore_add_tlv(&p_iov->offset, 1369336695Sdavidcs CHANNEL_TLV_VPORT_UPDATE_RSS, size); 1370336695Sdavidcs resp_size += sizeof(struct pfvf_def_resp_tlv); 1371336695Sdavidcs 1372336695Sdavidcs if (rss_params->update_rss_config) 1373336695Sdavidcs p_rss_tlv->update_rss_flags |= 1374336695Sdavidcs VFPF_UPDATE_RSS_CONFIG_FLAG; 1375336695Sdavidcs if (rss_params->update_rss_capabilities) 1376336695Sdavidcs p_rss_tlv->update_rss_flags |= 1377336695Sdavidcs VFPF_UPDATE_RSS_CAPS_FLAG; 1378336695Sdavidcs if 
	    (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		/* The indirection table holds 2^rss_table_size_log entries,
		 * clamped to the T_ETH_INDIRECTION_TABLE_SIZE the TLV can carry.
		 */
		table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
					1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct ecore_queue_cid *p_queue;

			/* The TLV carries VF-relative queue ids, not the
			 * queue-cid pointers kept in rss_params.
			 */
			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}

		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);

		/* Every optional request TLV grows the expected PF reply
		 * by one default-response TLV.
		 */
		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;
	}

#ifndef LINUX_REMOVE
	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		/* Translate the individual update/enable bit-fields into
		 * the VFPF_* flag masks carried by the TLV.
		 */
		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
				VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
				VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
				VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
			sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
			sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
			sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
			sge_tpa_params->max_buffers_per_cqe;
	}
#endif

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Fold the per-TLV PF responses back into p_params */
	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	/* Ends the request sequence started by ecore_vf_pf_prep() */
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Send a CHANNEL_TLV_CLOSE request to the PF over the VF->PF mailbox
 * and, on a successful PF response, mark interrupts as disabled on
 * this hwfn.  Returns ECORE_AGAIN when the PF response status is not
 * PFVF_STATUS_SUCCESS, or the mailbox error from ecore_send_msg2pf().
 */
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if
	   (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Program the approximate multicast filter through a vport-update
 * request.  For ECORE_FILTER_ADD each MAC is hashed into one bit of
 * the approximation bins; any other opcode sends an all-zero bin set.
 * Result of the underlying vport-update is not propagated to the
 * caller (void return).
 */
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			/* Each MAC sets a single bit in the array of
			 * 32-bit bin words.
			 */
			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

/* Forward a unicast filter operation to the PF via a
 * CHANNEL_TLV_UCAST_FILTER mailbox message.  ECORE_FILTER_MOVE is
 * rejected up front (VFs cannot move filters).  Returns ECORE_AGAIN
 * when the PF response status is not SUCCESS.
 */
enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

#ifndef LINUX_REMOVE
	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true, "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}
#endif

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Request that the PF perform interrupt cleanup for this VF
 * (CHANNEL_TLV_INT_CLEANUP).  Returns ECORE_INVAL when the PF
 * response status is not SUCCESS.
 */
enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status,
			       sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Read the coalescing value for the queue described by p_cid from the
 * PF (CHANNEL_TLV_COALESCE_READ); on success *p_coal receives the
 * value from the PF reply.
 *
 * NOTE(review): when the PF replies with a non-SUCCESS status the
 * function jumps to exit with rc still ECORE_SUCCESS and *p_coal
 * untouched — callers apparently must pre-initialize *p_coal; confirm
 * this is intended.
 */
enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
					      u16 *p_coal,
					      struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	enum _ecore_status_t rc;

	/* clear mailbox and prep header tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
			       sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

/* Ask the PF to update RX and TX interrupt coalescing for the queue
 * described by p_cid (CHANNEL_TLV_COALESCE_UPDATE).  On success the
 * accepted values are cached in p_dev->{rx,tx}_coalesce_usecs.
 */
enum _ecore_status_t
ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
			 struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep header tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
			       sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc != ECORE_SUCCESS)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	/* Cache the values the PF accepted on the device */
	p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
	p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

/* Translate a VF-relative status-block index into the hardware SB id
 * assigned by the PF in the acquire response.  Returns 0 when the VF
 * iov info is not initialized.
 *
 * NOTE(review): unlike ecore_vf_set_sb_info() below, sb_id is not
 * range-checked against PFVF_MAX_SBS_PER_VF before indexing hw_sbs[];
 * callers must pass a valid index.
 */
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
			   u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

/* Record the ecore_sb_info pointer for a VF-relative status-block
 * index, with bounds checking against PFVF_MAX_SBS_PER_VF.
 */
void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
			  u16 sb_id, struct ecore_sb_info *p_sb)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}

/* Snapshot the PF-written bulletin board and, if it carries a new
 * valid version, commit it to the local shadow copy.
 *
 * *p_change is set to 1 only when a new, CRC-valid bulletin was
 * accepted.  Returns ECORE_SUCCESS when the version is unchanged or a
 * new bulletin was taken, ECORE_AGAIN when the CRC check failed
 * (PF may have been mid-write).
 */
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov =
	    p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid; the CRC is computed over
	 * everything past the leading crc field itself.
	 */
	crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
			 p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

/* Populate link parameters from a bulletin snapshot; zeroes p_params
 * first so fields absent from the bulletin read as 0.
 */
void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx =
	    p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

/* Convenience wrapper: read link parameters from this hwfn's shadow
 * bulletin copy.
 */
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(params,
				   &(p_hwfn->vf_iov_info->bulletin_shadow));
}

/* Populate link state from a bulletin snapshot; zeroes p_link first. */
void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

/* Convenience wrapper: read link state from this hwfn's shadow
 * bulletin copy.
 */
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(link,
				  &(p_hwfn->vf_iov_info->bulletin_shadow));
}

/* Populate link capabilities from a bulletin snapshot; zeroes
 * p_link_caps first.
 */
void
__ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
			 struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

/* Convenience wrapper: read link capabilities from this hwfn's shadow
 * bulletin copy.
 */
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_link_caps,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

/* Number of RX queues granted to this VF in the acquire response. */
void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
			   u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

/* Number of TX queues granted to this VF in the acquire response. */
void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
			   u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

/* Number of CIDs granted to this VF in the acquire response. */
void ecore_vf_get_num_cids(struct ecore_hwfn *p_hwfn,
			   u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}

/* Copy the port MAC (ETH_ALEN bytes) reported by the PF in the
 * acquire response into port_mac.
 */
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
			   u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

/* Number of VLAN filters granted to this VF in the acquire response. */
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters =
	    p_vf->acquire_resp.resc.num_vlan_filters;
}

/* Number of MAC filters granted to this VF in the acquire response. */
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u8 *num_mac_filters)
{
	struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

/* Check whether the VF may use the given MAC.  Returns true when no
 * MAC is enforced by the PF.
 *
 * NOTE(review): when MAC_ADDR_FORCED is set, both branches below
 * return false, so the OSAL_MEMCMP result never affects the outcome.
 * This matches sibling qed/ecore distributions, but it looks as if the
 * matching-MAC case was meant to return true; confirm before changing.
 */
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return false;
}

/* Fetch the MAC published in the shadow bulletin.  Copies it into
 * dst_mac and, when p_is_forced is non-NULL, reports whether the MAC
 * is PF-enforced (1) or merely suggested (0).  Returns false when the
 * bulletin carries no MAC at all.
 */
bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

/* Read the VXLAN and GENEVE UDP ports published in the shadow
 * bulletin.
 */
void ecore_vf_bulletin_get_udp_ports(struct
				     ecore_hwfn *p_hwfn,
				     u16 *p_vxlan_port,
				     u16 *p_geneve_port)
{
	struct ecore_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

#ifndef LINUX_REMOVE
/* Report whether the PF enforces a VLAN (pvid) on this VF; when it
 * does and dst_pvid is non-NULL, the pvid is written there.
 */
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	if (dst_pvid)
		*dst_pvid = bulletin->pvid;

	return true;
}

/* True when this VF negotiated a pre-fastpath-HSI interface with the
 * PF (b_pre_fp_hsi flag in the VF iov info).
 */
bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}
#endif

/* Return the PF firmware version components from the acquire
 * response.
 */
void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

#ifdef CONFIG_ECORE_SW_CHANNEL
/* Select between the HW and SW mailbox channel for this VF. */
void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
{
	p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
}
#endif

#ifdef _NTDDK_
#pragma warning(pop)
#endif