1316485Sdavidcs/* 2316485Sdavidcs * Copyright (c) 2017-2018 Cavium, Inc. 3316485Sdavidcs * All rights reserved. 4316485Sdavidcs * 5316485Sdavidcs * Redistribution and use in source and binary forms, with or without 6316485Sdavidcs * modification, are permitted provided that the following conditions 7316485Sdavidcs * are met: 8316485Sdavidcs * 9316485Sdavidcs * 1. Redistributions of source code must retain the above copyright 10316485Sdavidcs * notice, this list of conditions and the following disclaimer. 11316485Sdavidcs * 2. Redistributions in binary form must reproduce the above copyright 12316485Sdavidcs * notice, this list of conditions and the following disclaimer in the 13316485Sdavidcs * documentation and/or other materials provided with the distribution. 14316485Sdavidcs * 15316485Sdavidcs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16316485Sdavidcs * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17316485Sdavidcs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18316485Sdavidcs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19316485Sdavidcs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20316485Sdavidcs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21316485Sdavidcs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22316485Sdavidcs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23316485Sdavidcs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24316485Sdavidcs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25316485Sdavidcs * POSSIBILITY OF SUCH DAMAGE. 
26316485Sdavidcs */ 27316485Sdavidcs/* 28316485Sdavidcs * File : ecore_int.c 29316485Sdavidcs */ 30316485Sdavidcs#include <sys/cdefs.h> 31316485Sdavidcs__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_int.c 337517 2018-08-09 01:17:35Z davidcs $"); 32316485Sdavidcs 33316485Sdavidcs#include "bcm_osal.h" 34316485Sdavidcs#include "ecore.h" 35316485Sdavidcs#include "ecore_spq.h" 36316485Sdavidcs#include "reg_addr.h" 37316485Sdavidcs#include "ecore_gtt_reg_addr.h" 38316485Sdavidcs#include "ecore_init_ops.h" 39316485Sdavidcs#include "ecore_rt_defs.h" 40316485Sdavidcs#include "ecore_int.h" 41316485Sdavidcs#include "reg_addr.h" 42316485Sdavidcs#include "ecore_hw.h" 43316485Sdavidcs#include "ecore_sriov.h" 44316485Sdavidcs#include "ecore_vf.h" 45316485Sdavidcs#include "ecore_hw_defs.h" 46316485Sdavidcs#include "ecore_hsi_common.h" 47316485Sdavidcs#include "ecore_mcp.h" 48316485Sdavidcs#include "ecore_dbg_fw_funcs.h" 49316485Sdavidcs 50316485Sdavidcs#ifdef DIAG 51316485Sdavidcs/* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor], 52316485Sdavidcs * and so the functions are lacking ecore prefix. 53316485Sdavidcs * If there would be other clients needing this [or if the content that isn't 54316485Sdavidcs * really optional there would increase], we'll need to re-think this. 
 */
enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
			      struct ecore_ptt *ptt,
			      enum block_id block,
			      enum dbg_attn_type attn_type,
			      bool clear_status,
			      struct dbg_attn_block_result *results);

enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
			       struct dbg_attn_block_result *results);

const char* dbg_get_status_str(enum dbg_status status);

/* Map the ecore_dbg_* names used below onto the diag [non-ecore] flavor */
#define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
	dbg_read_attn(hwfn, ptt, id, type, clear, results)
#define ecore_dbg_parse_attn(hwfn, results) \
	dbg_parse_attn(hwfn, results)
#define ecore_dbg_get_status_str(status) \
	dbg_get_status_str(status)
#endif

/* Registration of a completion callback for a single protocol index */
struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie; /* Will be sent to the completion callback function */
};

/* Slowpath status block together with its per-protocol-index callbacks */
struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

/* Descriptor for one bit (or contiguous group of bits) in an AEU
 * after-invert register. The flag macros below encode the layout of
 * the 'flags' field.
 */
struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		(value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		(1 << 23)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

/* One AEU after-invert register - 32 attention bit descriptors */
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)

/* MCP attention callback - logs the MCP CPU state and masks all further
 * MCP events. Always returns ECORE_SUCCESS (informational only).
 */
static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
		tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
		 0xffffffff);

	return ECORE_SUCCESS;
}
146316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 147316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 148316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 149316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 150316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 151316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 152316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 153316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 154316485Sdavidcs#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 155316485Sdavidcs#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 156316485Sdavidcs#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 157316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 158316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 159316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 160316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 161316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 162316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 163316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 164316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 165316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 166316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 167316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 168316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 169316485Sdavidcs#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 170316485Sdavidcsstatic enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 171316485Sdavidcs{ 
172316485Sdavidcs u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID); 173316485Sdavidcs 174316485Sdavidcs /* Disabled VF access */ 175316485Sdavidcs if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 176316485Sdavidcs u32 addr, data; 177316485Sdavidcs 178316485Sdavidcs addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 179316485Sdavidcs PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 180316485Sdavidcs data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 181316485Sdavidcs PSWHST_REG_VF_DISABLED_ERROR_DATA); 182316485Sdavidcs DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n", 183316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >> 184316485Sdavidcs ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 185316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >> 186316485Sdavidcs ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 187316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 188316485Sdavidcs ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 189316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 190316485Sdavidcs ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 191316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 192316485Sdavidcs ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 193316485Sdavidcs addr); 194316485Sdavidcs } 195316485Sdavidcs 196316485Sdavidcs tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 197316485Sdavidcs PSWHST_REG_INCORRECT_ACCESS_VALID); 198316485Sdavidcs if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 199316485Sdavidcs u32 addr, data, length; 200316485Sdavidcs 201316485Sdavidcs addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 202316485Sdavidcs PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 203316485Sdavidcs data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 204316485Sdavidcs PSWHST_REG_INCORRECT_ACCESS_DATA); 205316485Sdavidcs length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 206316485Sdavidcs PSWHST_REG_INCORRECT_ACCESS_LENGTH); 
207316485Sdavidcs 208316485Sdavidcs DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 209316485Sdavidcs addr, length, 210316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 211316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 212316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 213316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 214316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 215316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 216316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 217316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 218316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 219316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 220316485Sdavidcs (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 221316485Sdavidcs ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 222316485Sdavidcs data); 223316485Sdavidcs } 224316485Sdavidcs 225316485Sdavidcs /* TODO - We know 'some' of these are legal due to virtualization, 226316485Sdavidcs * but is it true for all of them? 
227316485Sdavidcs */ 228316485Sdavidcs return ECORE_SUCCESS; 229316485Sdavidcs} 230316485Sdavidcs 231316485Sdavidcs#define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 232316485Sdavidcs#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 233316485Sdavidcs#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 234316485Sdavidcs#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 235316485Sdavidcs#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 236316485Sdavidcs#define ECORE_GRC_ATTENTION_PF_MASK (0xf) 237316485Sdavidcs#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 238316485Sdavidcs#define ECORE_GRC_ATTENTION_VF_SHIFT (4) 239316485Sdavidcs#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 240316485Sdavidcs#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 241316485Sdavidcs#define ECORE_GRC_ATTENTION_PRIV_VF (0) 242316485Sdavidcsstatic const char* grc_timeout_attn_master_to_str(u8 master) 243316485Sdavidcs{ 244316485Sdavidcs switch(master) { 245316485Sdavidcs case 1: return "PXP"; 246316485Sdavidcs case 2: return "MCP"; 247316485Sdavidcs case 3: return "MSDM"; 248316485Sdavidcs case 4: return "PSDM"; 249316485Sdavidcs case 5: return "YSDM"; 250316485Sdavidcs case 6: return "USDM"; 251316485Sdavidcs case 7: return "TSDM"; 252316485Sdavidcs case 8: return "XSDM"; 253316485Sdavidcs case 9: return "DBU"; 254316485Sdavidcs case 10: return "DMAE"; 255316485Sdavidcs default: 256320164Sdavidcs return "Unkown"; 257316485Sdavidcs } 258316485Sdavidcs} 259316485Sdavidcs 260316485Sdavidcsstatic enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 261316485Sdavidcs{ 262316485Sdavidcs u32 tmp, tmp2; 263316485Sdavidcs 264316485Sdavidcs /* We've already cleared the timeout interrupt register, so we learn 265316485Sdavidcs * of interrupts via the validity register 266316485Sdavidcs */ 267316485Sdavidcs tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 268316485Sdavidcs GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 269316485Sdavidcs if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) 270316485Sdavidcs goto out; 
271316485Sdavidcs 272316485Sdavidcs /* Read the GRC timeout information */ 273316485Sdavidcs tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 274316485Sdavidcs GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 275316485Sdavidcs tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 276316485Sdavidcs GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 277316485Sdavidcs 278320164Sdavidcs DP_NOTICE(p_hwfn->p_dev, false, 279320164Sdavidcs "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 280320164Sdavidcs tmp2, tmp, 281320164Sdavidcs (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" 282320164Sdavidcs : "Read from", 283320164Sdavidcs (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 284320164Sdavidcs grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> 285320164Sdavidcs ECORE_GRC_ATTENTION_MASTER_SHIFT), 286320164Sdavidcs (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 287320164Sdavidcs (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 288316485Sdavidcs ECORE_GRC_ATTENTION_PRIV_SHIFT) == 289320164Sdavidcs ECORE_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant:)", 290320164Sdavidcs (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 291320164Sdavidcs ECORE_GRC_ATTENTION_VF_SHIFT); 292316485Sdavidcs 293316485Sdavidcsout: 294316485Sdavidcs /* Regardles of anything else, clean the validity bit */ 295316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 296316485Sdavidcs GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 297316485Sdavidcs return ECORE_SUCCESS; 298316485Sdavidcs} 299316485Sdavidcs 300316485Sdavidcs#define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 301316485Sdavidcs#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 302316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 303316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 304316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 305316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 306316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 307316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 308316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 309316485Sdavidcs#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 310316485Sdavidcs#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 311316485Sdavidcs#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 312316485Sdavidcs#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 313337517Sdavidcs 314337517Sdavidcsenum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 315337517Sdavidcs struct ecore_ptt *p_ptt) 316316485Sdavidcs{ 317316485Sdavidcs u32 tmp; 318316485Sdavidcs 319337517Sdavidcs tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 320316485Sdavidcs if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 321316485Sdavidcs u32 addr_lo, addr_hi, details; 322316485Sdavidcs 323337517Sdavidcs addr_lo = ecore_rd(p_hwfn, p_ptt, 324316485Sdavidcs PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 325337517Sdavidcs addr_hi = ecore_rd(p_hwfn, p_ptt, 326316485Sdavidcs PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 
327337517Sdavidcs details = ecore_rd(p_hwfn, p_ptt, 328316485Sdavidcs PGLUE_B_REG_TX_ERR_WR_DETAILS); 329316485Sdavidcs 330337517Sdavidcs DP_NOTICE(p_hwfn, false, 331337517Sdavidcs "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 332337517Sdavidcs addr_hi, addr_lo, details, 333337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 334337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 335337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 336337517Sdavidcs tmp, 337337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 338337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 339337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 340316485Sdavidcs } 341316485Sdavidcs 342337517Sdavidcs tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 343316485Sdavidcs if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 344316485Sdavidcs u32 addr_lo, addr_hi, details; 345316485Sdavidcs 346337517Sdavidcs addr_lo = ecore_rd(p_hwfn, p_ptt, 347316485Sdavidcs PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 348337517Sdavidcs addr_hi = ecore_rd(p_hwfn, p_ptt, 349316485Sdavidcs PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 350337517Sdavidcs details = ecore_rd(p_hwfn, p_ptt, 351316485Sdavidcs PGLUE_B_REG_TX_ERR_RD_DETAILS); 352316485Sdavidcs 353337517Sdavidcs DP_NOTICE(p_hwfn, false, 354337517Sdavidcs "Illegal read by chip from [%08x:%08x] blocked. 
Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 355337517Sdavidcs addr_hi, addr_lo, details, 356337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 357337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 358337517Sdavidcs (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 359337517Sdavidcs tmp, 360337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 361337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 362337517Sdavidcs (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 363316485Sdavidcs } 364316485Sdavidcs 365337517Sdavidcs tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 366316485Sdavidcs if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 367337517Sdavidcs DP_NOTICE(p_hwfn, false, "ICPL eror - %08x\n", tmp); 368316485Sdavidcs 369337517Sdavidcs tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 370316485Sdavidcs if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 371316485Sdavidcs u32 addr_hi, addr_lo; 372316485Sdavidcs 373337517Sdavidcs addr_lo = ecore_rd(p_hwfn, p_ptt, 374316485Sdavidcs PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 375337517Sdavidcs addr_hi = ecore_rd(p_hwfn, p_ptt, 376316485Sdavidcs PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 377316485Sdavidcs 378337517Sdavidcs DP_NOTICE(p_hwfn, false, 379337517Sdavidcs "ICPL eror - %08x [Address %08x:%08x]\n", 380337517Sdavidcs tmp, addr_hi, addr_lo); 381316485Sdavidcs } 382316485Sdavidcs 383337517Sdavidcs tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 384316485Sdavidcs if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 385316485Sdavidcs u32 addr_hi, addr_lo, details; 386316485Sdavidcs 387337517Sdavidcs addr_lo = ecore_rd(p_hwfn, p_ptt, 388316485Sdavidcs PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 389337517Sdavidcs addr_hi 
= ecore_rd(p_hwfn, p_ptt, 390316485Sdavidcs PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 391337517Sdavidcs details = ecore_rd(p_hwfn, p_ptt, 392316485Sdavidcs PGLUE_B_REG_VF_ILT_ERR_DETAILS); 393316485Sdavidcs 394337517Sdavidcs DP_NOTICE(p_hwfn, false, 395337517Sdavidcs "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 396337517Sdavidcs details, tmp, addr_hi, addr_lo); 397316485Sdavidcs } 398316485Sdavidcs 399316485Sdavidcs /* Clear the indications */ 400337517Sdavidcs ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 401316485Sdavidcs 402316485Sdavidcs return ECORE_SUCCESS; 403316485Sdavidcs} 404316485Sdavidcs 405337517Sdavidcsstatic enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 406337517Sdavidcs{ 407337517Sdavidcs return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 408337517Sdavidcs} 409337517Sdavidcs 410316485Sdavidcsstatic enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 411316485Sdavidcs{ 412316485Sdavidcs DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 413316485Sdavidcs 414316485Sdavidcs ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 415316485Sdavidcs 416316485Sdavidcs return ECORE_INVAL; 417316485Sdavidcs} 418316485Sdavidcs 419316485Sdavidcsstatic enum _ecore_status_t 420316485Sdavidcsecore_general_attention_35(struct ecore_hwfn *p_hwfn) 421316485Sdavidcs{ 422316485Sdavidcs DP_INFO(p_hwfn, "General attention 35!\n"); 423316485Sdavidcs 424316485Sdavidcs return ECORE_SUCCESS; 425316485Sdavidcs} 426316485Sdavidcs 427320164Sdavidcs#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 428320164Sdavidcs#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 429320164Sdavidcs#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 430320164Sdavidcs#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 431320164Sdavidcs#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 432316485Sdavidcs 433320164Sdavidcs#define ECORE_DB_REC_COUNT 10 434320164Sdavidcs#define ECORE_DB_REC_INTERVAL 100 435320164Sdavidcs 
/* assumes sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and other half dropped. Another EDPM
	 * doorbell to the same address (from doorbell recovery mechanism or
	 * from the doorbelling entity) could have first half dropped and second
	 * half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping everything) */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

/* DORQ attention callback - logs doorbell drop/overflow details, runs
 * doorbell recovery when this PF overflowed, and distinguishes
 * recoverable drop indications (ECORE_SUCCESS) from fatal ones
 * (ECORE_INVAL).
 */
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
	    all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 * The masked almost full indication may also be set. Ignoring.
	 */
	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {

		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications, success */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}

/* TM attention callback - on B0 emulation, pending-scan indications are
 * tolerated (masked) as a likely clock-ratio artifact; anything else,
 * or any TM attention on real silicon, is treated as fatal (ECORE_INVAL).
 */
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

/* Descriptors selected (via ATTENTION_BB) when the adapter interprets
 * these AEU bits differently than the default table below.
 */
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] =
{
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 615316485Sdavidcs {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 616316485Sdavidcs {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 617316485Sdavidcs {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 618316485Sdavidcs {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 619316485Sdavidcs {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 620316485Sdavidcs {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS}, 621316485Sdavidcs } 622316485Sdavidcs }, 623316485Sdavidcs 624316485Sdavidcs { 625316485Sdavidcs { /* After Invert 3 */ 626316485Sdavidcs {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 627316485Sdavidcs } 628316485Sdavidcs }, 629316485Sdavidcs 630316485Sdavidcs { 631316485Sdavidcs { /* After Invert 4 */ 632316485Sdavidcs {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID}, 633316485Sdavidcs {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 634316485Sdavidcs {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID}, 635316485Sdavidcs {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 636316485Sdavidcs ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS}, 637316485Sdavidcs {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 638316485Sdavidcs ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS}, 639316485Sdavidcs {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 640316485Sdavidcs ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM}, 641316485Sdavidcs {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 642316485Sdavidcs ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM}, 643316485Sdavidcs {"MCP CPU", 
ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 644316485Sdavidcs {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 645316485Sdavidcs {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 646316485Sdavidcs {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 647316485Sdavidcs {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 648316485Sdavidcs {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 649316485Sdavidcs {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 650316485Sdavidcs {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 651316485Sdavidcs {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 652316485Sdavidcs {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 653316485Sdavidcs {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 654316485Sdavidcs {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 655316485Sdavidcs } 656316485Sdavidcs }, 657316485Sdavidcs 658316485Sdavidcs { 659316485Sdavidcs { /* After Invert 5 */ 660316485Sdavidcs {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 661316485Sdavidcs {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 662316485Sdavidcs {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 663316485Sdavidcs {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 664316485Sdavidcs {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 665316485Sdavidcs {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 666316485Sdavidcs {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 667316485Sdavidcs {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 668316485Sdavidcs {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 669316485Sdavidcs {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 670316485Sdavidcs {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 671316485Sdavidcs {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 672316485Sdavidcs {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 673316485Sdavidcs {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 
674316485Sdavidcs {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 675316485Sdavidcs {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 676316485Sdavidcs } 677316485Sdavidcs }, 678316485Sdavidcs 679316485Sdavidcs { 680316485Sdavidcs { /* After Invert 6 */ 681316485Sdavidcs {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 682316485Sdavidcs {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 683316485Sdavidcs {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 684316485Sdavidcs {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 685316485Sdavidcs {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 686316485Sdavidcs {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 687316485Sdavidcs {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 688316485Sdavidcs {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 689316485Sdavidcs {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 690316485Sdavidcs {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 691316485Sdavidcs {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 692316485Sdavidcs {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 693316485Sdavidcs {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 694316485Sdavidcs {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 695316485Sdavidcs {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 696316485Sdavidcs {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 697316485Sdavidcs } 698316485Sdavidcs }, 699316485Sdavidcs 700316485Sdavidcs { 701316485Sdavidcs { /* After Invert 7 */ 702316485Sdavidcs {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 703316485Sdavidcs {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 704316485Sdavidcs {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 705316485Sdavidcs {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 706316485Sdavidcs {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 707316485Sdavidcs {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 708316485Sdavidcs {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 709316485Sdavidcs 
{"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 710316485Sdavidcs {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 711316485Sdavidcs {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 712316485Sdavidcs {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 713316485Sdavidcs {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 714316485Sdavidcs {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 715316485Sdavidcs {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 716316485Sdavidcs {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 717316485Sdavidcs {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 718316485Sdavidcs {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 719316485Sdavidcs } 720316485Sdavidcs }, 721316485Sdavidcs 722316485Sdavidcs { 723316485Sdavidcs { /* After Invert 8 */ 724316485Sdavidcs {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 725316485Sdavidcs {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 726316485Sdavidcs {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 727316485Sdavidcs {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 728316485Sdavidcs {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 729316485Sdavidcs {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 730316485Sdavidcs {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 731316485Sdavidcs {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 732316485Sdavidcs {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 733316485Sdavidcs {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 734316485Sdavidcs {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 735316485Sdavidcs {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 736316485Sdavidcs {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 737316485Sdavidcs {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 738316485Sdavidcs {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 739316485Sdavidcs {"YSEM PRAM", ATTENTION_PAR, 
OSAL_NULL, MAX_BLOCK_ID}, 740316485Sdavidcs {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 741316485Sdavidcs {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 742316485Sdavidcs {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 743316485Sdavidcs {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 744316485Sdavidcs {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 745316485Sdavidcs } 746316485Sdavidcs }, 747316485Sdavidcs 748316485Sdavidcs { 749316485Sdavidcs { /* After Invert 9 */ 750316485Sdavidcs {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 751316485Sdavidcs {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 752316485Sdavidcs {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 753316485Sdavidcs {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 754316485Sdavidcs {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 755316485Sdavidcs } 756316485Sdavidcs }, 757316485Sdavidcs 758316485Sdavidcs}; 759316485Sdavidcs 760316485Sdavidcsstatic struct aeu_invert_reg_bit * 761316485Sdavidcsecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 762316485Sdavidcs struct aeu_invert_reg_bit *p_bit) 763316485Sdavidcs{ 764316485Sdavidcs if (!ECORE_IS_BB(p_hwfn->p_dev)) 765316485Sdavidcs return p_bit; 766316485Sdavidcs 767316485Sdavidcs if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 768316485Sdavidcs return p_bit; 769316485Sdavidcs 770316485Sdavidcs return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 771316485Sdavidcs ATTENTION_BB_SHIFT]; 772316485Sdavidcs} 773316485Sdavidcs 774316485Sdavidcsstatic bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 775316485Sdavidcs struct aeu_invert_reg_bit *p_bit) 776316485Sdavidcs{ 777316485Sdavidcs return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 778316485Sdavidcs ATTENTION_PARITY); 779316485Sdavidcs} 780316485Sdavidcs 781316485Sdavidcs#define 
ATTN_STATE_BITS (0xfff) 782316485Sdavidcs#define ATTN_BITS_MASKABLE (0x3ff) 783316485Sdavidcsstruct ecore_sb_attn_info { 784316485Sdavidcs /* Virtual & Physical address of the SB */ 785316485Sdavidcs struct atten_status_block *sb_attn; 786316485Sdavidcs dma_addr_t sb_phys; 787316485Sdavidcs 788316485Sdavidcs /* Last seen running index */ 789316485Sdavidcs u16 index; 790316485Sdavidcs 791316485Sdavidcs /* A mask of the AEU bits resulting in a parity error */ 792316485Sdavidcs u32 parity_mask[NUM_ATTN_REGS]; 793316485Sdavidcs 794316485Sdavidcs /* A pointer to the attention description structure */ 795316485Sdavidcs struct aeu_invert_reg *p_aeu_desc; 796316485Sdavidcs 797316485Sdavidcs /* Previously asserted attentions, which are still unasserted */ 798316485Sdavidcs u16 known_attn; 799316485Sdavidcs 800316485Sdavidcs /* Cleanup address for the link's general hw attention */ 801316485Sdavidcs u32 mfw_attn_addr; 802316485Sdavidcs}; 803316485Sdavidcs 804316485Sdavidcsstatic u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 805316485Sdavidcs struct ecore_sb_attn_info *p_sb_desc) 806316485Sdavidcs{ 807316485Sdavidcs u16 rc = 0, index; 808316485Sdavidcs 809316485Sdavidcs OSAL_MMIOWB(p_hwfn->p_dev); 810316485Sdavidcs 811316485Sdavidcs index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 812316485Sdavidcs if (p_sb_desc->index != index) { 813316485Sdavidcs p_sb_desc->index = index; 814316485Sdavidcs rc = ECORE_SB_ATT_IDX; 815316485Sdavidcs } 816316485Sdavidcs 817316485Sdavidcs OSAL_MMIOWB(p_hwfn->p_dev); 818316485Sdavidcs 819316485Sdavidcs return rc; 820316485Sdavidcs} 821316485Sdavidcs 822316485Sdavidcs/** 823316485Sdavidcs * @brief ecore_int_assertion - handles asserted attention bits 824316485Sdavidcs * 825316485Sdavidcs * @param p_hwfn 826316485Sdavidcs * @param asserted_bits newly asserted bits 827316485Sdavidcs * @return enum _ecore_status_t 828316485Sdavidcs */ 829316485Sdavidcsstatic enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 
830316485Sdavidcs u16 asserted_bits) 831316485Sdavidcs{ 832316485Sdavidcs struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 833316485Sdavidcs u32 igu_mask; 834316485Sdavidcs 835316485Sdavidcs /* Mask the source of the attention in the IGU */ 836316485Sdavidcs igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 837316485Sdavidcs IGU_REG_ATTENTION_ENABLE); 838316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 839316485Sdavidcs igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 840316485Sdavidcs igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 841316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 842316485Sdavidcs 843316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 844316485Sdavidcs "inner known ATTN state: 0x%04x --> 0x%04x\n", 845316485Sdavidcs sb_attn_sw->known_attn, 846316485Sdavidcs sb_attn_sw->known_attn | asserted_bits); 847316485Sdavidcs sb_attn_sw->known_attn |= asserted_bits; 848316485Sdavidcs 849316485Sdavidcs /* Handle MCP events */ 850316485Sdavidcs if (asserted_bits & 0x100) { 851316485Sdavidcs ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 852316485Sdavidcs /* Clean the MCP attention */ 853316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 854316485Sdavidcs sb_attn_sw->mfw_attn_addr, 0); 855316485Sdavidcs } 856316485Sdavidcs 857316485Sdavidcs /* FIXME - this will change once we'll have GOOD gtt definitions */ 858316485Sdavidcs DIRECT_REG_WR(p_hwfn, 859316485Sdavidcs (u8 OSAL_IOMEM*)p_hwfn->regview + 860316485Sdavidcs GTT_BAR0_MAP_REG_IGU_CMD + 861316485Sdavidcs ((IGU_CMD_ATTN_BIT_SET_UPPER - 862316485Sdavidcs IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 863316485Sdavidcs 864316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 865316485Sdavidcs asserted_bits); 866316485Sdavidcs 867316485Sdavidcs return ECORE_SUCCESS; 868316485Sdavidcs} 869316485Sdavidcs 870316485Sdavidcsstatic void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 
871316485Sdavidcs enum block_id id, enum dbg_attn_type type, 872316485Sdavidcs bool b_clear) 873316485Sdavidcs{ 874316485Sdavidcs struct dbg_attn_block_result attn_results; 875316485Sdavidcs enum dbg_status status; 876316485Sdavidcs 877316485Sdavidcs OSAL_MEMSET(&attn_results, 0, sizeof(attn_results)); 878316485Sdavidcs 879316485Sdavidcs status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 880316485Sdavidcs b_clear, &attn_results); 881320164Sdavidcs#ifdef ATTN_DESC 882316485Sdavidcs if (status != DBG_STATUS_OK) 883316485Sdavidcs DP_NOTICE(p_hwfn, true, 884320164Sdavidcs "Failed to parse attention information [status: %s]\n", 885320164Sdavidcs ecore_dbg_get_status_str(status)); 886316485Sdavidcs else 887316485Sdavidcs ecore_dbg_parse_attn(p_hwfn, &attn_results); 888316485Sdavidcs#else 889320164Sdavidcs if (status != DBG_STATUS_OK) 890320164Sdavidcs DP_NOTICE(p_hwfn, true, 891320164Sdavidcs "Failed to parse attention information [status: %d]\n", 892320164Sdavidcs status); 893320164Sdavidcs else 894316485Sdavidcs ecore_dbg_print_attn(p_hwfn, &attn_results); 895316485Sdavidcs#endif 896316485Sdavidcs} 897316485Sdavidcs 898316485Sdavidcs/** 899316485Sdavidcs * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 900316485Sdavidcs * cause of the attention 901316485Sdavidcs * 902316485Sdavidcs * @param p_hwfn 903316485Sdavidcs * @param p_aeu - descriptor of an AEU bit which caused the attention 904316485Sdavidcs * @param aeu_en_reg - register offset of the AEU enable reg. which configured 905316485Sdavidcs * this bit to this group. 
906316485Sdavidcs * @param bit_index - index of this bit in the aeu_en_reg 907316485Sdavidcs * 908316485Sdavidcs * @return enum _ecore_status_t 909316485Sdavidcs */ 910316485Sdavidcsstatic enum _ecore_status_t 911316485Sdavidcsecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 912316485Sdavidcs struct aeu_invert_reg_bit *p_aeu, 913316485Sdavidcs u32 aeu_en_reg, 914316485Sdavidcs const char *p_bit_name, 915316485Sdavidcs u32 bitmask) 916316485Sdavidcs{ 917316485Sdavidcs enum _ecore_status_t rc = ECORE_INVAL; 918316485Sdavidcs bool b_fatal = false; 919316485Sdavidcs 920316485Sdavidcs DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 921316485Sdavidcs p_bit_name, bitmask); 922316485Sdavidcs 923316485Sdavidcs /* Call callback before clearing the interrupt status */ 924316485Sdavidcs if (p_aeu->cb) { 925316485Sdavidcs DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 926316485Sdavidcs p_bit_name); 927316485Sdavidcs rc = p_aeu->cb(p_hwfn); 928316485Sdavidcs } 929316485Sdavidcs 930316485Sdavidcs if (rc != ECORE_SUCCESS) 931316485Sdavidcs b_fatal = true; 932316485Sdavidcs 933316485Sdavidcs /* Print HW block interrupt registers */ 934316485Sdavidcs if (p_aeu->block_index != MAX_BLOCK_ID) 935316485Sdavidcs ecore_int_attn_print(p_hwfn, p_aeu->block_index, 936316485Sdavidcs ATTN_TYPE_INTERRUPT, !b_fatal); 937316485Sdavidcs 938316485Sdavidcs /* Reach assertion if attention is fatal */ 939316485Sdavidcs if (b_fatal) { 940316485Sdavidcs DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 941316485Sdavidcs p_bit_name); 942316485Sdavidcs 943316485Sdavidcs ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 944316485Sdavidcs } 945316485Sdavidcs 946316485Sdavidcs /* Prevent this Attention from being asserted in the future */ 947316485Sdavidcs if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 948316485Sdavidcs p_hwfn->p_dev->attn_clr_en) { 949316485Sdavidcs u32 val; 950316485Sdavidcs u32 mask = ~bitmask; 951316485Sdavidcs val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 
aeu_en_reg); 952316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 953316485Sdavidcs DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 954316485Sdavidcs p_bit_name); 955316485Sdavidcs } 956316485Sdavidcs 957316485Sdavidcs return rc; 958316485Sdavidcs} 959316485Sdavidcs 960316485Sdavidcs/** 961316485Sdavidcs * @brief ecore_int_deassertion_parity - handle a single parity AEU source 962316485Sdavidcs * 963316485Sdavidcs * @param p_hwfn 964316485Sdavidcs * @param p_aeu - descriptor of an AEU bit which caused the parity 965316485Sdavidcs * @param aeu_en_reg - address of the AEU enable register 966316485Sdavidcs * @param bit_index 967316485Sdavidcs */ 968316485Sdavidcsstatic void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 969316485Sdavidcs struct aeu_invert_reg_bit *p_aeu, 970316485Sdavidcs u32 aeu_en_reg, u8 bit_index) 971316485Sdavidcs{ 972316485Sdavidcs u32 block_id = p_aeu->block_index, mask, val; 973316485Sdavidcs 974316485Sdavidcs DP_NOTICE(p_hwfn->p_dev, false, 975316485Sdavidcs "%s parity attention is set [address 0x%08x, bit %d]\n", 976316485Sdavidcs p_aeu->bit_name, aeu_en_reg, bit_index); 977316485Sdavidcs 978337517Sdavidcs if (block_id != MAX_BLOCK_ID) { 979337517Sdavidcs ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 980316485Sdavidcs 981337517Sdavidcs /* In A0, there's a single parity bit for several blocks */ 982337517Sdavidcs if (block_id == BLOCK_BTB) { 983337517Sdavidcs ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 984337517Sdavidcs ATTN_TYPE_PARITY, false); 985337517Sdavidcs ecore_int_attn_print(p_hwfn, BLOCK_MCP, 986337517Sdavidcs ATTN_TYPE_PARITY, false); 987337517Sdavidcs } 988316485Sdavidcs } 989316485Sdavidcs 990316485Sdavidcs /* Prevent this parity error from being re-asserted */ 991316485Sdavidcs mask = ~(0x1 << bit_index); 992316485Sdavidcs val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 993316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 
994316485Sdavidcs DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 995316485Sdavidcs p_aeu->bit_name); 996316485Sdavidcs} 997316485Sdavidcs 998316485Sdavidcs/** 999316485Sdavidcs * @brief - handles deassertion of previously asserted attentions. 1000316485Sdavidcs * 1001316485Sdavidcs * @param p_hwfn 1002316485Sdavidcs * @param deasserted_bits - newly deasserted bits 1003316485Sdavidcs * @return enum _ecore_status_t 1004316485Sdavidcs * 1005316485Sdavidcs */ 1006316485Sdavidcsstatic enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 1007316485Sdavidcs u16 deasserted_bits) 1008316485Sdavidcs{ 1009316485Sdavidcs struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1010316485Sdavidcs u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1011316485Sdavidcs u8 i, j, k, bit_idx; 1012316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1013316485Sdavidcs 1014316485Sdavidcs /* Read the attention registers in the AEU */ 1015316485Sdavidcs for (i = 0; i < NUM_ATTN_REGS; i++) { 1016316485Sdavidcs aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1017316485Sdavidcs MISC_REG_AEU_AFTER_INVERT_1_IGU + 1018316485Sdavidcs i * 0x4); 1019316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1020316485Sdavidcs "Deasserted bits [%d]: %08x\n", 1021316485Sdavidcs i, aeu_inv_arr[i]); 1022316485Sdavidcs } 1023316485Sdavidcs 1024316485Sdavidcs /* Handle parity attentions first */ 1025316485Sdavidcs for (i = 0; i < NUM_ATTN_REGS; i++) 1026316485Sdavidcs { 1027316485Sdavidcs struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1028316485Sdavidcs u32 parities; 1029316485Sdavidcs 1030316485Sdavidcs aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1031316485Sdavidcs en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1032316485Sdavidcs parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1033316485Sdavidcs 1034316485Sdavidcs /* Skip register in which no parity bit is currently set */ 1035316485Sdavidcs if (!parities) 1036316485Sdavidcs 
continue; 1037316485Sdavidcs 1038316485Sdavidcs for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1039316485Sdavidcs struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1040316485Sdavidcs 1041316485Sdavidcs if (ecore_int_is_parity_flag(p_hwfn, p_bit) && 1042316485Sdavidcs !!(parities & (1 << bit_idx))) 1043316485Sdavidcs ecore_int_deassertion_parity(p_hwfn, p_bit, 1044316485Sdavidcs aeu_en, bit_idx); 1045316485Sdavidcs 1046316485Sdavidcs bit_idx += ATTENTION_LENGTH(p_bit->flags); 1047316485Sdavidcs } 1048316485Sdavidcs } 1049316485Sdavidcs 1050316485Sdavidcs /* Find non-parity cause for attention and act */ 1051316485Sdavidcs for (k = 0; k < MAX_ATTN_GRPS; k++) { 1052316485Sdavidcs struct aeu_invert_reg_bit *p_aeu; 1053316485Sdavidcs 1054316485Sdavidcs /* Handle only groups whose attention is currently deasserted */ 1055316485Sdavidcs if (!(deasserted_bits & (1 << k))) 1056316485Sdavidcs continue; 1057316485Sdavidcs 1058316485Sdavidcs for (i = 0; i < NUM_ATTN_REGS; i++) { 1059316485Sdavidcs u32 bits; 1060316485Sdavidcs 1061316485Sdavidcs aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1062316485Sdavidcs i * sizeof(u32) + 1063316485Sdavidcs k * sizeof(u32) * NUM_ATTN_REGS; 1064316485Sdavidcs en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1065316485Sdavidcs bits = aeu_inv_arr[i] & en; 1066316485Sdavidcs 1067316485Sdavidcs /* Skip if no bit from this group is currently set */ 1068316485Sdavidcs if (!bits) 1069316485Sdavidcs continue; 1070316485Sdavidcs 1071316485Sdavidcs /* Find all set bits from current register which belong 1072316485Sdavidcs * to current group, making them responsible for the 1073316485Sdavidcs * previous assertion. 
1074316485Sdavidcs */ 1075316485Sdavidcs for (j = 0, bit_idx = 0; bit_idx < 32; j++) 1076316485Sdavidcs { 1077316485Sdavidcs long unsigned int bitmask; 1078316485Sdavidcs u8 bit, bit_len; 1079316485Sdavidcs 1080316485Sdavidcs /* Need to account bits with changed meaning */ 1081316485Sdavidcs p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1082316485Sdavidcs p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu); 1083316485Sdavidcs 1084316485Sdavidcs bit = bit_idx; 1085316485Sdavidcs bit_len = ATTENTION_LENGTH(p_aeu->flags); 1086316485Sdavidcs if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { 1087316485Sdavidcs /* Skip Parity */ 1088316485Sdavidcs bit++; 1089316485Sdavidcs bit_len--; 1090316485Sdavidcs } 1091316485Sdavidcs 1092316485Sdavidcs /* Find the bits relating to HW-block, then 1093316485Sdavidcs * shift so they'll become LSB. 1094316485Sdavidcs */ 1095316485Sdavidcs bitmask = bits & (((1 << bit_len) - 1) << bit); 1096316485Sdavidcs bitmask >>= bit; 1097316485Sdavidcs 1098316485Sdavidcs if (bitmask) { 1099316485Sdavidcs u32 flags = p_aeu->flags; 1100316485Sdavidcs char bit_name[30]; 1101316485Sdavidcs u8 num; 1102316485Sdavidcs 1103316485Sdavidcs num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 1104316485Sdavidcs bit_len); 1105316485Sdavidcs 1106316485Sdavidcs /* Some bits represent more than a 1107316485Sdavidcs * a single interrupt. Correctly print 1108316485Sdavidcs * their name. 1109316485Sdavidcs */ 1110316485Sdavidcs if (ATTENTION_LENGTH(flags) > 2 || 1111316485Sdavidcs ((flags & ATTENTION_PAR_INT) && 1112316485Sdavidcs ATTENTION_LENGTH(flags) > 1)) 1113316485Sdavidcs OSAL_SNPRINTF(bit_name, 30, 1114316485Sdavidcs p_aeu->bit_name, 1115316485Sdavidcs num); 1116316485Sdavidcs else 1117316485Sdavidcs OSAL_STRNCPY(bit_name, 1118316485Sdavidcs p_aeu->bit_name, 1119316485Sdavidcs 30); 1120316485Sdavidcs 1121316485Sdavidcs /* We now need to pass bitmask in its 1122316485Sdavidcs * correct position. 
1123316485Sdavidcs */ 1124316485Sdavidcs bitmask <<= bit; 1125316485Sdavidcs 1126316485Sdavidcs /* Handle source of the attention */ 1127316485Sdavidcs ecore_int_deassertion_aeu_bit(p_hwfn, 1128316485Sdavidcs p_aeu, 1129316485Sdavidcs aeu_en, 1130316485Sdavidcs bit_name, 1131316485Sdavidcs bitmask); 1132316485Sdavidcs } 1133316485Sdavidcs 1134316485Sdavidcs bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1135316485Sdavidcs } 1136316485Sdavidcs } 1137316485Sdavidcs } 1138316485Sdavidcs 1139316485Sdavidcs /* Clear IGU indication for the deasserted bits */ 1140316485Sdavidcs /* FIXME - this will change once we'll have GOOD gtt definitions */ 1141316485Sdavidcs DIRECT_REG_WR(p_hwfn, 1142316485Sdavidcs (u8 OSAL_IOMEM*)p_hwfn->regview + 1143316485Sdavidcs GTT_BAR0_MAP_REG_IGU_CMD + 1144316485Sdavidcs ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1145316485Sdavidcs IGU_CMD_INT_ACK_BASE) << 3), 1146316485Sdavidcs ~((u32)deasserted_bits)); 1147316485Sdavidcs 1148316485Sdavidcs /* Unmask deasserted attentions in IGU */ 1149316485Sdavidcs aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1150316485Sdavidcs IGU_REG_ATTENTION_ENABLE); 1151316485Sdavidcs aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1152316485Sdavidcs ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1153316485Sdavidcs 1154316485Sdavidcs /* Clear deassertion from inner state */ 1155316485Sdavidcs sb_attn_sw->known_attn &= ~deasserted_bits; 1156316485Sdavidcs 1157316485Sdavidcs return rc; 1158316485Sdavidcs} 1159316485Sdavidcs 1160316485Sdavidcsstatic enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 1161316485Sdavidcs{ 1162316485Sdavidcs struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1163316485Sdavidcs struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1164316485Sdavidcs u16 index = 0, asserted_bits, deasserted_bits; 1165316485Sdavidcs u32 attn_bits = 0, attn_acks = 0; 1166316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1167316485Sdavidcs 
1168316485Sdavidcs /* Read current attention bits/acks - safeguard against attentions 1169316485Sdavidcs * by guaranting work on a synchronized timeframe 1170316485Sdavidcs */ 1171316485Sdavidcs do { 1172316485Sdavidcs index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 1173316485Sdavidcs attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 1174316485Sdavidcs attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 1175316485Sdavidcs } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 1176316485Sdavidcs p_sb_attn->sb_index = index; 1177316485Sdavidcs 1178316485Sdavidcs /* Attention / Deassertion are meaningful (and in correct state) 1179316485Sdavidcs * only when they differ and consistent with known state - deassertion 1180316485Sdavidcs * when previous attention & current ack, and assertion when current 1181316485Sdavidcs * attention with no previous attention 1182316485Sdavidcs */ 1183316485Sdavidcs asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1184316485Sdavidcs ~p_sb_attn_sw->known_attn; 1185316485Sdavidcs deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1186316485Sdavidcs p_sb_attn_sw->known_attn; 1187316485Sdavidcs 1188316485Sdavidcs if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1189316485Sdavidcs DP_INFO(p_hwfn, 1190316485Sdavidcs "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. 
known: 0x%04x]\n", 1191316485Sdavidcs index, attn_bits, attn_acks, asserted_bits, 1192316485Sdavidcs deasserted_bits, p_sb_attn_sw->known_attn); 1193316485Sdavidcs else if (asserted_bits == 0x100) 1194316485Sdavidcs DP_INFO(p_hwfn, 1195316485Sdavidcs "MFW indication via attention\n"); 1196316485Sdavidcs else 1197316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1198316485Sdavidcs "MFW indication [deassertion]\n"); 1199316485Sdavidcs 1200316485Sdavidcs if (asserted_bits) { 1201316485Sdavidcs rc = ecore_int_assertion(p_hwfn, asserted_bits); 1202316485Sdavidcs if (rc) 1203316485Sdavidcs return rc; 1204316485Sdavidcs } 1205316485Sdavidcs 1206316485Sdavidcs if (deasserted_bits) 1207316485Sdavidcs rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1208316485Sdavidcs 1209316485Sdavidcs return rc; 1210316485Sdavidcs} 1211316485Sdavidcs 1212316485Sdavidcsstatic void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1213316485Sdavidcs void OSAL_IOMEM *igu_addr, u32 ack_cons) 1214316485Sdavidcs{ 1215316485Sdavidcs struct igu_prod_cons_update igu_ack = { 0 }; 1216316485Sdavidcs 1217316485Sdavidcs igu_ack.sb_id_and_flags = 1218316485Sdavidcs ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1219316485Sdavidcs (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1220316485Sdavidcs (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1221316485Sdavidcs (IGU_SEG_ACCESS_ATTN << 1222316485Sdavidcs IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1223316485Sdavidcs 1224316485Sdavidcs DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1225316485Sdavidcs 1226316485Sdavidcs /* Both segments (interrupts & acks) are written to same place address; 1227316485Sdavidcs * Need to guarantee all commands will be received (in-order) by HW. 
1228316485Sdavidcs */ 1229316485Sdavidcs OSAL_MMIOWB(p_hwfn->p_dev); 1230316485Sdavidcs OSAL_BARRIER(p_hwfn->p_dev); 1231316485Sdavidcs} 1232316485Sdavidcs 1233316485Sdavidcsvoid ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1234316485Sdavidcs{ 1235316485Sdavidcs struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1236316485Sdavidcs struct ecore_pi_info *pi_info = OSAL_NULL; 1237316485Sdavidcs struct ecore_sb_attn_info *sb_attn; 1238316485Sdavidcs struct ecore_sb_info *sb_info; 1239316485Sdavidcs int arr_size; 1240316485Sdavidcs u16 rc = 0; 1241316485Sdavidcs 1242316485Sdavidcs if (!p_hwfn) 1243316485Sdavidcs return; 1244316485Sdavidcs 1245316485Sdavidcs if (!p_hwfn->p_sp_sb) { 1246316485Sdavidcs DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1247316485Sdavidcs return; 1248316485Sdavidcs } 1249316485Sdavidcs 1250316485Sdavidcs sb_info = &p_hwfn->p_sp_sb->sb_info; 1251316485Sdavidcs arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1252316485Sdavidcs if (!sb_info) { 1253316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n"); 1254316485Sdavidcs return; 1255316485Sdavidcs } 1256316485Sdavidcs 1257316485Sdavidcs if (!p_hwfn->p_sb_attn) { 1258316485Sdavidcs DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1259316485Sdavidcs return; 1260316485Sdavidcs } 1261316485Sdavidcs sb_attn = p_hwfn->p_sb_attn; 1262316485Sdavidcs 1263316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1264316485Sdavidcs p_hwfn, p_hwfn->my_id); 1265316485Sdavidcs 1266316485Sdavidcs /* Disable ack for def status block. Required both for msix + 1267316485Sdavidcs * inta in non-mask mode, in inta does no harm. 
1268316485Sdavidcs */ 1269316485Sdavidcs ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1270316485Sdavidcs 1271316485Sdavidcs /* Gather Interrupts/Attentions information */ 1272316485Sdavidcs if (!sb_info->sb_virt) { 1273316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1274316485Sdavidcs } else { 1275316485Sdavidcs u32 tmp_index = sb_info->sb_ack; 1276316485Sdavidcs rc = ecore_sb_update_sb_idx(sb_info); 1277316485Sdavidcs DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1278316485Sdavidcs "Interrupt indices: 0x%08x --> 0x%08x\n", 1279316485Sdavidcs tmp_index, sb_info->sb_ack); 1280316485Sdavidcs } 1281316485Sdavidcs 1282316485Sdavidcs if (!sb_attn || !sb_attn->sb_attn) { 1283316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n"); 1284316485Sdavidcs } else { 1285316485Sdavidcs u16 tmp_index = sb_attn->index; 1286316485Sdavidcs 1287316485Sdavidcs rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1288316485Sdavidcs DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1289316485Sdavidcs "Attention indices: 0x%08x --> 0x%08x\n", 1290316485Sdavidcs tmp_index, sb_attn->index); 1291316485Sdavidcs } 1292316485Sdavidcs 1293316485Sdavidcs /* Check if we expect interrupts at this time. if not just ack them */ 1294316485Sdavidcs if (!(rc & ECORE_SB_EVENT_MASK)) { 1295316485Sdavidcs ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1296316485Sdavidcs return; 1297316485Sdavidcs } 1298316485Sdavidcs 1299316485Sdavidcs /* Check the validity of the DPC ptt. 
If not ack interrupts and fail */ 1300316485Sdavidcs if (!p_hwfn->p_dpc_ptt) { 1301316485Sdavidcs DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1302316485Sdavidcs ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1303316485Sdavidcs return; 1304316485Sdavidcs } 1305316485Sdavidcs 1306316485Sdavidcs if (rc & ECORE_SB_ATT_IDX) 1307316485Sdavidcs ecore_int_attentions(p_hwfn); 1308316485Sdavidcs 1309316485Sdavidcs if (rc & ECORE_SB_IDX) { 1310316485Sdavidcs int pi; 1311316485Sdavidcs 1312316485Sdavidcs /* Since we only looked at the SB index, it's possible more 1313316485Sdavidcs * than a single protocol-index on the SB incremented. 1314316485Sdavidcs * Iterate over all configured protocol indices and check 1315316485Sdavidcs * whether something happened for each. 1316316485Sdavidcs */ 1317316485Sdavidcs for (pi = 0; pi < arr_size; pi++) { 1318316485Sdavidcs pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1319316485Sdavidcs if (pi_info->comp_cb != OSAL_NULL) 1320316485Sdavidcs pi_info->comp_cb(p_hwfn, pi_info->cookie); 1321316485Sdavidcs } 1322316485Sdavidcs } 1323316485Sdavidcs 1324316485Sdavidcs if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1325316485Sdavidcs /* This should be done before the interrupts are enabled, 1326316485Sdavidcs * since otherwise a new attention will be generated. 
1327316485Sdavidcs */ 1328316485Sdavidcs ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1329316485Sdavidcs } 1330316485Sdavidcs 1331316485Sdavidcs ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1332316485Sdavidcs} 1333316485Sdavidcs 1334316485Sdavidcsstatic void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1335316485Sdavidcs{ 1336316485Sdavidcs struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1337316485Sdavidcs 1338316485Sdavidcs if (!p_sb) 1339316485Sdavidcs return; 1340316485Sdavidcs 1341316485Sdavidcs if (p_sb->sb_attn) { 1342316485Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1343316485Sdavidcs p_sb->sb_phys, 1344316485Sdavidcs SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1345316485Sdavidcs } 1346316485Sdavidcs 1347316485Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_sb); 1348316485Sdavidcs p_hwfn->p_sb_attn = OSAL_NULL; 1349316485Sdavidcs} 1350316485Sdavidcs 1351316485Sdavidcsstatic void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1352316485Sdavidcs struct ecore_ptt *p_ptt) 1353316485Sdavidcs{ 1354316485Sdavidcs struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1355316485Sdavidcs 1356316485Sdavidcs OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1357316485Sdavidcs 1358316485Sdavidcs sb_info->index = 0; 1359316485Sdavidcs sb_info->known_attn = 0; 1360316485Sdavidcs 1361316485Sdavidcs /* Configure Attention Status Block in IGU */ 1362316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1363316485Sdavidcs DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1364316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1365316485Sdavidcs DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1366316485Sdavidcs} 1367316485Sdavidcs 1368316485Sdavidcsstatic void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1369316485Sdavidcs struct ecore_ptt *p_ptt, 1370316485Sdavidcs void *sb_virt_addr, 1371316485Sdavidcs dma_addr_t sb_phy_addr) 1372316485Sdavidcs{ 1373316485Sdavidcs struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1374316485Sdavidcs 
int i, j, k; 1375316485Sdavidcs 1376316485Sdavidcs sb_info->sb_attn = sb_virt_addr; 1377316485Sdavidcs sb_info->sb_phys = sb_phy_addr; 1378316485Sdavidcs 1379316485Sdavidcs /* Set the pointer to the AEU descriptors */ 1380316485Sdavidcs sb_info->p_aeu_desc = aeu_descs; 1381316485Sdavidcs 1382316485Sdavidcs /* Calculate Parity Masks */ 1383316485Sdavidcs OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1384316485Sdavidcs for (i = 0; i < NUM_ATTN_REGS; i++) { 1385316485Sdavidcs /* j is array index, k is bit index */ 1386316485Sdavidcs for (j = 0, k = 0; k < 32; j++) { 1387316485Sdavidcs struct aeu_invert_reg_bit *p_aeu; 1388316485Sdavidcs 1389316485Sdavidcs p_aeu = &aeu_descs[i].bits[j]; 1390316485Sdavidcs if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) 1391316485Sdavidcs sb_info->parity_mask[i] |= 1 << k; 1392316485Sdavidcs 1393316485Sdavidcs k += ATTENTION_LENGTH(p_aeu->flags); 1394316485Sdavidcs } 1395316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1396316485Sdavidcs "Attn Mask [Reg %d]: 0x%08x\n", 1397316485Sdavidcs i, sb_info->parity_mask[i]); 1398316485Sdavidcs } 1399316485Sdavidcs 1400316485Sdavidcs /* Set the address of cleanup for the mcp attention */ 1401316485Sdavidcs sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1402316485Sdavidcs MISC_REG_AEU_GENERAL_ATTN_0; 1403316485Sdavidcs 1404316485Sdavidcs ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1405316485Sdavidcs} 1406316485Sdavidcs 1407316485Sdavidcsstatic enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1408316485Sdavidcs struct ecore_ptt *p_ptt) 1409316485Sdavidcs{ 1410316485Sdavidcs struct ecore_dev *p_dev = p_hwfn->p_dev; 1411316485Sdavidcs struct ecore_sb_attn_info *p_sb; 1412316485Sdavidcs dma_addr_t p_phys = 0; 1413316485Sdavidcs void *p_virt; 1414316485Sdavidcs 1415316485Sdavidcs /* SB struct */ 1416316485Sdavidcs p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1417316485Sdavidcs if (!p_sb) { 1418337517Sdavidcs DP_NOTICE(p_dev, false, "Failed to allocate 
`struct ecore_sb_attn_info'\n"); 1419316485Sdavidcs return ECORE_NOMEM; 1420316485Sdavidcs } 1421316485Sdavidcs 1422316485Sdavidcs /* SB ring */ 1423316485Sdavidcs p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1424316485Sdavidcs SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1425316485Sdavidcs if (!p_virt) { 1426337517Sdavidcs DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); 1427316485Sdavidcs OSAL_FREE(p_dev, p_sb); 1428316485Sdavidcs return ECORE_NOMEM; 1429316485Sdavidcs } 1430316485Sdavidcs 1431316485Sdavidcs /* Attention setup */ 1432316485Sdavidcs p_hwfn->p_sb_attn = p_sb; 1433316485Sdavidcs ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1434316485Sdavidcs 1435316485Sdavidcs return ECORE_SUCCESS; 1436316485Sdavidcs} 1437316485Sdavidcs 1438316485Sdavidcs/* coalescing timeout = timeset << (timer_res + 1) */ 1439316485Sdavidcs#define ECORE_CAU_DEF_RX_USECS 24 1440316485Sdavidcs#define ECORE_CAU_DEF_TX_USECS 48 1441316485Sdavidcs 1442316485Sdavidcsvoid ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1443316485Sdavidcs struct cau_sb_entry *p_sb_entry, 1444316485Sdavidcs u8 pf_id, u16 vf_number, u8 vf_valid) 1445316485Sdavidcs{ 1446316485Sdavidcs struct ecore_dev *p_dev = p_hwfn->p_dev; 1447316485Sdavidcs u32 cau_state; 1448316485Sdavidcs u8 timer_res; 1449316485Sdavidcs 1450316485Sdavidcs OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1451316485Sdavidcs 1452316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1453316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1454316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1455316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1456316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1457316485Sdavidcs 1458316485Sdavidcs cau_state = CAU_HC_DISABLE_STATE; 1459316485Sdavidcs 1460316485Sdavidcs if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 
1461316485Sdavidcs cau_state = CAU_HC_ENABLE_STATE; 1462316485Sdavidcs if (!p_dev->rx_coalesce_usecs) 1463316485Sdavidcs p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1464316485Sdavidcs if (!p_dev->tx_coalesce_usecs) 1465316485Sdavidcs p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1466316485Sdavidcs } 1467316485Sdavidcs 1468316485Sdavidcs /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1469316485Sdavidcs if (p_dev->rx_coalesce_usecs <= 0x7F) 1470316485Sdavidcs timer_res = 0; 1471316485Sdavidcs else if (p_dev->rx_coalesce_usecs <= 0xFF) 1472316485Sdavidcs timer_res = 1; 1473316485Sdavidcs else 1474316485Sdavidcs timer_res = 2; 1475316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1476316485Sdavidcs 1477316485Sdavidcs if (p_dev->tx_coalesce_usecs <= 0x7F) 1478316485Sdavidcs timer_res = 0; 1479316485Sdavidcs else if (p_dev->tx_coalesce_usecs <= 0xFF) 1480316485Sdavidcs timer_res = 1; 1481316485Sdavidcs else 1482316485Sdavidcs timer_res = 2; 1483316485Sdavidcs SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1484316485Sdavidcs 1485316485Sdavidcs SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1486316485Sdavidcs SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1487316485Sdavidcs} 1488316485Sdavidcs 1489316485Sdavidcsstatic void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1490316485Sdavidcs struct ecore_ptt *p_ptt, 1491316485Sdavidcs u16 igu_sb_id, u32 pi_index, 1492316485Sdavidcs enum ecore_coalescing_fsm coalescing_fsm, 1493316485Sdavidcs u8 timeset) 1494316485Sdavidcs{ 1495316485Sdavidcs struct cau_pi_entry pi_entry; 1496316485Sdavidcs u32 sb_offset, pi_offset; 1497316485Sdavidcs 1498316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 1499316485Sdavidcs return;/* @@@TBD MichalK- VF CAU... 
*/ 1500316485Sdavidcs 1501320164Sdavidcs sb_offset = igu_sb_id * PIS_PER_SB_E4; 1502316485Sdavidcs OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1503316485Sdavidcs 1504316485Sdavidcs SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1505316485Sdavidcs if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1506316485Sdavidcs SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1507316485Sdavidcs else 1508316485Sdavidcs SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1509316485Sdavidcs 1510316485Sdavidcs pi_offset = sb_offset + pi_index; 1511316485Sdavidcs if (p_hwfn->hw_init_done) { 1512316485Sdavidcs ecore_wr(p_hwfn, p_ptt, 1513316485Sdavidcs CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1514316485Sdavidcs *((u32 *)&(pi_entry))); 1515316485Sdavidcs } else { 1516316485Sdavidcs STORE_RT_REG(p_hwfn, 1517316485Sdavidcs CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1518316485Sdavidcs *((u32 *)&(pi_entry))); 1519316485Sdavidcs } 1520316485Sdavidcs} 1521316485Sdavidcs 1522316485Sdavidcsvoid ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1523316485Sdavidcs struct ecore_ptt *p_ptt, 1524316485Sdavidcs struct ecore_sb_info *p_sb, u32 pi_index, 1525316485Sdavidcs enum ecore_coalescing_fsm coalescing_fsm, 1526316485Sdavidcs u8 timeset) 1527316485Sdavidcs{ 1528316485Sdavidcs _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, 1529316485Sdavidcs pi_index, coalescing_fsm, timeset); 1530316485Sdavidcs} 1531316485Sdavidcs 1532316485Sdavidcsvoid ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1533316485Sdavidcs struct ecore_ptt *p_ptt, 1534316485Sdavidcs dma_addr_t sb_phys, u16 igu_sb_id, 1535316485Sdavidcs u16 vf_number, u8 vf_valid) 1536316485Sdavidcs{ 1537316485Sdavidcs struct cau_sb_entry sb_entry; 1538316485Sdavidcs 1539316485Sdavidcs ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1540316485Sdavidcs vf_number, vf_valid); 1541316485Sdavidcs 1542316485Sdavidcs if (p_hwfn->hw_init_done) { 1543316485Sdavidcs /* Wide-bus, initialize via DMAE */ 
1544316485Sdavidcs u64 phys_addr = (u64)sb_phys; 1545316485Sdavidcs 1546316485Sdavidcs ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr, 1547316485Sdavidcs CAU_REG_SB_ADDR_MEMORY + 1548337517Sdavidcs igu_sb_id * sizeof(u64), 2, 1549337517Sdavidcs OSAL_NULL /* default parameters */); 1550316485Sdavidcs ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry, 1551316485Sdavidcs CAU_REG_SB_VAR_MEMORY + 1552337517Sdavidcs igu_sb_id * sizeof(u64), 2, 1553337517Sdavidcs OSAL_NULL /* default parameters */); 1554316485Sdavidcs } else { 1555316485Sdavidcs /* Initialize Status Block Address */ 1556316485Sdavidcs STORE_RT_REG_AGG(p_hwfn, 1557316485Sdavidcs CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2, 1558316485Sdavidcs sb_phys); 1559316485Sdavidcs 1560316485Sdavidcs STORE_RT_REG_AGG(p_hwfn, 1561316485Sdavidcs CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2, 1562316485Sdavidcs sb_entry); 1563316485Sdavidcs } 1564316485Sdavidcs 1565316485Sdavidcs /* Configure pi coalescing if set */ 1566316485Sdavidcs if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1567316485Sdavidcs /* eth will open queues for all tcs, so configure all of them 1568316485Sdavidcs * properly, rather than just the active ones 1569316485Sdavidcs */ 1570316485Sdavidcs u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1571316485Sdavidcs 1572316485Sdavidcs u8 timeset, timer_res; 1573316485Sdavidcs u8 i; 1574316485Sdavidcs 1575316485Sdavidcs /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1576316485Sdavidcs if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1577316485Sdavidcs timer_res = 0; 1578316485Sdavidcs else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1579316485Sdavidcs timer_res = 1; 1580316485Sdavidcs else 1581316485Sdavidcs timer_res = 2; 1582316485Sdavidcs timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1583316485Sdavidcs _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1584316485Sdavidcs ECORE_COAL_RX_STATE_MACHINE, 1585316485Sdavidcs 
timeset); 1586316485Sdavidcs 1587316485Sdavidcs if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1588316485Sdavidcs timer_res = 0; 1589316485Sdavidcs else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1590316485Sdavidcs timer_res = 1; 1591316485Sdavidcs else 1592316485Sdavidcs timer_res = 2; 1593316485Sdavidcs timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1594316485Sdavidcs for (i = 0; i < num_tc; i++) { 1595316485Sdavidcs _ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1596316485Sdavidcs igu_sb_id, TX_PI(i), 1597316485Sdavidcs ECORE_COAL_TX_STATE_MACHINE, 1598316485Sdavidcs timeset); 1599316485Sdavidcs } 1600316485Sdavidcs } 1601316485Sdavidcs} 1602316485Sdavidcs 1603316485Sdavidcsvoid ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1604316485Sdavidcs struct ecore_ptt *p_ptt, 1605316485Sdavidcs struct ecore_sb_info *sb_info) 1606316485Sdavidcs{ 1607316485Sdavidcs /* zero status block and ack counter */ 1608316485Sdavidcs sb_info->sb_ack = 0; 1609316485Sdavidcs OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1610316485Sdavidcs 1611316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) 1612316485Sdavidcs ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1613316485Sdavidcs sb_info->igu_sb_id, 0, 0); 1614316485Sdavidcs} 1615316485Sdavidcs 1616316485Sdavidcsstruct ecore_igu_block * 1617316485Sdavidcsecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) 1618316485Sdavidcs{ 1619316485Sdavidcs struct ecore_igu_block *p_block; 1620316485Sdavidcs u16 igu_id; 1621316485Sdavidcs 1622316485Sdavidcs for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1623316485Sdavidcs igu_id++) { 1624316485Sdavidcs p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1625316485Sdavidcs 1626316485Sdavidcs if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1627316485Sdavidcs !(p_block->status & ECORE_IGU_STATUS_FREE)) 1628316485Sdavidcs continue; 1629316485Sdavidcs 1630316485Sdavidcs if (!!(p_block->status & ECORE_IGU_STATUS_PF) == 1631316485Sdavidcs b_is_pf) 
1632316485Sdavidcs return p_block; 1633316485Sdavidcs } 1634316485Sdavidcs 1635316485Sdavidcs return OSAL_NULL; 1636316485Sdavidcs} 1637316485Sdavidcs 1638316485Sdavidcsstatic u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, 1639316485Sdavidcs u16 vector_id) 1640316485Sdavidcs{ 1641316485Sdavidcs struct ecore_igu_block *p_block; 1642316485Sdavidcs u16 igu_id; 1643316485Sdavidcs 1644316485Sdavidcs for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1645316485Sdavidcs igu_id++) { 1646316485Sdavidcs p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1647316485Sdavidcs 1648316485Sdavidcs if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1649316485Sdavidcs !p_block->is_pf || 1650316485Sdavidcs p_block->vector_number != vector_id) 1651316485Sdavidcs continue; 1652316485Sdavidcs 1653316485Sdavidcs return igu_id; 1654316485Sdavidcs } 1655316485Sdavidcs 1656316485Sdavidcs return ECORE_SB_INVALID_IDX; 1657316485Sdavidcs} 1658316485Sdavidcs 1659316485Sdavidcsu16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1660316485Sdavidcs{ 1661316485Sdavidcs u16 igu_sb_id; 1662316485Sdavidcs 1663316485Sdavidcs /* Assuming continuous set of IGU SBs dedicated for given PF */ 1664316485Sdavidcs if (sb_id == ECORE_SP_SB_ID) 1665316485Sdavidcs igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1666316485Sdavidcs else if (IS_PF(p_hwfn->p_dev)) 1667316485Sdavidcs igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1668316485Sdavidcs else 1669316485Sdavidcs igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1670316485Sdavidcs 1671316485Sdavidcs if (igu_sb_id == ECORE_SB_INVALID_IDX) 1672316485Sdavidcs DP_NOTICE(p_hwfn, true, 1673316485Sdavidcs "Slowpath SB vector %04x doesn't exist\n", 1674316485Sdavidcs sb_id); 1675316485Sdavidcs else if (sb_id == ECORE_SP_SB_ID) 1676316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1677316485Sdavidcs "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1678316485Sdavidcs else 1679316485Sdavidcs DP_VERBOSE(p_hwfn, 
ECORE_MSG_INTR, 1680316485Sdavidcs "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1681316485Sdavidcs 1682316485Sdavidcs return igu_sb_id; 1683316485Sdavidcs} 1684316485Sdavidcs 1685316485Sdavidcsenum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1686316485Sdavidcs struct ecore_ptt *p_ptt, 1687316485Sdavidcs struct ecore_sb_info *sb_info, 1688316485Sdavidcs void *sb_virt_addr, 1689316485Sdavidcs dma_addr_t sb_phy_addr, 1690316485Sdavidcs u16 sb_id) 1691316485Sdavidcs{ 1692316485Sdavidcs sb_info->sb_virt = sb_virt_addr; 1693316485Sdavidcs sb_info->sb_phys = sb_phy_addr; 1694316485Sdavidcs 1695316485Sdavidcs sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1696316485Sdavidcs 1697316485Sdavidcs if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) 1698316485Sdavidcs return ECORE_INVAL; 1699316485Sdavidcs 1700316485Sdavidcs /* Let the igu info reference the client's SB info */ 1701316485Sdavidcs if (sb_id != ECORE_SP_SB_ID) { 1702316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 1703316485Sdavidcs struct ecore_igu_info *p_info; 1704316485Sdavidcs struct ecore_igu_block *p_block; 1705316485Sdavidcs 1706316485Sdavidcs p_info = p_hwfn->hw_info.p_igu_info; 1707316485Sdavidcs p_block = &p_info->entry[sb_info->igu_sb_id]; 1708316485Sdavidcs 1709316485Sdavidcs p_block->sb_info = sb_info; 1710316485Sdavidcs p_block->status &= ~ECORE_IGU_STATUS_FREE; 1711316485Sdavidcs p_info->usage.free_cnt--; 1712316485Sdavidcs } else { 1713316485Sdavidcs ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1714316485Sdavidcs } 1715316485Sdavidcs } 1716316485Sdavidcs 1717316485Sdavidcs#ifdef ECORE_CONFIG_DIRECT_HWFN 1718316485Sdavidcs sb_info->p_hwfn = p_hwfn; 1719316485Sdavidcs#endif 1720316485Sdavidcs sb_info->p_dev = p_hwfn->p_dev; 1721316485Sdavidcs 1722316485Sdavidcs /* The igu address will hold the absolute address that needs to be 1723316485Sdavidcs * written to for a specific status block 1724316485Sdavidcs */ 1725316485Sdavidcs if (IS_PF(p_hwfn->p_dev)) { 1726316485Sdavidcs 
sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview + 1727316485Sdavidcs GTT_BAR0_MAP_REG_IGU_CMD + 1728316485Sdavidcs (sb_info->igu_sb_id << 3); 1729316485Sdavidcs 1730316485Sdavidcs } else { 1731316485Sdavidcs sb_info->igu_addr = 1732316485Sdavidcs (u8 OSAL_IOMEM*)p_hwfn->regview + 1733316485Sdavidcs PXP_VF_BAR0_START_IGU + 1734316485Sdavidcs ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 1735316485Sdavidcs } 1736316485Sdavidcs 1737316485Sdavidcs sb_info->flags |= ECORE_SB_INFO_INIT; 1738316485Sdavidcs 1739316485Sdavidcs ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1740316485Sdavidcs 1741316485Sdavidcs return ECORE_SUCCESS; 1742316485Sdavidcs} 1743316485Sdavidcs 1744316485Sdavidcsenum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1745316485Sdavidcs struct ecore_sb_info *sb_info, 1746316485Sdavidcs u16 sb_id) 1747316485Sdavidcs{ 1748316485Sdavidcs struct ecore_igu_info *p_info; 1749316485Sdavidcs struct ecore_igu_block *p_block; 1750316485Sdavidcs 1751316485Sdavidcs if (sb_info == OSAL_NULL) 1752316485Sdavidcs return ECORE_SUCCESS; 1753316485Sdavidcs 1754316485Sdavidcs /* zero status block and ack counter */ 1755316485Sdavidcs sb_info->sb_ack = 0; 1756316485Sdavidcs OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1757316485Sdavidcs 1758316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) { 1759316485Sdavidcs ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1760316485Sdavidcs return ECORE_SUCCESS; 1761316485Sdavidcs } 1762316485Sdavidcs 1763316485Sdavidcs p_info = p_hwfn->hw_info.p_igu_info; 1764316485Sdavidcs p_block = &p_info->entry[sb_info->igu_sb_id]; 1765316485Sdavidcs 1766316485Sdavidcs /* Vector 0 is reserved to Default SB */ 1767316485Sdavidcs if (p_block->vector_number == 0) { 1768316485Sdavidcs DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1769316485Sdavidcs return ECORE_INVAL; 1770316485Sdavidcs } 1771316485Sdavidcs 1772316485Sdavidcs /* Lose reference to client's SB info, and fix counters */ 1773316485Sdavidcs 
p_block->sb_info = OSAL_NULL; 1774316485Sdavidcs p_block->status |= ECORE_IGU_STATUS_FREE; 1775316485Sdavidcs p_info->usage.free_cnt++; 1776316485Sdavidcs 1777316485Sdavidcs return ECORE_SUCCESS; 1778316485Sdavidcs} 1779316485Sdavidcs 1780316485Sdavidcsstatic void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1781316485Sdavidcs{ 1782316485Sdavidcs struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1783316485Sdavidcs 1784316485Sdavidcs if (!p_sb) 1785316485Sdavidcs return; 1786316485Sdavidcs 1787316485Sdavidcs if (p_sb->sb_info.sb_virt) { 1788316485Sdavidcs OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1789316485Sdavidcs p_sb->sb_info.sb_virt, 1790316485Sdavidcs p_sb->sb_info.sb_phys, 1791316485Sdavidcs SB_ALIGNED_SIZE(p_hwfn)); 1792316485Sdavidcs } 1793316485Sdavidcs 1794316485Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_sb); 1795316485Sdavidcs p_hwfn->p_sp_sb = OSAL_NULL; 1796316485Sdavidcs} 1797316485Sdavidcs 1798316485Sdavidcsstatic enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1799316485Sdavidcs struct ecore_ptt *p_ptt) 1800316485Sdavidcs{ 1801316485Sdavidcs struct ecore_sb_sp_info *p_sb; 1802316485Sdavidcs dma_addr_t p_phys = 0; 1803316485Sdavidcs void *p_virt; 1804316485Sdavidcs 1805316485Sdavidcs /* SB struct */ 1806316485Sdavidcs p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); 1807316485Sdavidcs if (!p_sb) { 1808337517Sdavidcs DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); 1809316485Sdavidcs return ECORE_NOMEM; 1810316485Sdavidcs } 1811316485Sdavidcs 1812316485Sdavidcs /* SB ring */ 1813316485Sdavidcs p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1814316485Sdavidcs &p_phys, 1815316485Sdavidcs SB_ALIGNED_SIZE(p_hwfn)); 1816316485Sdavidcs if (!p_virt) { 1817337517Sdavidcs DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); 1818316485Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_sb); 1819316485Sdavidcs return ECORE_NOMEM; 1820316485Sdavidcs } 1821316485Sdavidcs 1822316485Sdavidcs 1823316485Sdavidcs /* Status 
Block setup */ 1824316485Sdavidcs p_hwfn->p_sp_sb = p_sb; 1825316485Sdavidcs ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1826316485Sdavidcs p_virt, p_phys, ECORE_SP_SB_ID); 1827316485Sdavidcs 1828316485Sdavidcs OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1829316485Sdavidcs 1830316485Sdavidcs return ECORE_SUCCESS; 1831316485Sdavidcs} 1832316485Sdavidcs 1833316485Sdavidcsenum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1834316485Sdavidcs ecore_int_comp_cb_t comp_cb, 1835316485Sdavidcs void *cookie, 1836316485Sdavidcs u8 *sb_idx, 1837316485Sdavidcs __le16 **p_fw_cons) 1838316485Sdavidcs{ 1839316485Sdavidcs struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1840316485Sdavidcs enum _ecore_status_t rc = ECORE_NOMEM; 1841316485Sdavidcs u8 pi; 1842316485Sdavidcs 1843316485Sdavidcs /* Look for a free index */ 1844316485Sdavidcs for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1845316485Sdavidcs if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1846316485Sdavidcs continue; 1847316485Sdavidcs 1848316485Sdavidcs p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1849316485Sdavidcs p_sp_sb->pi_info_arr[pi].cookie = cookie; 1850316485Sdavidcs *sb_idx = pi; 1851316485Sdavidcs *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1852316485Sdavidcs rc = ECORE_SUCCESS; 1853316485Sdavidcs break; 1854316485Sdavidcs } 1855316485Sdavidcs 1856316485Sdavidcs return rc; 1857316485Sdavidcs} 1858316485Sdavidcs 1859316485Sdavidcsenum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, 1860316485Sdavidcs u8 pi) 1861316485Sdavidcs{ 1862316485Sdavidcs struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1863316485Sdavidcs 1864316485Sdavidcs if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1865316485Sdavidcs return ECORE_NOMEM; 1866316485Sdavidcs 1867316485Sdavidcs p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1868316485Sdavidcs p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1869316485Sdavidcs 1870316485Sdavidcs return 
ECORE_SUCCESS; 1871316485Sdavidcs} 1872316485Sdavidcs 1873316485Sdavidcsu16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1874316485Sdavidcs{ 1875316485Sdavidcs return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1876316485Sdavidcs} 1877316485Sdavidcs 1878316485Sdavidcsvoid ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1879316485Sdavidcs struct ecore_ptt *p_ptt, 1880316485Sdavidcs enum ecore_int_mode int_mode) 1881316485Sdavidcs{ 1882316485Sdavidcs u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1883316485Sdavidcs 1884316485Sdavidcs#ifndef ASIC_ONLY 1885316485Sdavidcs if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1886316485Sdavidcs DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1887316485Sdavidcs igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1888316485Sdavidcs } 1889316485Sdavidcs#endif 1890316485Sdavidcs 1891316485Sdavidcs p_hwfn->p_dev->int_mode = int_mode; 1892316485Sdavidcs switch (p_hwfn->p_dev->int_mode) { 1893316485Sdavidcs case ECORE_INT_MODE_INTA: 1894316485Sdavidcs igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1895316485Sdavidcs igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1896316485Sdavidcs break; 1897316485Sdavidcs 1898316485Sdavidcs case ECORE_INT_MODE_MSI: 1899316485Sdavidcs igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1900316485Sdavidcs igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1901316485Sdavidcs break; 1902316485Sdavidcs 1903316485Sdavidcs case ECORE_INT_MODE_MSIX: 1904316485Sdavidcs igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1905316485Sdavidcs break; 1906316485Sdavidcs case ECORE_INT_MODE_POLL: 1907316485Sdavidcs break; 1908316485Sdavidcs } 1909316485Sdavidcs 1910316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1911316485Sdavidcs} 1912316485Sdavidcs 1913316485Sdavidcsstatic void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1914316485Sdavidcs struct ecore_ptt *p_ptt) 1915316485Sdavidcs{ 1916316485Sdavidcs#ifndef ASIC_ONLY 1917316485Sdavidcs if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1918316485Sdavidcs 
DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n"); 1919316485Sdavidcs return; 1920316485Sdavidcs } 1921316485Sdavidcs#endif 1922316485Sdavidcs 1923316485Sdavidcs /* Configure AEU signal change to produce attentions */ 1924316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1925316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1926316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1927316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1928316485Sdavidcs 1929316485Sdavidcs /* Flush the writes to IGU */ 1930316485Sdavidcs OSAL_MMIOWB(p_hwfn->p_dev); 1931316485Sdavidcs 1932316485Sdavidcs /* Unmask AEU signals toward IGU */ 1933316485Sdavidcs ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1934316485Sdavidcs} 1935316485Sdavidcs 1936316485Sdavidcsenum _ecore_status_t 1937316485Sdavidcsecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1938316485Sdavidcs enum ecore_int_mode int_mode) 1939316485Sdavidcs{ 1940316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 1941316485Sdavidcs 1942316485Sdavidcs ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1943316485Sdavidcs 1944316485Sdavidcs if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1945316485Sdavidcs rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1946316485Sdavidcs if (rc != ECORE_SUCCESS) { 1947316485Sdavidcs DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n"); 1948316485Sdavidcs return ECORE_NORESOURCES; 1949316485Sdavidcs } 1950316485Sdavidcs p_hwfn->b_int_requested = true; 1951316485Sdavidcs } 1952316485Sdavidcs 1953316485Sdavidcs /* Enable interrupt Generation */ 1954316485Sdavidcs ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1955316485Sdavidcs 1956316485Sdavidcs p_hwfn->b_int_enabled = 1; 1957316485Sdavidcs 1958316485Sdavidcs return rc; 1959316485Sdavidcs} 1960316485Sdavidcs 1961316485Sdavidcsvoid ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1962316485Sdavidcs 
struct ecore_ptt *p_ptt) 1963316485Sdavidcs{ 1964316485Sdavidcs p_hwfn->b_int_enabled = 0; 1965316485Sdavidcs 1966316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 1967316485Sdavidcs return; 1968316485Sdavidcs 1969316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1970316485Sdavidcs} 1971316485Sdavidcs 1972316485Sdavidcs#define IGU_CLEANUP_SLEEP_LENGTH (1000) 1973316485Sdavidcsstatic void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1974316485Sdavidcs struct ecore_ptt *p_ptt, 1975316485Sdavidcs u16 igu_sb_id, 1976316485Sdavidcs bool cleanup_set, 1977316485Sdavidcs u16 opaque_fid) 1978316485Sdavidcs{ 1979316485Sdavidcs u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1980316485Sdavidcs u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1981316485Sdavidcs u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1982316485Sdavidcs u8 type = 0; /* FIXME MichalS type??? */ 1983316485Sdavidcs 1984316485Sdavidcs OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1985316485Sdavidcs IGU_REG_CLEANUP_STATUS_0) != 0x200); 1986316485Sdavidcs 1987316485Sdavidcs /* USE Control Command Register to perform cleanup. There is an 1988316485Sdavidcs * option to do this using IGU bar, but then it can't be used for VFs. 1989316485Sdavidcs */ 1990316485Sdavidcs 1991316485Sdavidcs /* Set the data field */ 1992316485Sdavidcs SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 
1 : 0); 1993316485Sdavidcs SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1994316485Sdavidcs SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1995316485Sdavidcs 1996316485Sdavidcs /* Set the control register */ 1997316485Sdavidcs SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1998316485Sdavidcs SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1999316485Sdavidcs SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 2000316485Sdavidcs 2001316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 2002316485Sdavidcs 2003316485Sdavidcs OSAL_BARRIER(p_hwfn->p_dev); 2004316485Sdavidcs 2005316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 2006316485Sdavidcs 2007316485Sdavidcs /* Flush the write to IGU */ 2008316485Sdavidcs OSAL_MMIOWB(p_hwfn->p_dev); 2009316485Sdavidcs 2010316485Sdavidcs /* calculate where to read the status bit from */ 2011316485Sdavidcs sb_bit = 1 << (igu_sb_id % 32); 2012316485Sdavidcs sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2013316485Sdavidcs 2014316485Sdavidcs sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2015316485Sdavidcs 2016316485Sdavidcs /* Now wait for the command to complete */ 2017316485Sdavidcs while (--sleep_cnt) { 2018316485Sdavidcs val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2019316485Sdavidcs if ((val & sb_bit) == (cleanup_set ? 
sb_bit : 0)) 2020316485Sdavidcs break; 2021316485Sdavidcs OSAL_MSLEEP(5); 2022316485Sdavidcs } 2023316485Sdavidcs 2024316485Sdavidcs if (!sleep_cnt) 2025316485Sdavidcs DP_NOTICE(p_hwfn, true, 2026316485Sdavidcs "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2027316485Sdavidcs val, igu_sb_id); 2028316485Sdavidcs} 2029316485Sdavidcs 2030316485Sdavidcsvoid ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2031316485Sdavidcs struct ecore_ptt *p_ptt, 2032316485Sdavidcs u16 igu_sb_id, u16 opaque, bool b_set) 2033316485Sdavidcs{ 2034316485Sdavidcs struct ecore_igu_block *p_block; 2035316485Sdavidcs int pi, i; 2036316485Sdavidcs 2037316485Sdavidcs p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2038316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2039316485Sdavidcs "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2040316485Sdavidcs igu_sb_id, p_block->function_id, p_block->is_pf, 2041316485Sdavidcs p_block->vector_number); 2042316485Sdavidcs 2043316485Sdavidcs /* Set */ 2044316485Sdavidcs if (b_set) 2045316485Sdavidcs ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2046316485Sdavidcs 2047316485Sdavidcs /* Clear */ 2048316485Sdavidcs ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2049316485Sdavidcs 2050316485Sdavidcs /* Wait for the IGU SB to cleanup */ 2051316485Sdavidcs for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2052316485Sdavidcs u32 val; 2053316485Sdavidcs 2054316485Sdavidcs val = ecore_rd(p_hwfn, p_ptt, 2055316485Sdavidcs IGU_REG_WRITE_DONE_PENDING + 2056316485Sdavidcs ((igu_sb_id / 32) * 4)); 2057316485Sdavidcs if (val & (1 << (igu_sb_id % 32))) 2058316485Sdavidcs OSAL_UDELAY(10); 2059316485Sdavidcs else 2060316485Sdavidcs break; 2061316485Sdavidcs } 2062316485Sdavidcs if (i == IGU_CLEANUP_SLEEP_LENGTH) 2063316485Sdavidcs DP_NOTICE(p_hwfn, true, 2064316485Sdavidcs "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2065316485Sdavidcs igu_sb_id); 2066316485Sdavidcs 
2067316485Sdavidcs /* Clear the CAU for the SB */ 2068316485Sdavidcs for (pi = 0; pi < 12; pi++) 2069316485Sdavidcs ecore_wr(p_hwfn, p_ptt, 2070316485Sdavidcs CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2071316485Sdavidcs} 2072316485Sdavidcs 2073316485Sdavidcsvoid ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2074316485Sdavidcs struct ecore_ptt *p_ptt, 2075316485Sdavidcs bool b_set, 2076316485Sdavidcs bool b_slowpath) 2077316485Sdavidcs{ 2078316485Sdavidcs struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2079316485Sdavidcs struct ecore_igu_block *p_block; 2080316485Sdavidcs u16 igu_sb_id = 0; 2081316485Sdavidcs u32 val = 0; 2082316485Sdavidcs 2083316485Sdavidcs /* @@@TBD MichalK temporary... should be moved to init-tool... */ 2084316485Sdavidcs val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2085316485Sdavidcs val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2086316485Sdavidcs val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2087316485Sdavidcs ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2088316485Sdavidcs /* end temporary */ 2089316485Sdavidcs 2090316485Sdavidcs for (igu_sb_id = 0; 2091316485Sdavidcs igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2092316485Sdavidcs igu_sb_id++) { 2093316485Sdavidcs p_block = &p_info->entry[igu_sb_id]; 2094316485Sdavidcs 2095316485Sdavidcs if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2096316485Sdavidcs !p_block->is_pf || 2097316485Sdavidcs (p_block->status & ECORE_IGU_STATUS_DSB)) 2098316485Sdavidcs continue; 2099316485Sdavidcs 2100316485Sdavidcs ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2101316485Sdavidcs p_hwfn->hw_info.opaque_fid, 2102316485Sdavidcs b_set); 2103316485Sdavidcs } 2104316485Sdavidcs 2105316485Sdavidcs if (b_slowpath) 2106316485Sdavidcs ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2107316485Sdavidcs p_info->igu_dsb_id, 2108316485Sdavidcs p_hwfn->hw_info.opaque_fid, 2109316485Sdavidcs b_set); 2110316485Sdavidcs} 
2111316485Sdavidcs 2112316485Sdavidcsint ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, 2113316485Sdavidcs struct ecore_ptt *p_ptt) 2114316485Sdavidcs{ 2115316485Sdavidcs struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2116316485Sdavidcs struct ecore_igu_block *p_block; 2117316485Sdavidcs int pf_sbs, vf_sbs; 2118316485Sdavidcs u16 igu_sb_id; 2119316485Sdavidcs u32 val, rval; 2120316485Sdavidcs 2121316485Sdavidcs if (!RESC_NUM(p_hwfn, ECORE_SB)) { 2122316485Sdavidcs /* We're using an old MFW - have to prevent any switching 2123316485Sdavidcs * of SBs between PF and VFs as later driver wouldn't be 2124316485Sdavidcs * able to tell which belongs to which. 2125316485Sdavidcs */ 2126316485Sdavidcs p_info->b_allow_pf_vf_change = false; 2127316485Sdavidcs } else { 2128316485Sdavidcs /* Use the numbers the MFW have provided - 2129316485Sdavidcs * don't forget MFW accounts for the default SB as well. 2130316485Sdavidcs */ 2131316485Sdavidcs p_info->b_allow_pf_vf_change = true; 2132316485Sdavidcs 2133316485Sdavidcs if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { 2134316485Sdavidcs DP_INFO(p_hwfn, 2135316485Sdavidcs "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2136316485Sdavidcs RESC_NUM(p_hwfn, ECORE_SB) - 1, 2137316485Sdavidcs p_info->usage.cnt); 2138316485Sdavidcs p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; 2139316485Sdavidcs } 2140316485Sdavidcs 2141316485Sdavidcs /* TODO - how do we learn about VF SBs from MFW? */ 2142316485Sdavidcs if (IS_PF_SRIOV(p_hwfn)) { 2143316485Sdavidcs u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 2144316485Sdavidcs 2145316485Sdavidcs if (vfs != p_info->usage.iov_cnt) 2146316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2147316485Sdavidcs "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2148316485Sdavidcs p_info->usage.iov_cnt, vfs); 2149316485Sdavidcs 2150316485Sdavidcs /* At this point we know how many SBs we have totally 2151316485Sdavidcs * in IGU + number of PF SBs. 
So we can validate that 2152316485Sdavidcs * we'd have sufficient for VF. 2153316485Sdavidcs */ 2154316485Sdavidcs if (vfs > p_info->usage.free_cnt + 2155316485Sdavidcs p_info->usage.free_cnt_iov - 2156316485Sdavidcs p_info->usage.cnt) { 2157316485Sdavidcs DP_NOTICE(p_hwfn, true, 2158316485Sdavidcs "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2159316485Sdavidcs p_info->usage.free_cnt + 2160316485Sdavidcs p_info->usage.free_cnt_iov, 2161316485Sdavidcs p_info->usage.cnt, vfs); 2162316485Sdavidcs return ECORE_INVAL; 2163316485Sdavidcs } 2164316485Sdavidcs } 2165316485Sdavidcs } 2166316485Sdavidcs 2167337517Sdavidcs /* Cap the number of VFs SBs by the number of VFs */ 2168337517Sdavidcs if (IS_PF_SRIOV(p_hwfn)) 2169337517Sdavidcs p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs; 2170337517Sdavidcs 2171316485Sdavidcs /* Mark all SBs as free, now in the right PF/VFs division */ 2172316485Sdavidcs p_info->usage.free_cnt = p_info->usage.cnt; 2173316485Sdavidcs p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2174316485Sdavidcs p_info->usage.orig = p_info->usage.cnt; 2175316485Sdavidcs p_info->usage.iov_orig = p_info->usage.iov_cnt; 2176316485Sdavidcs 2177316485Sdavidcs /* We now proceed to re-configure the IGU cam to reflect the initial 2178316485Sdavidcs * configuration. We can start with the Default SB. 
2179316485Sdavidcs */ 2180316485Sdavidcs pf_sbs = p_info->usage.cnt; 2181316485Sdavidcs vf_sbs = p_info->usage.iov_cnt; 2182316485Sdavidcs 2183316485Sdavidcs for (igu_sb_id = p_info->igu_dsb_id; 2184316485Sdavidcs igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2185316485Sdavidcs igu_sb_id++) { 2186316485Sdavidcs p_block = &p_info->entry[igu_sb_id]; 2187316485Sdavidcs val = 0; 2188316485Sdavidcs 2189316485Sdavidcs if (!(p_block->status & ECORE_IGU_STATUS_VALID)) 2190316485Sdavidcs continue; 2191316485Sdavidcs 2192316485Sdavidcs if (p_block->status & ECORE_IGU_STATUS_DSB) { 2193316485Sdavidcs p_block->function_id = p_hwfn->rel_pf_id; 2194316485Sdavidcs p_block->is_pf = 1; 2195316485Sdavidcs p_block->vector_number = 0; 2196316485Sdavidcs p_block->status = ECORE_IGU_STATUS_VALID | 2197316485Sdavidcs ECORE_IGU_STATUS_PF | 2198316485Sdavidcs ECORE_IGU_STATUS_DSB; 2199316485Sdavidcs } else if (pf_sbs) { 2200316485Sdavidcs pf_sbs--; 2201316485Sdavidcs p_block->function_id = p_hwfn->rel_pf_id; 2202316485Sdavidcs p_block->is_pf = 1; 2203316485Sdavidcs p_block->vector_number = p_info->usage.cnt - pf_sbs; 2204316485Sdavidcs p_block->status = ECORE_IGU_STATUS_VALID | 2205316485Sdavidcs ECORE_IGU_STATUS_PF | 2206316485Sdavidcs ECORE_IGU_STATUS_FREE; 2207316485Sdavidcs } else if (vf_sbs) { 2208316485Sdavidcs p_block->function_id = 2209316485Sdavidcs p_hwfn->p_dev->p_iov_info->first_vf_in_pf + 2210316485Sdavidcs p_info->usage.iov_cnt - vf_sbs; 2211316485Sdavidcs p_block->is_pf = 0; 2212316485Sdavidcs p_block->vector_number = 0; 2213316485Sdavidcs p_block->status = ECORE_IGU_STATUS_VALID | 2214316485Sdavidcs ECORE_IGU_STATUS_FREE; 2215316485Sdavidcs vf_sbs--; 2216316485Sdavidcs } else { 2217316485Sdavidcs p_block->function_id = 0; 2218316485Sdavidcs p_block->is_pf = 0; 2219316485Sdavidcs p_block->vector_number = 0; 2220316485Sdavidcs } 2221316485Sdavidcs 2222316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2223316485Sdavidcs p_block->function_id); 
2224316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2225316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2226316485Sdavidcs p_block->vector_number); 2227316485Sdavidcs 2228316485Sdavidcs /* VF entries would be enabled when VF is initializaed */ 2229316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2230316485Sdavidcs 2231316485Sdavidcs rval = ecore_rd(p_hwfn, p_ptt, 2232316485Sdavidcs IGU_REG_MAPPING_MEMORY + 2233316485Sdavidcs sizeof(u32) * igu_sb_id); 2234316485Sdavidcs 2235316485Sdavidcs if (rval != val) { 2236316485Sdavidcs ecore_wr(p_hwfn, p_ptt, 2237316485Sdavidcs IGU_REG_MAPPING_MEMORY + 2238316485Sdavidcs sizeof(u32) * igu_sb_id, 2239316485Sdavidcs val); 2240316485Sdavidcs 2241316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2242316485Sdavidcs "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2243316485Sdavidcs igu_sb_id, p_block->function_id, 2244316485Sdavidcs p_block->is_pf, p_block->vector_number, 2245316485Sdavidcs rval, val); 2246316485Sdavidcs } 2247316485Sdavidcs } 2248316485Sdavidcs 2249316485Sdavidcs return 0; 2250316485Sdavidcs} 2251316485Sdavidcs 2252316485Sdavidcsint ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, 2253316485Sdavidcs struct ecore_ptt *p_ptt) 2254316485Sdavidcs{ 2255316485Sdavidcs struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; 2256316485Sdavidcs 2257316485Sdavidcs /* Return all the usage indications to default prior to the reset; 2258316485Sdavidcs * The reset expects the !orig to reflect the initial status of the 2259316485Sdavidcs * SBs, and would re-calculate the originals based on those. 
2260316485Sdavidcs */ 2261316485Sdavidcs p_cnt->cnt = p_cnt->orig; 2262316485Sdavidcs p_cnt->free_cnt = p_cnt->orig; 2263316485Sdavidcs p_cnt->iov_cnt = p_cnt->iov_orig; 2264316485Sdavidcs p_cnt->free_cnt_iov = p_cnt->iov_orig; 2265316485Sdavidcs p_cnt->orig = 0; 2266316485Sdavidcs p_cnt->iov_orig = 0; 2267316485Sdavidcs 2268316485Sdavidcs /* TODO - we probably need to re-configure the CAU as well... */ 2269316485Sdavidcs return ecore_int_igu_reset_cam(p_hwfn, p_ptt); 2270316485Sdavidcs} 2271316485Sdavidcs 2272316485Sdavidcsstatic void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 2273316485Sdavidcs struct ecore_ptt *p_ptt, 2274316485Sdavidcs u16 igu_sb_id) 2275316485Sdavidcs{ 2276316485Sdavidcs u32 val = ecore_rd(p_hwfn, p_ptt, 2277316485Sdavidcs IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2278316485Sdavidcs struct ecore_igu_block *p_block; 2279316485Sdavidcs 2280316485Sdavidcs p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2281316485Sdavidcs 2282316485Sdavidcs /* Fill the block information */ 2283316485Sdavidcs p_block->function_id = GET_FIELD(val, 2284316485Sdavidcs IGU_MAPPING_LINE_FUNCTION_NUMBER); 2285316485Sdavidcs p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2286316485Sdavidcs p_block->vector_number = GET_FIELD(val, 2287316485Sdavidcs IGU_MAPPING_LINE_VECTOR_NUMBER); 2288316485Sdavidcs p_block->igu_sb_id = igu_sb_id; 2289316485Sdavidcs} 2290316485Sdavidcs 2291316485Sdavidcsenum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 2292316485Sdavidcs struct ecore_ptt *p_ptt) 2293316485Sdavidcs{ 2294316485Sdavidcs struct ecore_igu_info *p_igu_info; 2295316485Sdavidcs struct ecore_igu_block *p_block; 2296316485Sdavidcs u32 min_vf = 0, max_vf = 0; 2297316485Sdavidcs u16 igu_sb_id; 2298316485Sdavidcs 2299316485Sdavidcs p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, 2300316485Sdavidcs GFP_KERNEL, 2301316485Sdavidcs sizeof(*p_igu_info)); 2302316485Sdavidcs if (!p_hwfn->hw_info.p_igu_info) 
2303316485Sdavidcs return ECORE_NOMEM; 2304316485Sdavidcs p_igu_info = p_hwfn->hw_info.p_igu_info; 2305316485Sdavidcs 2306316485Sdavidcs /* Distinguish between existent and onn-existent default SB */ 2307316485Sdavidcs p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; 2308316485Sdavidcs 2309316485Sdavidcs /* Find the range of VF ids whose SB belong to this PF */ 2310316485Sdavidcs if (p_hwfn->p_dev->p_iov_info) { 2311316485Sdavidcs struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 2312316485Sdavidcs 2313316485Sdavidcs min_vf = p_iov->first_vf_in_pf; 2314316485Sdavidcs max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2315316485Sdavidcs } 2316316485Sdavidcs 2317316485Sdavidcs for (igu_sb_id = 0; 2318316485Sdavidcs igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2319316485Sdavidcs igu_sb_id++) { 2320316485Sdavidcs /* Read current entry; Notice it might not belong to this PF */ 2321316485Sdavidcs ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2322316485Sdavidcs p_block = &p_igu_info->entry[igu_sb_id]; 2323316485Sdavidcs 2324316485Sdavidcs if ((p_block->is_pf) && 2325316485Sdavidcs (p_block->function_id == p_hwfn->rel_pf_id)) { 2326316485Sdavidcs p_block->status = ECORE_IGU_STATUS_PF | 2327316485Sdavidcs ECORE_IGU_STATUS_VALID | 2328316485Sdavidcs ECORE_IGU_STATUS_FREE; 2329316485Sdavidcs 2330316485Sdavidcs if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2331316485Sdavidcs p_igu_info->usage.cnt++; 2332316485Sdavidcs } else if (!(p_block->is_pf) && 2333316485Sdavidcs (p_block->function_id >= min_vf) && 2334316485Sdavidcs (p_block->function_id < max_vf)) { 2335316485Sdavidcs /* Available for VFs of this PF */ 2336316485Sdavidcs p_block->status = ECORE_IGU_STATUS_VALID | 2337316485Sdavidcs ECORE_IGU_STATUS_FREE; 2338316485Sdavidcs 2339316485Sdavidcs if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2340316485Sdavidcs p_igu_info->usage.iov_cnt++; 2341316485Sdavidcs } 2342316485Sdavidcs 2343316485Sdavidcs /* Mark the First entry belonging to 
the PF or its VFs 2344316485Sdavidcs * as the default SB [we'll reset IGU prior to first usage]. 2345316485Sdavidcs */ 2346316485Sdavidcs if ((p_block->status & ECORE_IGU_STATUS_VALID) && 2347316485Sdavidcs (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { 2348316485Sdavidcs p_igu_info->igu_dsb_id = igu_sb_id; 2349316485Sdavidcs p_block->status |= ECORE_IGU_STATUS_DSB; 2350316485Sdavidcs } 2351316485Sdavidcs 2352316485Sdavidcs /* While this isn't suitable for all clients, limit number 2353316485Sdavidcs * of prints by having each PF print only its entries with the 2354316485Sdavidcs * exception of PF0 which would print everything. 2355316485Sdavidcs */ 2356316485Sdavidcs if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2357316485Sdavidcs (p_hwfn->abs_pf_id == 0)) 2358316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2359316485Sdavidcs "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2360316485Sdavidcs igu_sb_id, p_block->function_id, 2361316485Sdavidcs p_block->is_pf, p_block->vector_number); 2362316485Sdavidcs } 2363316485Sdavidcs 2364316485Sdavidcs if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2365316485Sdavidcs DP_NOTICE(p_hwfn, true, 2366316485Sdavidcs "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2367316485Sdavidcs p_igu_info->igu_dsb_id); 2368316485Sdavidcs return ECORE_INVAL; 2369316485Sdavidcs } 2370316485Sdavidcs 2371316485Sdavidcs /* All non default SB are considered free at this point */ 2372316485Sdavidcs p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2373316485Sdavidcs p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2374316485Sdavidcs 2375316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2376316485Sdavidcs "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2377316485Sdavidcs p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2378316485Sdavidcs p_igu_info->usage.iov_cnt); 2379316485Sdavidcs 2380316485Sdavidcs return ECORE_SUCCESS; 2381316485Sdavidcs} 
2382316485Sdavidcs 2383316485Sdavidcsenum _ecore_status_t 2384316485Sdavidcsecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2385316485Sdavidcs u16 sb_id, bool b_to_vf) 2386316485Sdavidcs{ 2387316485Sdavidcs struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2388316485Sdavidcs struct ecore_igu_block *p_block = OSAL_NULL; 2389316485Sdavidcs u16 igu_sb_id = 0, vf_num = 0; 2390316485Sdavidcs u32 val = 0; 2391316485Sdavidcs 2392316485Sdavidcs if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2393316485Sdavidcs return ECORE_INVAL; 2394316485Sdavidcs 2395316485Sdavidcs if (sb_id == ECORE_SP_SB_ID) 2396316485Sdavidcs return ECORE_INVAL; 2397316485Sdavidcs 2398316485Sdavidcs if (!p_info->b_allow_pf_vf_change) { 2399316485Sdavidcs DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2400316485Sdavidcs return ECORE_INVAL; 2401316485Sdavidcs } 2402316485Sdavidcs 2403316485Sdavidcs /* If we're moving a SB from PF to VF, the client had to specify 2404316485Sdavidcs * which vector it wants to move. 2405316485Sdavidcs */ 2406316485Sdavidcs if (b_to_vf) { 2407316485Sdavidcs igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2408316485Sdavidcs if (igu_sb_id == ECORE_SB_INVALID_IDX) 2409316485Sdavidcs return ECORE_INVAL; 2410316485Sdavidcs } 2411316485Sdavidcs 2412316485Sdavidcs /* If we're moving a SB from VF to PF, need to validate there isn't 2413316485Sdavidcs * already a line configured for that vector. 2414316485Sdavidcs */ 2415316485Sdavidcs if (!b_to_vf) { 2416316485Sdavidcs if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2417316485Sdavidcs ECORE_SB_INVALID_IDX) 2418316485Sdavidcs return ECORE_INVAL; 2419316485Sdavidcs } 2420316485Sdavidcs 2421316485Sdavidcs /* We need to validate that the SB can actually be relocated. 2422316485Sdavidcs * This would also handle the previous case where we've explicitly 2423316485Sdavidcs * stated which IGU SB needs to move. 
2424316485Sdavidcs */ 2425316485Sdavidcs for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2426316485Sdavidcs igu_sb_id++) { 2427316485Sdavidcs p_block = &p_info->entry[igu_sb_id]; 2428316485Sdavidcs 2429316485Sdavidcs if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2430316485Sdavidcs !(p_block->status & ECORE_IGU_STATUS_FREE) || 2431316485Sdavidcs (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2432316485Sdavidcs if (b_to_vf) 2433316485Sdavidcs return ECORE_INVAL; 2434316485Sdavidcs else 2435316485Sdavidcs continue; 2436316485Sdavidcs } 2437316485Sdavidcs 2438316485Sdavidcs break; 2439316485Sdavidcs } 2440316485Sdavidcs 2441316485Sdavidcs if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2442316485Sdavidcs DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2443316485Sdavidcs "Failed to find a free SB to move\n"); 2444316485Sdavidcs return ECORE_INVAL; 2445316485Sdavidcs } 2446316485Sdavidcs 2447337517Sdavidcs if (p_block == OSAL_NULL) { 2448337517Sdavidcs DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2449337517Sdavidcs "SB address (p_block) is NULL\n"); 2450337517Sdavidcs return ECORE_INVAL; 2451337517Sdavidcs } 2452337517Sdavidcs 2453316485Sdavidcs /* At this point, p_block points to the SB we want to relocate */ 2454316485Sdavidcs if (b_to_vf) { 2455316485Sdavidcs p_block->status &= ~ECORE_IGU_STATUS_PF; 2456316485Sdavidcs 2457316485Sdavidcs /* It doesn't matter which VF number we choose, since we're 2458316485Sdavidcs * going to disable the line; But let's keep it in range. 
2459316485Sdavidcs */ 2460316485Sdavidcs vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2461316485Sdavidcs 2462316485Sdavidcs p_block->function_id = (u8)vf_num; 2463316485Sdavidcs p_block->is_pf = 0; 2464316485Sdavidcs p_block->vector_number = 0; 2465316485Sdavidcs 2466316485Sdavidcs p_info->usage.cnt--; 2467316485Sdavidcs p_info->usage.free_cnt--; 2468316485Sdavidcs p_info->usage.iov_cnt++; 2469316485Sdavidcs p_info->usage.free_cnt_iov++; 2470316485Sdavidcs 2471316485Sdavidcs /* TODO - if SBs aren't really the limiting factor, 2472316485Sdavidcs * then it might not be accurate [in the since that 2473316485Sdavidcs * we might not need decrement the feature]. 2474316485Sdavidcs */ 2475316485Sdavidcs p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2476316485Sdavidcs p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2477316485Sdavidcs } else { 2478316485Sdavidcs p_block->status |= ECORE_IGU_STATUS_PF; 2479316485Sdavidcs p_block->function_id = p_hwfn->rel_pf_id; 2480316485Sdavidcs p_block->is_pf = 1; 2481316485Sdavidcs p_block->vector_number = sb_id + 1; 2482316485Sdavidcs 2483316485Sdavidcs p_info->usage.cnt++; 2484316485Sdavidcs p_info->usage.free_cnt++; 2485316485Sdavidcs p_info->usage.iov_cnt--; 2486316485Sdavidcs p_info->usage.free_cnt_iov--; 2487316485Sdavidcs 2488316485Sdavidcs p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2489316485Sdavidcs p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2490316485Sdavidcs } 2491316485Sdavidcs 2492316485Sdavidcs /* Update the IGU and CAU with the new configuration */ 2493316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2494316485Sdavidcs p_block->function_id); 2495316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2496316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2497316485Sdavidcs SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2498316485Sdavidcs p_block->vector_number); 2499316485Sdavidcs 2500316485Sdavidcs ecore_wr(p_hwfn, p_ptt, 2501316485Sdavidcs 
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2502316485Sdavidcs val); 2503316485Sdavidcs 2504316485Sdavidcs ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2505316485Sdavidcs igu_sb_id, vf_num, 2506316485Sdavidcs p_block->is_pf ? 0 : 1); 2507316485Sdavidcs 2508316485Sdavidcs DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2509316485Sdavidcs "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2510316485Sdavidcs igu_sb_id, p_block->function_id, 2511316485Sdavidcs p_block->is_pf, p_block->vector_number); 2512316485Sdavidcs 2513316485Sdavidcs return ECORE_SUCCESS; 2514316485Sdavidcs} 2515316485Sdavidcs 2516316485Sdavidcs/** 2517316485Sdavidcs * @brief Initialize igu runtime registers 2518316485Sdavidcs * 2519316485Sdavidcs * @param p_hwfn 2520316485Sdavidcs */ 2521316485Sdavidcsvoid ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2522316485Sdavidcs{ 2523316485Sdavidcs u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2524316485Sdavidcs 2525316485Sdavidcs STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2526316485Sdavidcs} 2527316485Sdavidcs 2528316485Sdavidcs#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2529316485Sdavidcs IGU_CMD_INT_ACK_BASE) 2530316485Sdavidcs#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2531316485Sdavidcs IGU_CMD_INT_ACK_BASE) 2532316485Sdavidcsu64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2533316485Sdavidcs{ 2534316485Sdavidcs u32 intr_status_hi = 0, intr_status_lo = 0; 2535316485Sdavidcs u64 intr_status = 0; 2536316485Sdavidcs 2537316485Sdavidcs intr_status_lo = REG_RD(p_hwfn, 2538316485Sdavidcs GTT_BAR0_MAP_REG_IGU_CMD + 2539316485Sdavidcs LSB_IGU_CMD_ADDR * 8); 2540316485Sdavidcs intr_status_hi = REG_RD(p_hwfn, 2541316485Sdavidcs GTT_BAR0_MAP_REG_IGU_CMD + 2542316485Sdavidcs MSB_IGU_CMD_ADDR * 8); 2543316485Sdavidcs intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2544316485Sdavidcs 2545316485Sdavidcs return intr_status; 2546316485Sdavidcs} 2547316485Sdavidcs 
2548316485Sdavidcsstatic void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2549316485Sdavidcs{ 2550316485Sdavidcs OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2551316485Sdavidcs p_hwfn->b_sp_dpc_enabled = true; 2552316485Sdavidcs} 2553316485Sdavidcs 2554316485Sdavidcsstatic enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2555316485Sdavidcs{ 2556316485Sdavidcs p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2557316485Sdavidcs if (!p_hwfn->sp_dpc) 2558316485Sdavidcs return ECORE_NOMEM; 2559316485Sdavidcs 2560316485Sdavidcs return ECORE_SUCCESS; 2561316485Sdavidcs} 2562316485Sdavidcs 2563316485Sdavidcsstatic void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2564316485Sdavidcs{ 2565316485Sdavidcs OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2566316485Sdavidcs p_hwfn->sp_dpc = OSAL_NULL; 2567316485Sdavidcs} 2568316485Sdavidcs 2569316485Sdavidcsenum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2570316485Sdavidcs struct ecore_ptt *p_ptt) 2571316485Sdavidcs{ 2572316485Sdavidcs enum _ecore_status_t rc = ECORE_SUCCESS; 2573316485Sdavidcs 2574316485Sdavidcs rc = ecore_int_sp_dpc_alloc(p_hwfn); 2575316485Sdavidcs if (rc != ECORE_SUCCESS) { 2576316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); 2577316485Sdavidcs return rc; 2578316485Sdavidcs } 2579316485Sdavidcs 2580316485Sdavidcs rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2581316485Sdavidcs if (rc != ECORE_SUCCESS) { 2582316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2583316485Sdavidcs return rc; 2584316485Sdavidcs } 2585316485Sdavidcs 2586316485Sdavidcs rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2587316485Sdavidcs if (rc != ECORE_SUCCESS) 2588316485Sdavidcs DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2589316485Sdavidcs 2590316485Sdavidcs return rc; 2591316485Sdavidcs} 2592316485Sdavidcs 2593316485Sdavidcsvoid ecore_int_free(struct ecore_hwfn *p_hwfn) 2594316485Sdavidcs{ 2595316485Sdavidcs ecore_int_sp_sb_free(p_hwfn); 
2596316485Sdavidcs ecore_int_sb_attn_free(p_hwfn); 2597316485Sdavidcs ecore_int_sp_dpc_free(p_hwfn); 2598316485Sdavidcs} 2599316485Sdavidcs 2600316485Sdavidcsvoid ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2601316485Sdavidcs{ 2602316485Sdavidcs if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2603316485Sdavidcs return; 2604316485Sdavidcs 2605316485Sdavidcs ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2606316485Sdavidcs ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2607316485Sdavidcs ecore_int_sp_dpc_setup(p_hwfn); 2608316485Sdavidcs} 2609316485Sdavidcs 2610316485Sdavidcsvoid ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2611316485Sdavidcs struct ecore_sb_cnt_info *p_sb_cnt_info) 2612316485Sdavidcs{ 2613316485Sdavidcs struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2614316485Sdavidcs 2615316485Sdavidcs if (!p_igu_info || !p_sb_cnt_info) 2616316485Sdavidcs return; 2617316485Sdavidcs 2618316485Sdavidcs OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2619316485Sdavidcs sizeof(*p_sb_cnt_info)); 2620316485Sdavidcs} 2621316485Sdavidcs 2622316485Sdavidcsvoid ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2623316485Sdavidcs{ 2624316485Sdavidcs int i; 2625316485Sdavidcs 2626316485Sdavidcs for_each_hwfn(p_dev, i) 2627316485Sdavidcs p_dev->hwfns[i].b_int_requested = false; 2628316485Sdavidcs} 2629316485Sdavidcs 2630316485Sdavidcsvoid ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2631316485Sdavidcs{ 2632316485Sdavidcs p_dev->attn_clr_en = clr_enable; 2633316485Sdavidcs} 2634316485Sdavidcs 2635316485Sdavidcsenum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2636316485Sdavidcs struct ecore_ptt *p_ptt, 2637316485Sdavidcs u8 timer_res, u16 sb_id, bool tx) 2638316485Sdavidcs{ 2639316485Sdavidcs struct cau_sb_entry sb_entry; 2640316485Sdavidcs enum _ecore_status_t rc; 2641316485Sdavidcs 2642316485Sdavidcs if (!p_hwfn->hw_init_done) { 2643316485Sdavidcs DP_ERR(p_hwfn, 
"hardware not initialized yet\n"); 2644316485Sdavidcs return ECORE_INVAL; 2645316485Sdavidcs } 2646316485Sdavidcs 2647316485Sdavidcs rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2648316485Sdavidcs sb_id * sizeof(u64), 2649337517Sdavidcs (u64)(osal_uintptr_t)&sb_entry, 2, 2650337517Sdavidcs OSAL_NULL /* default parameters */); 2651316485Sdavidcs if (rc != ECORE_SUCCESS) { 2652316485Sdavidcs DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2653316485Sdavidcs return rc; 2654316485Sdavidcs } 2655316485Sdavidcs 2656316485Sdavidcs if (tx) 2657316485Sdavidcs SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2658316485Sdavidcs else 2659316485Sdavidcs SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2660316485Sdavidcs 2661316485Sdavidcs rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2662316485Sdavidcs (u64)(osal_uintptr_t)&sb_entry, 2663337517Sdavidcs CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, 2664337517Sdavidcs OSAL_NULL /* default parameters */); 2665316485Sdavidcs if (rc != ECORE_SUCCESS) { 2666316485Sdavidcs DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2667316485Sdavidcs return rc; 2668316485Sdavidcs } 2669316485Sdavidcs 2670316485Sdavidcs return rc; 2671316485Sdavidcs} 2672316485Sdavidcs 2673316485Sdavidcsenum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2674316485Sdavidcs struct ecore_ptt *p_ptt, 2675316485Sdavidcs struct ecore_sb_info *p_sb, 2676316485Sdavidcs struct ecore_sb_info_dbg *p_info) 2677316485Sdavidcs{ 2678316485Sdavidcs u16 sbid = p_sb->igu_sb_id; 2679316485Sdavidcs int i; 2680316485Sdavidcs 2681316485Sdavidcs if (IS_VF(p_hwfn->p_dev)) 2682316485Sdavidcs return ECORE_INVAL; 2683316485Sdavidcs 2684316485Sdavidcs if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2685316485Sdavidcs return ECORE_INVAL; 2686316485Sdavidcs 2687316485Sdavidcs p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2688316485Sdavidcs IGU_REG_PRODUCER_MEMORY + sbid * 4); 2689316485Sdavidcs p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 
2690316485Sdavidcs IGU_REG_CONSUMER_MEM + sbid * 4); 2691316485Sdavidcs 2692320164Sdavidcs for (i = 0; i < PIS_PER_SB_E4; i++) 2693316485Sdavidcs p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2694316485Sdavidcs CAU_REG_PI_MEMORY + 2695320164Sdavidcs sbid * 4 * PIS_PER_SB_E4 + i * 4); 2696316485Sdavidcs 2697316485Sdavidcs return ECORE_SUCCESS; 2698316485Sdavidcs} 2699