/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_fw_funcs.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/ecore_init_fw_funcs.c 337517 2018-08-09 01:17:35Z davidcs $");

#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"

#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
	(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* VOQ constants */
#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)

/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16

/* RL constants: */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in Mbps. The factor of 1.01 was
 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
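/* Illustrative arithmetic (not part of the original source): for a 25Gbps
 * PF, QM_RL_INC_VAL(25000) = max((25000 * 5 * 101) / (8 * 100), 1)
 * = 12625000 / 800 = 15781 in integer math - the credit added every 5us
 * period, inflated by the 1.01 factor described above.
 */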
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* Vport RL Upper bound, link speed is in Mbps */
#define QM_VP_RL_UPPER_BOUND(speed) \
	((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))

/* Max Vport RL increment value is the Vport RL upper bound */
#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)

/* Vport RL credit threshold in case of QM bypass */
#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) \
	OSAL_MEMSET(&map, 0, sizeof(map)); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
	STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))

#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4
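/* Illustrative packing (hypothetical values): PQ_INFO_ELEMENT places vp in
 * bits [11:0], pf at bit 12, tc at bit 16, port at bit 20, rl_valid at bit 22
 * and rl at bit 24. For vp = 5, pf = 1, tc = 2 and the rest zero this yields
 * 5 | (1 << 12) | (2 << 16) = 0x00021005.
 */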
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;
	else
		return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
}
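/* Example of the mapping above (hypothetical values): on a non-E5 device with
 * max_phys_tcs_per_port = 4, port 1 / TC 2 maps to external VOQ 1 * 4 + 2 = 6,
 * while the pure LB TC of port 1 maps to VOQ
 * NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1, past all physical-TC VOQs.
 */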
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
			       bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
				bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
				  bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
	if (vport_rl_en) {

		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_VP_RL_BYPASS_THRESH_SPEED);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
				   bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq,
					 u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved.
		 * The lines for pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		if (ECORE_IS_E5(p_hwfn->p_dev))
			phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
	}
}
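/* Worked example for the division above (hypothetical numbers): a port with
 * num_pbf_cmd_lines = 3000 on E5 first loses DIV_ROUND_UP(3000, 8) = 375
 * reserved lines, then the 150 pure LB lines, leaving 2475; with 4 active
 * physical TCs each VOQ gets 2475 / 4 = 618 lines.
 */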
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
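/* Trace of the code below with hypothetical inputs: for num_btb_blocks = 1000
 * and 4 active TCs, usable_blocks = 1000 - 38 = 962; the factored pure LB
 * term is (962 * 10) / (4 * 10 + 7) = 204, so pure_lb_blocks =
 * max(38, 204 / 10) = 38 and phys_blocks = (962 - 38) / 4 = 231 per TC.
 */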
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    bool is_pf_loading,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;
		u16 first_tx_pq_id;

		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));
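			/* Illustrative layout (hypothetical values): on E4
			 * the PF ID sits at bit 5, so ext_voq = 6 and
			 * pf_id = 2 give map_val = (2 << 5) | 6 = 0x46.
			 */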
			/* Create new VP PQ */
			vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
			DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");

		/* Prepare PQ map entry */
		if (ECORE_IS_E5(p_hwfn->p_dev)) {
			struct qm_rf_pq_map_e5 tx_pq_map;

			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		} else {
			struct qm_rf_pq_map_e4 tx_pq_map;

			QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		}

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id, port_id, rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       bool is_pf_loading,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET + (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
			(ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
			(pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

	return 0;
}
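/* Illustrative bound check (not part of the original source): with
 * QM_WFQ_INC_VAL(weight) = weight * 0x9000 = weight * 36864 and
 * QM_WFQ_MAX_INC_VAL = 43750000, any PF WFQ weight above
 * 43750000 / 36864 = 1186 fails the validation above.
 */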
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
			       u8 pf_id,
			       u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  u32 link_speed,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? vport_params[i].vport_rl : link_speed);
		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_VP_RL_UPPER_BOUND(link_speed) | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
	}

	return 0;
}
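/* Illustrative arithmetic (hypothetical link speed): for link_speed = 10000
 * Mbps, QM_RL_INC_VAL(10000) = (10000 * 5 * 101) / 800 = 6312, so
 * QM_VP_RL_UPPER_BOUND(10000) = max(6312, 9700 + 1000) = 10700, and the QM
 * bypass threshold QM_VP_RL_BYPASS_THRESH_SPEED defined earlier is 10699.
 */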
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	       (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	       (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	       (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	       (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	       (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	       (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	       (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	return 0;
}

int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			bool is_pf_loading,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			u32 link_speed,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, is_pf_loading, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, link_speed, vport_params))
		return -1;

	return 0;
}

int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 pf_id,
		      u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u8 pf_id,
		     u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS],
			 u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 vport_id,
			u32 vport_rl,
			u32 link_speed)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq,
			    u16 start_pq,
			    u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {

		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
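/* Example of the chunking above (hypothetical range): a stop command for PQs
 * 0..47 is sent as two commands - one with GROUP_ID 0 and a full 32-bit pause
 * mask for PQs 0..31, then one with GROUP_ID 1 and a 16-bit mask for
 * PQs 32..47.
 */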
#ifndef UNUSED_HSI_FUNC

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
	(2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val, mtu) \
	(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4

void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req *req,
			bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT, (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset, NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
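/* Worked example of the weight translation above (hypothetical weights): with
 * TC weights 1 and 2 the minimal weight is 1, so the byte weights are 1600 and
 * 3200; with req->mtu = 9600 both upper bounds are 2 * max(weight, 9600) =
 * 19200.
 */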
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req *req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {

		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
	}
}

void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req *req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
	}
}
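/* Illustrative packing (hypothetical mapping): with priority 0 -> TC 1 and
 * priority 1 -> TC 0, pri_tc_mask holds one 4-bit nibble per priority,
 * (1 << 0) | (0 << 4) = 0x1, while tc_pri_mask[1] = 0x1 and
 * tc_pri_mask[0] = 0x2 record the reverse TC -> priority mapping.
 */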
(weight) : (mtu))) 1125 1126 1127void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn, 1128 struct ecore_ptt *p_ptt, 1129 struct init_ets_req* req) 1130{ 1131 u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff; 1132 u8 tc, sp_tc_map = 0, wfq_tc_map = 0; 1133 1134 tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0; 1135 tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0; 1136 1137 for (tc = 0; tc < NUM_OF_TCS; tc++) { 1138 struct init_ets_tc_req *tc_req = &req->tc_req[tc]; 1139 1140 /* Update SP map */ 1141 if (tc_req->use_sp) 1142 sp_tc_map |= (1 << tc); 1143 1144 if (!tc_req->use_wfq) 1145 continue; 1146 1147 /* Update WFQ map */ 1148 wfq_tc_map |= (1 << tc); 1149 1150 /* Find minimal weight */ 1151 if (tc_req->weight < min_weight) 1152 min_weight = tc_req->weight; 1153 } 1154 1155 /* Write SP map */ 1156 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map); 1157 1158 /* Write WFQ map */ 1159 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map); 1160 1161 /* Write WFQ weights */ 1162 for (tc = 0; tc < NUM_OF_TCS; tc++) { 1163 struct init_ets_tc_req *tc_req = &req->tc_req[tc]; 1164 u32 byte_weight; 1165 1166 if (!tc_req->use_wfq) 1167 continue; 1168 1169 /* Translate weight to bytes */ 1170 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight; 1171 1172 /* Write WFQ weight */ 1173 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight); 1174 1175 /* Write WFQ upper bound */ 1176 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, req->mtu)); 1177 } 1178} 1179 1180#endif /* UNUSED_HSI_FUNC */ 1181#ifndef UNUSED_HSI_FUNC 1182 1183/* BRB: RAM configuration constants */ 1184#define BRB_TOTAL_RAM_BLOCKS_BB 4800 1185#define BRB_TOTAL_RAM_BLOCKS_K2 5632 1186#define BRB_BLOCK_SIZE 128 1187#define BRB_MIN_BLOCKS_PER_TC 9 1188#define BRB_HYST_BYTES 10240 1189#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE) 1190 1191/* Temporary big RAM allocation - should be updated */ 1192void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, 1193 struct ecore_ptt *p_ptt, 1194 struct init_brb_ram_req* req) 1195{ 1196 u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks; 1197 u32 active_port_blocks, reg_offset = 0; 1198 u8 port, active_ports = 0; 1199 1200 tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE); 1201 min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE); 1202 total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : BRB_TOTAL_RAM_BLOCKS_BB; 1203 1204 /* Find number of active ports */ 1205 for (port = 0; port < MAX_NUM_PORTS; port++) 1206 if (req->num_active_tcs[port]) 1207 active_ports++; 1208 1209 active_port_blocks = (u32)(total_blocks / active_ports); 1210 1211 for (port = 0; port < req->max_ports_per_engine; port++) { 1212 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks; 1213 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th; 1214 u32 tc_guaranteed_blocks; 1215 u8 tc; 1216 1217 /* Calculate per-port sizes */ 1218 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE); 1219 port_blocks = req->num_active_tcs[port] ? 
	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
		port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
		}
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#endif /* UNUSED_HSI_FUNC */

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
	var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)
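/* Illustrative expansion (hypothetical values): for var = 0x5, offset = 2 and
 * enable = 0 the macro clears bit 2, (0x5 & ~0x4) | 0 = 0x1; with enable = 1
 * it sets the bit again, (0x1 & ~0x4) | 0x4 = 0x5.
 */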
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
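/* Typical usage sketch (assumed, not taken from this file): enable VXLAN
 * steering on the IANA-assigned UDP port:
 *
 *	ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *	ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
 */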
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable,
			  bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}

void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable,
			     bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) { /* TODO: handle E5 init */
		reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);

		/* Update output only if tunnel blocks not included. */
		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
			ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
	}

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512

void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  bool enable)
{
	u32 reg_val, cfg_mask;

	/* read PRS config register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);

	/* set VXLAN_NO_L2_ENABLE mask */
	cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);

	if (enable) {
		/* set VXLAN_NO_L2_ENABLE flag */
		reg_val |= cfg_mask;

		/* update PRS FIC register */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
	} else {
		/* clear VXLAN_NO_L2_ENABLE flag */
		reg_val &= ~cfg_mask;
	}

	/* write PRS config register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
}

#ifndef UNUSED_HSI_FUNC

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u16 pf_id)
{
	/* Disable GFT search for PF */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);

	/* Clean RAM & CAM for next GFT session */

	/* Zero CAM line */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

	/* Zero RAM line */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}

void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
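/* Usage sketch (assumed, not taken from this file): configure GFT to match
 * TCP-over-IPv4 flows on the full 4-tuple for a given PF:
 *
 *	ecore_gft_config(p_hwfn, p_ptt, pf_id, true, false, true, false,
 *			 GFT_PROFILE_TYPE_4_TUPLE);
 */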
void ecore_gft_config(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u16 pf_id,
		      bool tcp,
		      bool udp,
		      bool ipv4,
		      bool ipv6,
		      enum gft_profile_type profile_type)
{
	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of - udp or tcp\n");
	if (profile_type >= MAX_GFT_PROFILE_TYPE)
		DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);

	/* Do not load context only cid in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Do not use tenant ID exist bit for GFT search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);

	/* Set CAM */
	cam_line = 0;
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
	}

	/* Write characteristics to CAM */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, cam_line);
	cam_line = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line_lo = 0;
	ram_line_hi = 0;

	/* Tunnel type */
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
	}

	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, ram_line_lo);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, ram_line_hi);

	/* Set default profile so that no filter match will happen */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);

	/* Enable GFT search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}

#endif /* UNUSED_HSI_FUNC */

/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
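/* Illustrative effect (symbolic, since MSTORM_VF_ZONE_DEFAULT_SIZE_LOG is
 * defined elsewhere): VF_ZONE_SIZE_MODE_DOUBLE adds 1 to the size log, so each
 * VF zone doubles and msdm_vf_offset_mask gains one bit;
 * VF_ZONE_SIZE_MODE_QUAD adds 2, quadrupling the zone.
 */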

/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u16 mode,
				    bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}

/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
				       u16 stat_cnt_id,
				       u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}

/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
					 u8 vf_id,
					 u8 vf_queue_id,
					 u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
	}

	return offset;
}
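
/* Worked example (illustrative): in VF_ZONE_SIZE_MODE_QUAD every VF zone is
 * 4x the default size, so each VF index beyond the PF range is pushed out by
 * an extra 3 default-zone-sizes per VF - exactly the
 * "3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG)" term above. Assuming a
 * default zone of 128B (i.e. MSTORM_VF_ZONE_DEFAULT_SIZE_LOG == 7; the real
 * value comes from the HSI headers), for stat_cnt_id == MAX_NUM_PFS + 2:
 *
 *	offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id) + 3 * 128 * 2;
 */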

#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20],CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * For type A validation:
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * For type B validation:
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
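
/* Sketch of the table-driven CRC8 the OSAL hooks above are expected to
 * provide (an assumption based on the 0x07 polynomial and 0xFF init value
 * passed in, not the authoritative bcm_osal implementation). Table
 * generation and lookup:
 *
 *	static void crc8_populate_msb(u8 table[256], u8 poly)
 *	{
 *		int i, j;
 *
 *		for (i = 0; i < 256; i++) {
 *			u8 crc = (u8)i;
 *
 *			// Run each byte value through 8 MSB-first shift steps
 *			for (j = 0; j < 8; j++)
 *				crc = (crc & 0x80) ? (crc << 1) ^ poly : crc << 1;
 *			table[i] = crc;
 *		}
 *	}
 *
 *	static u8 crc8(const u8 table[256], const u8 *pdata, size_t nbytes, u8 crc)
 *	{
 *		// One table lookup per input byte
 *		while (nbytes--)
 *			crc = table[crc ^ *pdata++];
 *		return crc;
 *	}
 *
 * This is the common MSB-first CRC8 convention, matching the style of the
 * Linux lib/crc8 helpers.
 */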

/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}

/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}

/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8 *)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}

/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8 *)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}

/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}

#define RSS_IND_TABLE_BASE_ADDR      4112
#define RSS_IND_TABLE_VPORT_SIZE     16
#define RSS_IND_TABLE_ENTRY_PER_LINE 8

/* Update RSS indirection table entry */
void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u8 rss_id,
					  u8 ind_table_index,
					  u16 ind_table_value)
{
	u32 cnt, rss_addr;
	u32 *reg_val;
	u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
	u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];

	/* Get entry address */
	rss_addr = RSS_IND_TABLE_BASE_ADDR +
		   RSS_IND_TABLE_VPORT_SIZE * rss_id +
		   ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;

	/* Prepare update command */
	ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;

	for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
		if (cnt == ind_table_index) {
			rss_ind_entry[cnt] = ind_table_value;
			rss_ind_mask[cnt] = 0xFFFF;
		} else {
			rss_ind_entry[cnt] = 0;
			rss_ind_mask[cnt] = 0;
		}
	}

	/* Update entry in HW */
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);

	reg_val = (u32 *)rss_ind_mask;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);

	reg_val = (u32 *)rss_ind_entry;
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
	ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}
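
/* Illustrative usage sketch (an assumption, not upstream code): spreading a
 * 4-queue VPORT across a 128-entry indirection table one entry at a time.
 * The table length of 128 and the rss_id/queue_base variables are
 * assumptions for the example; only the function itself is real.
 *
 *	u8 i;
 *
 *	for (i = 0; i < 128; i++)
 *		ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt,
 *						     rss_id, i,
 *						     queue_base + (i % 4));
 */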