/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/bxe/ecore_init_ops.h 265797 2014-05-10 02:10:32Z davidcs $");

#ifndef ECORE_INIT_OPS_H
#define ECORE_INIT_OPS_H

/* Low-level helpers; `static`, so they are defined later in the translation
 * unit that includes this header (the bxe driver .c file).
 */
static int ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len);
static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val);
static void ecore_write_dmae_phys_len(struct bxe_softc *sc,
                                      ecore_dma_addr_t phys_addr, uint32_t addr,
                                      uint32_t len);

/* Write `len` dwords from `data` to consecutive registers starting at `addr`
 * using plain (direct) register writes.
 */
static void ecore_init_str_wr(struct bxe_softc *sc, uint32_t addr,
                              const uint32_t *data, uint32_t len)
{
    uint32_t i;

    for (i = 0; i < len; i++)
        REG_WR(sc, addr + i*4, data[i]);
}

/* Write `len` dwords from `data` to consecutive registers starting at `addr`
 * using indirect register writes (ecore_reg_wr_ind).
 */
static void ecore_init_ind_wr(struct bxe_softc *sc, uint32_t addr,
                              const uint32_t *data, uint32_t len)
{
    uint32_t i;

    for (i = 0; i < len; i++)
        ecore_reg_wr_ind(sc, addr + i*4, data[i]);
}

/* Flush `len` dwords of the driver's gunzip scratch buffer to `addr`.
 * Prefers a DMAE transfer when the DMAE engine is ready; otherwise falls
 * back to CPU writes.  `wb` marks the destination as a wide-bus region.
 */
static void ecore_write_big_buf(struct bxe_softc *sc, uint32_t addr, uint32_t len,
                                uint8_t wb)
{
    if (DMAE_READY(sc))
        ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

    /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
    else if (wb && CHIP_IS_E1(sc))
        ecore_init_ind_wr(sc, addr, GUNZIP_BUF(sc), len);

    /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
    else
        ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

/* Fill `len` dwords starting at `addr` with the byte pattern `fill`,
 * working through the gunzip scratch buffer in chunks of at most
 * FW_BUF_SIZE bytes.
 */
static void ecore_init_fill(struct bxe_softc *sc, uint32_t addr, int fill,
                            uint32_t len, uint8_t wb)
{
    uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
    uint32_t buf_len32 = buf_len/4;
    uint32_t i;

    ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len);

    for (i = 0; i < len; i += buf_len32) {
        uint32_t cur_len = min(buf_len32, len - i);

        ecore_write_big_buf(sc, addr + i*4, cur_len, wb);
    }
}

/* Same as ecore_write_big_buf() but the destination is always treated as
 * wide-bus (no `wb` parameter).
 */
static void ecore_write_big_buf_wb(struct bxe_softc *sc, uint32_t addr, uint32_t len)
{
    if (DMAE_READY(sc))
        ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len);

    /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
    else if (CHIP_IS_E1(sc))
        ecore_init_ind_wr(sc, addr, GUNZIP_BUF(sc), len);

    /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
    else
        ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len);
}

/* Replicate one 64-bit value (taken from the first two dwords of `data`)
 * `len64` times starting at `addr`, staging it through the gunzip buffer.
 */
static void ecore_init_wr_64(struct bxe_softc *sc, uint32_t addr,
                             const uint32_t *data, uint32_t len64)
{
    uint32_t buf_len32 = FW_BUF_SIZE/4;
    uint32_t len = len64*2;          /* length in dwords */
    uint64_t data64 = 0;
    uint32_t i;

    /* 64 bit value is in a blob: first low DWORD, then high DWORD */
    data64 = HILO_U64((*(data + 1)), (*data));

    /* Only FW_BUF_SIZE/8 qwords fit in the scratch buffer at once */
    len64 = min((uint32_t)(FW_BUF_SIZE/8), len64);
    for (i = 0; i < len64; i++) {
        uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i;

        *pdata = data64;
    }

    for (i = 0; i < len; i += buf_len32) {
        uint32_t cur_len = min(buf_len32, len - i);

        ecore_write_big_buf_wb(sc, addr + i*4, cur_len);
    }
}

/*********************************************************
   There are different blobs for each PRAM section.
   In addition, each blob write operation is divided into a few operations
   in order to decrease the amount of phys. contiguous buffer needed.
   Thus, when we select a blob the address may be with some offset
   from the beginning of PRAM section.
   The same holds for the INT_TABLE sections.
**********************************************************/
#define IF_IS_INT_TABLE_ADDR(base, addr) \
    if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))

#define IF_IS_PRAM_ADDR(base, addr) \
    if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))

/* Map a target register address to the firmware blob (INT_TABLE or PRAM
 * data of the matching SEM processor) that should be written there.
 * Returns `data` unchanged if `addr` falls in none of the known ranges.
 */
static const uint8_t *ecore_sel_blob(struct bxe_softc *sc, uint32_t addr,
                                     const uint8_t *data)
{
    IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
        data = INIT_TSEM_INT_TABLE_DATA(sc);
    else
        IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
            data = INIT_CSEM_INT_TABLE_DATA(sc);
    else
        IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
            data = INIT_USEM_INT_TABLE_DATA(sc);
    else
        IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
            data = INIT_XSEM_INT_TABLE_DATA(sc);
    else
        IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
            data = INIT_TSEM_PRAM_DATA(sc);
    else
        IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
            data = INIT_CSEM_PRAM_DATA(sc);
    else
        IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
            data = INIT_USEM_PRAM_DATA(sc);
    else
        IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
            data = INIT_XSEM_PRAM_DATA(sc);

    return data;
}

/* Wide-bus write of `len` dwords from a virtually-contiguous `data` array,
 * via DMAE when possible.
 */
static void ecore_init_wr_wb(struct bxe_softc *sc, uint32_t addr,
                             const uint32_t *data, uint32_t len)
{
    if (DMAE_READY(sc))
        VIRT_WR_DMAE_LEN(sc, data, addr, len, 0);

    /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
    else if (CHIP_IS_E1(sc))
        ecore_init_ind_wr(sc, addr, data, len);

    /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
    else
        ecore_init_str_wr(sc, addr, data, len);
}

#ifndef FW_ZIP_SUPPORT
/* Write `len` dwords of an *uncompressed* firmware blob (selected by
 * address) to `addr`.  Only built when compressed firmware is not used.
 */
static void ecore_init_fw(struct bxe_softc *sc, uint32_t addr, uint32_t len)
{
    const uint8_t *data = NULL;

    data = ecore_sel_blob(sc, addr, (const uint8_t *)data);

    if (DMAE_READY(sc))
        VIRT_WR_DMAE_LEN(sc, data, addr, len, 1);

    /* in E1 BIOS initiated ZLR may interrupt widebus writes */
    else if (CHIP_IS_E1(sc))
        ecore_init_ind_wr(sc, addr, (const uint32_t *)data, len);

    /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
    else
        ecore_init_str_wr(sc, addr, (const uint32_t *)data, len);
}

#endif

/* Write a 64-bit value (low dword first) to a wide-bus register pair. */
static void ecore_wr_64(struct bxe_softc *sc, uint32_t reg, uint32_t val_lo,
                        uint32_t val_hi)
{
    uint32_t wb_write[2];

    wb_write[0] = val_lo;
    wb_write[1] = val_hi;
    REG_WR_DMAE_LEN(sc, reg, wb_write, 2);
}

/* Decompress `len` bytes of a zipped blob (at dword offset `blob_off`
 * within the blob selected for `addr`) into the gunzip buffer, byte-swap
 * the result to LE, and write it out to `addr`.
 * NOTE(review): a gunzip failure returns silently with no write performed.
 */
static void ecore_init_wr_zp(struct bxe_softc *sc, uint32_t addr, uint32_t len,
                             uint32_t blob_off)
{
    const uint8_t *data = NULL;
    int rc;
    uint32_t i;

    data = ecore_sel_blob(sc, addr, data) + blob_off*4;

    rc = ecore_gunzip(sc, data, len);
    if (rc)
        return;

    /* gunzip_outlen is in dwords */
    len = GUNZIP_OUTLEN(sc);
    for (i = 0; i < len; i++)
        ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t)
                ECORE_CPU_TO_LE32(((uint32_t *)GUNZIP_BUF(sc))[i]);

    ecore_write_big_buf_wb(sc, addr, len);
}

/* Execute all firmware init operations registered for the given hardware
 * `block` and init `stage`: interpret the op array in the range
 * [op_start, op_end) and dispatch each opcode to the matching writer.
 */
static void ecore_init_block(struct bxe_softc *sc, uint32_t block, uint32_t stage)
{
    uint16_t op_start =
        INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
                                           STAGE_START)];
    uint16_t op_end =
        INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
                                           STAGE_END)];
    const union init_op *op;
    uint32_t op_idx, op_type, addr, len;
    const uint32_t *data, *data_base;

    /* If empty block */
    if (op_start == op_end)
        return;

    data_base = INIT_DATA(sc);

    for (op_idx = op_start; op_idx < op_end; op_idx++) {

        op = (const union init_op *)&(INIT_OPS(sc)[op_idx]);
        /* Get generic data */
        op_type = op->raw.op;
        addr = op->raw.offset;
        /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
         * OP_WR64 (we assume that op_arr_write and op_write have the
         * same structure).
         */
        len = op->arr_wr.data_len;
        data = data_base + op->arr_wr.data_off;

        switch (op_type) {
        case OP_RD:
            REG_RD(sc, addr);
            break;
        case OP_WR:
            REG_WR(sc, addr, op->write.val);
            break;
        case OP_SW:
            ecore_init_str_wr(sc, addr, data, len);
            break;
        case OP_WB:
            ecore_init_wr_wb(sc, addr, data, len);
            break;
#ifndef FW_ZIP_SUPPORT
        case OP_FW:
            ecore_init_fw(sc, addr, len);
            break;
#endif
        case OP_ZR:
            ecore_init_fill(sc, addr, 0, op->zero.len, 0);
            break;
        case OP_WB_ZR:
            ecore_init_fill(sc, addr, 0, op->zero.len, 1);
            break;
        case OP_ZP:
            ecore_init_wr_zp(sc, addr, len,
                             op->arr_wr.data_off);
            break;
        case OP_WR_64:
            ecore_init_wr_64(sc, addr, data, len);
            break;
        case OP_IF_MODE_AND:
            /* if any of the flags doesn't match, skip the
             * conditional block.
             */
            if ((INIT_MODE_FLAGS(sc) &
                 op->if_mode.mode_bit_map) !=
                op->if_mode.mode_bit_map)
                op_idx += op->if_mode.cmd_offset;
            break;
        case OP_IF_MODE_OR:
            /* if all the flags don't match, skip the conditional
             * block.
             */
            if ((INIT_MODE_FLAGS(sc) &
                 op->if_mode.mode_bit_map) == 0)
                op_idx += op->if_mode.cmd_offset;
            break;
        /* the following opcodes are unused at the moment. */
        case OP_IF_PHASE:
        case OP_RT:
        case OP_DELAY:
        case OP_VERIFY:
        default:
            /* Should never get here! */

            break;
        }
    }
}


/****************************************************************************
* PXP Arbiter
****************************************************************************/
/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

#define NUM_WR_Q        13
#define NUM_RD_Q        29
#define MAX_RD_ORD      3
#define MAX_WR_ORD      2

/* configuration for one arbiter queue */
struct arb_line {
    int l;          /* credit (L) value */
    int add;        /* credit increment */
    int ubound;     /* upper bound */
};

/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
        { {4, 8,  4},  {4,  8,  4},  {4,  8,  4},  {4,  8,  4}  },
        { {4, 3,  3},  {4,  3,  3},  {4,  3,  3},  {4,  3,  3}  },
        { {8, 3,  6},  {16, 3,  11}, {16, 3,  11}, {16, 3,  11} },
        { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
/* 10 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
/* 20 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
        { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};

/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */ { {4, 6,  3},  {4,  6,  3},  {4,  6,  3}  },
        { {4, 2,  3},  {4,  2,  3},  {4,  2,  3}  },
        { {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
        { {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
        { {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
        { {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
        { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
        { {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
        { {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
/* 10 */{ {8, 9,  6},  {16, 9,  11}, {32, 9,  21} },
        { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
        { {8, 9,  6},  {16, 9,  11}, {16, 9,  11} },
        { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};

/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
         PXP2_REG_RQ_BW_RD_UBOUND0},
        {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
         PXP2_REG_PSWRQ_BW_UB1},
        {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
         PXP2_REG_PSWRQ_BW_UB2},
        {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
         PXP2_REG_PSWRQ_BW_UB3},
        {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
         PXP2_REG_RQ_BW_RD_UBOUND4},
        {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
         PXP2_REG_RQ_BW_RD_UBOUND5},
        {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
         PXP2_REG_PSWRQ_BW_UB6},
        {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
         PXP2_REG_PSWRQ_BW_UB7},
        {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
         PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
         PXP2_REG_PSWRQ_BW_UB9},
        {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
         PXP2_REG_PSWRQ_BW_UB10},
        {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
         PXP2_REG_PSWRQ_BW_UB11},
        {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
         PXP2_REG_RQ_BW_RD_UBOUND12},
        {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
         PXP2_REG_RQ_BW_RD_UBOUND13},
        {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
         PXP2_REG_RQ_BW_RD_UBOUND14},
        {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
         PXP2_REG_RQ_BW_RD_UBOUND15},
        {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
         PXP2_REG_RQ_BW_RD_UBOUND16},
        {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
         PXP2_REG_RQ_BW_RD_UBOUND17},
        {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
         PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
         PXP2_REG_RQ_BW_RD_UBOUND19},
        {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
         PXP2_REG_RQ_BW_RD_UBOUND20},
        {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
         PXP2_REG_RQ_BW_RD_UBOUND22},
        {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
         PXP2_REG_RQ_BW_RD_UBOUND23},
        {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
         PXP2_REG_RQ_BW_RD_UBOUND24},
        {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
         PXP2_REG_RQ_BW_RD_UBOUND25},
        {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
         PXP2_REG_RQ_BW_RD_UBOUND26},
        {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
         PXP2_REG_RQ_BW_RD_UBOUND27},
        {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
         PXP2_REG_PSWRQ_BW_UB28}
};

/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
         PXP2_REG_PSWRQ_BW_UB1},
        {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
         PXP2_REG_PSWRQ_BW_UB2},
        {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
         PXP2_REG_PSWRQ_BW_UB3},
        {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
         PXP2_REG_PSWRQ_BW_UB6},
        {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
         PXP2_REG_PSWRQ_BW_UB7},
        {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
         PXP2_REG_PSWRQ_BW_UB8},
        {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
         PXP2_REG_PSWRQ_BW_UB9},
        {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
         PXP2_REG_PSWRQ_BW_UB10},
        {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
         PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
         PXP2_REG_PSWRQ_BW_UB28},
        {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
         PXP2_REG_RQ_BW_WR_UBOUND29},
        {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
         PXP2_REG_RQ_BW_WR_UBOUND30}
};

/* Program the PXP read/write arbiter for the given read (`r_order`) and
 * write (`w_order`) max-request-size orders, clamping them to the table
 * limits, then configure per-queue bandwidth and MPS thresholds.
 */
static void ecore_init_pxp_arb(struct bxe_softc *sc, int r_order,
                               int w_order)
{
    uint32_t val, i;

    if (r_order > MAX_RD_ORD) {
        ECORE_MSG(sc, "read order of %d order adjusted to %d\n",
                  r_order, MAX_RD_ORD);
        r_order = MAX_RD_ORD;
    }
    if (w_order > MAX_WR_ORD) {
        ECORE_MSG(sc, "write order of %d order adjusted to %d\n",
                  w_order, MAX_WR_ORD);
        w_order = MAX_WR_ORD;
    }
    if (CHIP_REV_IS_FPGA(sc)) {
        ECORE_MSG(sc, "write order adjusted to 1 for FPGA\n");
        w_order = 0;
    }
    ECORE_MSG(sc, "read order %d write order %d\n", r_order, w_order);

    for (i = 0; i < NUM_RD_Q-1; i++) {
        REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
        REG_WR(sc, read_arb_addr[i].add,
               read_arb_data[i][r_order].add);
        REG_WR(sc, read_arb_addr[i].ubound,
               read_arb_data[i][r_order].ubound);
    }

    for (i = 0; i < NUM_WR_Q-1; i++) {
        /* Queues 29 and 30 have dedicated full-width registers; the
         * others share registers with the read side, so the write
         * fields are OR'ed in at their bit offsets.
         */
        if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
            (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {

            REG_WR(sc, write_arb_addr[i].l,
                   write_arb_data[i][w_order].l);

            REG_WR(sc, write_arb_addr[i].add,
                   write_arb_data[i][w_order].add);

            REG_WR(sc, write_arb_addr[i].ubound,
                   write_arb_data[i][w_order].ubound);
        } else {

            val = REG_RD(sc, write_arb_addr[i].l);
            REG_WR(sc, write_arb_addr[i].l,
                   val | (write_arb_data[i][w_order].l << 10));

            val = REG_RD(sc, write_arb_addr[i].add);
            REG_WR(sc, write_arb_addr[i].add,
                   val | (write_arb_data[i][w_order].add << 10));

            val = REG_RD(sc, write_arb_addr[i].ubound);
            REG_WR(sc, write_arb_addr[i].ubound,
                   val | (write_arb_data[i][w_order].ubound << 7));
        }
    }

    /* The last queue of each table is packed into one register.
     * NOTE(review): the write-queue data goes to PSWRQ_BW_RD and the
     * read-queue data to PSWRQ_BW_WR exactly as written here; this
     * matches the upstream driver — do not "fix" the apparent swap.
     */
    val = write_arb_data[NUM_WR_Q-1][w_order].add;
    val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
    val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
    REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val);

    val = read_arb_data[NUM_RD_Q-1][r_order].add;
    val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
    val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
    REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val);

    REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order);
    REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order);
    REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order);
    REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order);

    if ((CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) && (r_order == MAX_RD_ORD))
        REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

    if (CHIP_IS_E3(sc))
        REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
    else if (CHIP_IS_E2(sc))
        REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
    else
        REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

    if (!CHIP_IS_E1(sc)) {
        /* MPS w_order optimal TH presently TH
         * 128         0     0       2
         * 256         1     1       3
         * >=512       2     2       3
         */
        /* DMAE is special */
        if (!CHIP_IS_E1H(sc)) {
            /* E2 can use optimal TH */
            val = w_order;
            REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val);
        } else {
            val = ((w_order == 0) ? 2 : 3);
            REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2);
        }

        REG_WR(sc, PXP2_REG_WR_HC_MPS, val);
        REG_WR(sc, PXP2_REG_WR_USDM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_QM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_TM_MPS, val);
        REG_WR(sc, PXP2_REG_WR_SRC_MPS, val);
        REG_WR(sc, PXP2_REG_WR_DBG_MPS, val);
        REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
    }

    /* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST     0x2980
    val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
    val &= 0xFF;
    if (val <= 0x20)
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
621255736Sdavidch 622255736Sdavidch/**************************************************************************** 623255736Sdavidch* ILT management 624255736Sdavidch****************************************************************************/ 625255736Sdavidch/* 626255736Sdavidch * This codes hides the low level HW interaction for ILT management and 627255736Sdavidch * configuration. The API consists of a shadow ILT table which is set by the 628255736Sdavidch * driver and a set of routines to use it to configure the HW. 629255736Sdavidch * 630255736Sdavidch */ 631255736Sdavidch 632255736Sdavidch/* ILT HW init operations */ 633255736Sdavidch 634255736Sdavidch/* ILT memory management operations */ 635255736Sdavidch#define ILT_MEMOP_ALLOC 0 636255736Sdavidch#define ILT_MEMOP_FREE 1 637255736Sdavidch 638255736Sdavidch/* the phys address is shifted right 12 bits and has an added 639255736Sdavidch * 1=valid bit added to the 53rd bit 640255736Sdavidch * then since this is a wide register(TM) 641255736Sdavidch * we split it into two 32 bit writes 642255736Sdavidch */ 643255736Sdavidch#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) 644255736Sdavidch#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) 645255736Sdavidch#define ILT_RANGE(f, l) (((l) << 10) | f) 646255736Sdavidch 647255736Sdavidchstatic int ecore_ilt_line_mem_op(struct bxe_softc *sc, 648255736Sdavidch struct ilt_line *line, uint32_t size, uint8_t memop) 649255736Sdavidch{ 650255736Sdavidch if (memop == ILT_MEMOP_FREE) { 651255736Sdavidch ECORE_ILT_FREE(line->page, line->page_mapping, line->size); 652255736Sdavidch return 0; 653255736Sdavidch } 654255736Sdavidch ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size); 655255736Sdavidch if (!line->page) 656255736Sdavidch return -1; 657255736Sdavidch line->size = size; 658255736Sdavidch return 0; 659255736Sdavidch} 660255736Sdavidch 661255736Sdavidch 662255736Sdavidchstatic int ecore_ilt_client_mem_op(struct bxe_softc *sc, int 
cli_num, 663255736Sdavidch uint8_t memop) 664255736Sdavidch{ 665255736Sdavidch int i, rc; 666255736Sdavidch struct ecore_ilt *ilt = SC_ILT(sc); 667255736Sdavidch struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; 668255736Sdavidch 669255736Sdavidch if (!ilt || !ilt->lines) 670255736Sdavidch return -1; 671255736Sdavidch 672255736Sdavidch if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) 673255736Sdavidch return 0; 674255736Sdavidch 675255736Sdavidch for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { 676255736Sdavidch rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i], 677255736Sdavidch ilt_cli->page_size, memop); 678255736Sdavidch } 679255736Sdavidch return rc; 680255736Sdavidch} 681255736Sdavidch 682255736Sdavidchstatic inline int ecore_ilt_mem_op_cnic(struct bxe_softc *sc, uint8_t memop) 683255736Sdavidch{ 684255736Sdavidch int rc = 0; 685255736Sdavidch 686255736Sdavidch if (CONFIGURE_NIC_MODE(sc)) 687255736Sdavidch rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); 688255736Sdavidch if (!rc) 689255736Sdavidch rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop); 690255736Sdavidch 691255736Sdavidch return rc; 692255736Sdavidch} 693255736Sdavidch 694255736Sdavidchstatic int ecore_ilt_mem_op(struct bxe_softc *sc, uint8_t memop) 695255736Sdavidch{ 696255736Sdavidch int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop); 697255736Sdavidch if (!rc) 698255736Sdavidch rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop); 699255736Sdavidch if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) 700255736Sdavidch rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); 701255736Sdavidch 702255736Sdavidch return rc; 703255736Sdavidch} 704255736Sdavidch 705255736Sdavidchstatic void ecore_ilt_line_wr(struct bxe_softc *sc, int abs_idx, 706255736Sdavidch ecore_dma_addr_t page_mapping) 707255736Sdavidch{ 708255736Sdavidch uint32_t reg; 709255736Sdavidch 710255736Sdavidch if (CHIP_IS_E1(sc)) 711255736Sdavidch reg = 
PXP2_REG_RQ_ONCHIP_AT + abs_idx*8; 712255736Sdavidch else 713255736Sdavidch reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; 714255736Sdavidch 715255736Sdavidch ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); 716255736Sdavidch} 717255736Sdavidch 718255736Sdavidchstatic void ecore_ilt_line_init_op(struct bxe_softc *sc, 719255736Sdavidch struct ecore_ilt *ilt, int idx, uint8_t initop) 720255736Sdavidch{ 721255736Sdavidch ecore_dma_addr_t null_mapping; 722255736Sdavidch int abs_idx = ilt->start_line + idx; 723255736Sdavidch 724255736Sdavidch 725255736Sdavidch switch (initop) { 726255736Sdavidch case INITOP_INIT: 727255736Sdavidch /* set in the init-value array */ 728255736Sdavidch case INITOP_SET: 729255736Sdavidch ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping); 730255736Sdavidch break; 731255736Sdavidch case INITOP_CLEAR: 732255736Sdavidch null_mapping = 0; 733255736Sdavidch ecore_ilt_line_wr(sc, abs_idx, null_mapping); 734255736Sdavidch break; 735255736Sdavidch } 736255736Sdavidch} 737255736Sdavidch 738255736Sdavidchstatic void ecore_ilt_boundry_init_op(struct bxe_softc *sc, 739255736Sdavidch struct ilt_client_info *ilt_cli, 740255736Sdavidch uint32_t ilt_start, uint8_t initop) 741255736Sdavidch{ 742255736Sdavidch uint32_t start_reg = 0; 743255736Sdavidch uint32_t end_reg = 0; 744255736Sdavidch 745255736Sdavidch /* The boundary is either SET or INIT, 746255736Sdavidch CLEAR => SET and for now SET ~~ INIT */ 747255736Sdavidch 748255736Sdavidch /* find the appropriate regs */ 749255736Sdavidch if (CHIP_IS_E1(sc)) { 750255736Sdavidch switch (ilt_cli->client_num) { 751255736Sdavidch case ILT_CLIENT_CDU: 752255736Sdavidch start_reg = PXP2_REG_PSWRQ_CDU0_L2P; 753255736Sdavidch break; 754255736Sdavidch case ILT_CLIENT_QM: 755255736Sdavidch start_reg = PXP2_REG_PSWRQ_QM0_L2P; 756255736Sdavidch break; 757255736Sdavidch case ILT_CLIENT_SRC: 758255736Sdavidch start_reg = PXP2_REG_PSWRQ_SRC0_L2P; 759255736Sdavidch break; 760255736Sdavidch case 
ILT_CLIENT_TM: 761255736Sdavidch start_reg = PXP2_REG_PSWRQ_TM0_L2P; 762255736Sdavidch break; 763255736Sdavidch } 764255736Sdavidch REG_WR(sc, start_reg + SC_FUNC(sc)*4, 765255736Sdavidch ILT_RANGE((ilt_start + ilt_cli->start), 766255736Sdavidch (ilt_start + ilt_cli->end))); 767255736Sdavidch } else { 768255736Sdavidch switch (ilt_cli->client_num) { 769255736Sdavidch case ILT_CLIENT_CDU: 770255736Sdavidch start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; 771255736Sdavidch end_reg = PXP2_REG_RQ_CDU_LAST_ILT; 772255736Sdavidch break; 773255736Sdavidch case ILT_CLIENT_QM: 774255736Sdavidch start_reg = PXP2_REG_RQ_QM_FIRST_ILT; 775255736Sdavidch end_reg = PXP2_REG_RQ_QM_LAST_ILT; 776255736Sdavidch break; 777255736Sdavidch case ILT_CLIENT_SRC: 778255736Sdavidch start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; 779255736Sdavidch end_reg = PXP2_REG_RQ_SRC_LAST_ILT; 780255736Sdavidch break; 781255736Sdavidch case ILT_CLIENT_TM: 782255736Sdavidch start_reg = PXP2_REG_RQ_TM_FIRST_ILT; 783255736Sdavidch end_reg = PXP2_REG_RQ_TM_LAST_ILT; 784255736Sdavidch break; 785255736Sdavidch } 786255736Sdavidch REG_WR(sc, start_reg, (ilt_start + ilt_cli->start)); 787255736Sdavidch REG_WR(sc, end_reg, (ilt_start + ilt_cli->end)); 788255736Sdavidch } 789255736Sdavidch} 790255736Sdavidch 791255736Sdavidchstatic void ecore_ilt_client_init_op_ilt(struct bxe_softc *sc, 792255736Sdavidch struct ecore_ilt *ilt, 793255736Sdavidch struct ilt_client_info *ilt_cli, 794255736Sdavidch uint8_t initop) 795255736Sdavidch{ 796255736Sdavidch int i; 797255736Sdavidch 798255736Sdavidch if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) 799255736Sdavidch return; 800255736Sdavidch 801255736Sdavidch for (i = ilt_cli->start; i <= ilt_cli->end; i++) 802255736Sdavidch ecore_ilt_line_init_op(sc, ilt, i, initop); 803255736Sdavidch 804255736Sdavidch /* init/clear the ILT boundries */ 805255736Sdavidch ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line, initop); 806255736Sdavidch} 807255736Sdavidch 808255736Sdavidchstatic void 
ecore_ilt_client_init_op(struct bxe_softc *sc, 809255736Sdavidch struct ilt_client_info *ilt_cli, uint8_t initop) 810255736Sdavidch{ 811255736Sdavidch struct ecore_ilt *ilt = SC_ILT(sc); 812255736Sdavidch 813255736Sdavidch ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop); 814255736Sdavidch} 815255736Sdavidch 816255736Sdavidchstatic void ecore_ilt_client_id_init_op(struct bxe_softc *sc, 817255736Sdavidch int cli_num, uint8_t initop) 818255736Sdavidch{ 819255736Sdavidch struct ecore_ilt *ilt = SC_ILT(sc); 820255736Sdavidch struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; 821255736Sdavidch 822255736Sdavidch ecore_ilt_client_init_op(sc, ilt_cli, initop); 823255736Sdavidch} 824255736Sdavidch 825255736Sdavidchstatic inline void ecore_ilt_init_op_cnic(struct bxe_softc *sc, uint8_t initop) 826255736Sdavidch{ 827255736Sdavidch if (CONFIGURE_NIC_MODE(sc)) 828255736Sdavidch ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); 829255736Sdavidch ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop); 830255736Sdavidch} 831255736Sdavidch 832255736Sdavidchstatic void ecore_ilt_init_op(struct bxe_softc *sc, uint8_t initop) 833255736Sdavidch{ 834255736Sdavidch ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop); 835255736Sdavidch ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop); 836255736Sdavidch if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) 837255736Sdavidch ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); 838255736Sdavidch} 839255736Sdavidch 840255736Sdavidchstatic void ecore_ilt_init_client_psz(struct bxe_softc *sc, int cli_num, 841255736Sdavidch uint32_t psz_reg, uint8_t initop) 842255736Sdavidch{ 843255736Sdavidch struct ecore_ilt *ilt = SC_ILT(sc); 844255736Sdavidch struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; 845255736Sdavidch 846255736Sdavidch if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) 847255736Sdavidch return; 848255736Sdavidch 849255736Sdavidch switch (initop) { 850255736Sdavidch case INITOP_INIT: 851255736Sdavidch /* 
set in the init-value array */ 852255736Sdavidch case INITOP_SET: 853255736Sdavidch REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12)); 854255736Sdavidch break; 855255736Sdavidch case INITOP_CLEAR: 856255736Sdavidch break; 857255736Sdavidch } 858255736Sdavidch} 859255736Sdavidch 860255736Sdavidch/* 861255736Sdavidch * called during init common stage, ilt clients should be initialized 862255736Sdavidch * prioir to calling this function 863255736Sdavidch */ 864255736Sdavidchstatic void ecore_ilt_init_page_size(struct bxe_softc *sc, uint8_t initop) 865255736Sdavidch{ 866255736Sdavidch ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU, 867255736Sdavidch PXP2_REG_RQ_CDU_P_SIZE, initop); 868255736Sdavidch ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM, 869255736Sdavidch PXP2_REG_RQ_QM_P_SIZE, initop); 870255736Sdavidch ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC, 871255736Sdavidch PXP2_REG_RQ_SRC_P_SIZE, initop); 872255736Sdavidch ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM, 873255736Sdavidch PXP2_REG_RQ_TM_P_SIZE, initop); 874255736Sdavidch} 875255736Sdavidch 876255736Sdavidch/**************************************************************************** 877255736Sdavidch* QM initializations 878255736Sdavidch****************************************************************************/ 879255736Sdavidch#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */ 880255736Sdavidch#define QM_INIT_MIN_CID_COUNT 31 881255736Sdavidch#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) 882255736Sdavidch 883255736Sdavidch/* called during init port stage */ 884255736Sdavidchstatic void ecore_qm_init_cid_count(struct bxe_softc *sc, int qm_cid_count, 885255736Sdavidch uint8_t initop) 886255736Sdavidch{ 887255736Sdavidch int port = SC_PORT(sc); 888255736Sdavidch 889255736Sdavidch if (QM_INIT(qm_cid_count)) { 890255736Sdavidch switch (initop) { 891255736Sdavidch case INITOP_INIT: 892255736Sdavidch /* set in the init-value array */ 893255736Sdavidch case INITOP_SET: 
894255736Sdavidch REG_WR(sc, QM_REG_CONNNUM_0 + port*4, 895255736Sdavidch qm_cid_count/16 - 1); 896255736Sdavidch break; 897255736Sdavidch case INITOP_CLEAR: 898255736Sdavidch break; 899255736Sdavidch } 900255736Sdavidch } 901255736Sdavidch} 902255736Sdavidch 903255736Sdavidchstatic void ecore_qm_set_ptr_table(struct bxe_softc *sc, int qm_cid_count, 904255736Sdavidch uint32_t base_reg, uint32_t reg) 905255736Sdavidch{ 906255736Sdavidch int i; 907255736Sdavidch uint32_t wb_data[2] = {0, 0}; 908255736Sdavidch for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { 909255736Sdavidch REG_WR(sc, base_reg + i*4, 910255736Sdavidch qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); 911255736Sdavidch ecore_init_wr_wb(sc, reg + i*8, 912255736Sdavidch wb_data, 2); 913255736Sdavidch } 914255736Sdavidch} 915255736Sdavidch 916255736Sdavidch/* called during init common stage */ 917255736Sdavidchstatic void ecore_qm_init_ptr_table(struct bxe_softc *sc, int qm_cid_count, 918255736Sdavidch uint8_t initop) 919255736Sdavidch{ 920255736Sdavidch if (!QM_INIT(qm_cid_count)) 921255736Sdavidch return; 922255736Sdavidch 923255736Sdavidch switch (initop) { 924255736Sdavidch case INITOP_INIT: 925255736Sdavidch /* set in the init-value array */ 926255736Sdavidch case INITOP_SET: 927255736Sdavidch ecore_qm_set_ptr_table(sc, qm_cid_count, 928255736Sdavidch QM_REG_BASEADDR, QM_REG_PTRTBL); 929255736Sdavidch if (CHIP_IS_E1H(sc)) 930255736Sdavidch ecore_qm_set_ptr_table(sc, qm_cid_count, 931255736Sdavidch QM_REG_BASEADDR_EXT_A, 932255736Sdavidch QM_REG_PTRTBL_EXT_A); 933255736Sdavidch break; 934255736Sdavidch case INITOP_CLEAR: 935255736Sdavidch break; 936255736Sdavidch } 937255736Sdavidch} 938255736Sdavidch 939255736Sdavidch/**************************************************************************** 940255736Sdavidch* SRC initializations 941255736Sdavidch****************************************************************************/ 942255736Sdavidch#ifdef ECORE_L5 943255736Sdavidch/* called during init func 
stage */ 944255736Sdavidchstatic void ecore_src_init_t2(struct bxe_softc *sc, struct src_ent *t2, 945255736Sdavidch ecore_dma_addr_t t2_mapping, int src_cid_count) 946255736Sdavidch{ 947255736Sdavidch int i; 948255736Sdavidch int port = SC_PORT(sc); 949255736Sdavidch 950255736Sdavidch /* Initialize T2 */ 951255736Sdavidch for (i = 0; i < src_cid_count-1; i++) 952255736Sdavidch t2[i].next = (uint64_t)(t2_mapping + 953255736Sdavidch (i+1)*sizeof(struct src_ent)); 954255736Sdavidch 955255736Sdavidch /* tell the searcher where the T2 table is */ 956255736Sdavidch REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count); 957255736Sdavidch 958255736Sdavidch ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16, 959255736Sdavidch U64_LO(t2_mapping), U64_HI(t2_mapping)); 960255736Sdavidch 961255736Sdavidch ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16, 962255736Sdavidch U64_LO((uint64_t)t2_mapping + 963255736Sdavidch (src_cid_count-1) * sizeof(struct src_ent)), 964255736Sdavidch U64_HI((uint64_t)t2_mapping + 965255736Sdavidch (src_cid_count-1) * sizeof(struct src_ent))); 966255736Sdavidch} 967255736Sdavidch#endif 968255736Sdavidch#endif /* ECORE_INIT_OPS_H */ 969