1/*- 2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/common/t4_hw.c 355244 2019-11-30 19:52:47Z np $"); 29 30#include "opt_inet.h" 31 32#include <sys/param.h> 33#include <sys/eventhandler.h> 34 35#include "common.h" 36#include "t4_regs.h" 37#include "t4_regs_values.h" 38#include "firmware/t4fw_interface.h" 39 40#undef msleep 41#define msleep(x) do { \ 42 if (cold) \ 43 DELAY((x) * 1000); \ 44 else \ 45 pause("t4hw", (x) * hz / 1000); \ 46} while (0) 47 48/** 49 * t4_wait_op_done_val - wait until an operation is completed 50 * @adapter: the adapter performing the operation 51 * @reg: the register to check for completion 52 * @mask: a single-bit field within @reg that indicates completion 53 * @polarity: the value of the field when the operation is completed 54 * @attempts: number of check iterations 55 * @delay: delay in usecs between iterations 56 * @valp: where to store the value of the register at completion time 57 * 58 * Wait until an operation is completed by checking a bit in a register 59 * up to @attempts times. If @valp is not NULL the value of the register 60 * at the time it indicated completion is stored there. Returns 0 if the 61 * operation completes and -EAGAIN otherwise. 
62 */ 63static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 64 int polarity, int attempts, int delay, u32 *valp) 65{ 66 while (1) { 67 u32 val = t4_read_reg(adapter, reg); 68 69 if (!!(val & mask) == polarity) { 70 if (valp) 71 *valp = val; 72 return 0; 73 } 74 if (--attempts == 0) 75 return -EAGAIN; 76 if (delay) 77 udelay(delay); 78 } 79} 80 81static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 82 int polarity, int attempts, int delay) 83{ 84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 85 delay, NULL); 86} 87 88/** 89 * t4_set_reg_field - set a register field to a value 90 * @adapter: the adapter to program 91 * @addr: the register address 92 * @mask: specifies the portion of the register to modify 93 * @val: the new value for the register field 94 * 95 * Sets a register field specified by the supplied mask to the 96 * given value. 97 */ 98void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 99 u32 val) 100{ 101 u32 v = t4_read_reg(adapter, addr) & ~mask; 102 103 t4_write_reg(adapter, addr, v | val); 104 (void) t4_read_reg(adapter, addr); /* flush */ 105} 106 107/** 108 * t4_read_indirect - read indirectly addressed registers 109 * @adap: the adapter 110 * @addr_reg: register holding the indirect address 111 * @data_reg: register holding the value of the indirect register 112 * @vals: where the read register values are stored 113 * @nregs: how many indirect registers to read 114 * @start_idx: index of first indirect register to read 115 * 116 * Reads registers that are accessed indirectly through an address/data 117 * register pair. 
118 */ 119void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 120 unsigned int data_reg, u32 *vals, 121 unsigned int nregs, unsigned int start_idx) 122{ 123 while (nregs--) { 124 t4_write_reg(adap, addr_reg, start_idx); 125 *vals++ = t4_read_reg(adap, data_reg); 126 start_idx++; 127 } 128} 129 130/** 131 * t4_write_indirect - write indirectly addressed registers 132 * @adap: the adapter 133 * @addr_reg: register holding the indirect addresses 134 * @data_reg: register holding the value for the indirect registers 135 * @vals: values to write 136 * @nregs: how many indirect registers to write 137 * @start_idx: address of first indirect register to write 138 * 139 * Writes a sequential block of registers that are accessed indirectly 140 * through an address/data register pair. 141 */ 142void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 143 unsigned int data_reg, const u32 *vals, 144 unsigned int nregs, unsigned int start_idx) 145{ 146 while (nregs--) { 147 t4_write_reg(adap, addr_reg, start_idx++); 148 t4_write_reg(adap, data_reg, *vals++); 149 } 150} 151 152/* 153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 154 * mechanism. This guarantees that we get the real value even if we're 155 * operating within a Virtual Machine and the Hypervisor is trapping our 156 * Configuration Space accesses. 157 * 158 * N.B. This routine should only be used as a last resort: the firmware uses 159 * the backdoor registers on a regular basis and we can end up 160 * conflicting with it's uses! 
161 */ 162u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) 163{ 164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 165 u32 val; 166 167 if (chip_id(adap) <= CHELSIO_T5) 168 req |= F_ENABLE; 169 else 170 req |= F_T6_ENABLE; 171 172 if (is_t4(adap)) 173 req |= F_LOCALCFG; 174 175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 177 178 /* 179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 180 * Configuration Space read. (None of the other fields matter when 181 * F_ENABLE is 0 so a simple register write is easier than a 182 * read-modify-write via t4_set_reg_field().) 183 */ 184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 185 186 return val; 187} 188 189/* 190 * t4_report_fw_error - report firmware error 191 * @adap: the adapter 192 * 193 * The adapter firmware can indicate error conditions to the host. 194 * If the firmware has indicated an error, print out the reason for 195 * the firmware error. 196 */ 197static void t4_report_fw_error(struct adapter *adap) 198{ 199 static const char *const reason[] = { 200 "Crash", /* PCIE_FW_EVAL_CRASH */ 201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 207 "Reserved", /* reserved */ 208 }; 209 u32 pcie_fw; 210 211 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 212 if (pcie_fw & F_PCIE_FW_ERR) { 213 adap->flags &= ~FW_OK; 214 CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n", 215 reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw); 216 if (pcie_fw != 0xffffffff) 217 t4_os_dump_devlog(adap); 218 } 219} 220 221/* 222 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
223 */ 224static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 225 u32 mbox_addr) 226{ 227 for ( ; nflit; nflit--, mbox_addr += 8) 228 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 229} 230 231/* 232 * Handle a FW assertion reported in a mailbox. 233 */ 234static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt) 235{ 236 CH_ALERT(adap, 237 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 238 asrt->u.assert.filename_0_7, 239 be32_to_cpu(asrt->u.assert.line), 240 be32_to_cpu(asrt->u.assert.x), 241 be32_to_cpu(asrt->u.assert.y)); 242} 243 244struct port_tx_state { 245 uint64_t rx_pause; 246 uint64_t tx_frames; 247}; 248 249static void 250read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state) 251{ 252 uint32_t rx_pause_reg, tx_frames_reg; 253 254 if (is_t4(sc)) { 255 tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L); 256 rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L); 257 } else { 258 tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L); 259 rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L); 260 } 261 262 tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg); 263 tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg); 264} 265 266static void 267read_tx_state(struct adapter *sc, struct port_tx_state *tx_state) 268{ 269 int i; 270 271 for_each_port(sc, i) 272 read_tx_state_one(sc, i, &tx_state[i]); 273} 274 275static void 276check_tx_state(struct adapter *sc, struct port_tx_state *tx_state) 277{ 278 uint32_t port_ctl_reg; 279 uint64_t tx_frames, rx_pause; 280 int i; 281 282 for_each_port(sc, i) { 283 rx_pause = tx_state[i].rx_pause; 284 tx_frames = tx_state[i].tx_frames; 285 read_tx_state_one(sc, i, &tx_state[i]); /* update */ 286 287 if (is_t4(sc)) 288 port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL); 289 else 290 port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL); 291 if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN && 292 rx_pause != tx_state[i].rx_pause && 293 tx_frames 
== tx_state[i].tx_frames) { 294 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0); 295 mdelay(1); 296 t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN); 297 } 298 } 299} 300 301#define X_CIM_PF_NOACCESS 0xeeeeeeee 302/** 303 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox 304 * @adap: the adapter 305 * @mbox: index of the mailbox to use 306 * @cmd: the command to write 307 * @size: command length in bytes 308 * @rpl: where to optionally store the reply 309 * @sleep_ok: if true we may sleep while awaiting command completion 310 * @timeout: time to wait for command to finish before timing out 311 * (negative implies @sleep_ok=false) 312 * 313 * Sends the given command to FW through the selected mailbox and waits 314 * for the FW to execute the command. If @rpl is not %NULL it is used to 315 * store the FW's reply to the command. The command and its optional 316 * reply are of the same length. Some FW commands like RESET and 317 * INITIALIZE can take a considerable amount of time to execute. 318 * @sleep_ok determines whether we may sleep while awaiting the response. 319 * If sleeping is allowed we use progressive backoff otherwise we spin. 320 * Note that passing in a negative @timeout is an alternate mechanism 321 * for specifying @sleep_ok=false. This is useful when a higher level 322 * interface allows for specification of @timeout but not @sleep_ok ... 323 * 324 * The return value is 0 on success or a negative errno on failure. A 325 * failure can happen either because we are not able to execute the 326 * command or FW executes it but signals an error. In the latter case 327 * the return value is the error code indicated by FW (negated). 
328 */ 329int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 330 int size, void *rpl, bool sleep_ok, int timeout) 331{ 332 /* 333 * We delay in small increments at first in an effort to maintain 334 * responsiveness for simple, fast executing commands but then back 335 * off to larger delays to a maximum retry delay. 336 */ 337 static const int delay[] = { 338 1, 1, 3, 5, 10, 10, 20, 50, 100 339 }; 340 u32 v; 341 u64 res; 342 int i, ms, delay_idx, ret, next_tx_check; 343 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); 344 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); 345 u32 ctl; 346 __be64 cmd_rpl[MBOX_LEN/8]; 347 u32 pcie_fw; 348 struct port_tx_state tx_state[MAX_NPORTS]; 349 350 if (adap->flags & CHK_MBOX_ACCESS) 351 ASSERT_SYNCHRONIZED_OP(adap); 352 353 if (size <= 0 || (size & 15) || size > MBOX_LEN) 354 return -EINVAL; 355 356 if (adap->flags & IS_VF) { 357 if (is_t6(adap)) 358 data_reg = FW_T6VF_MBDATA_BASE_ADDR; 359 else 360 data_reg = FW_T4VF_MBDATA_BASE_ADDR; 361 ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL); 362 } 363 364 /* 365 * If we have a negative timeout, that implies that we can't sleep. 366 */ 367 if (timeout < 0) { 368 sleep_ok = false; 369 timeout = -timeout; 370 } 371 372 /* 373 * Attempt to gain access to the mailbox. 374 */ 375 for (i = 0; i < 4; i++) { 376 ctl = t4_read_reg(adap, ctl_reg); 377 v = G_MBOWNER(ctl); 378 if (v != X_MBOWNER_NONE) 379 break; 380 } 381 382 /* 383 * If we were unable to gain access, report the error to our caller. 384 */ 385 if (v != X_MBOWNER_PL) { 386 t4_report_fw_error(adap); 387 ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT; 388 return ret; 389 } 390 391 /* 392 * If we gain ownership of the mailbox and there's a "valid" message 393 * in it, this is likely an asynchronous error message from the 394 * firmware. So we'll report that and then proceed on with attempting 395 * to issue our own command ... which may well fail if the error 396 * presaged the firmware crashing ... 
397 */ 398 if (ctl & F_MBMSGVALID) { 399 CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true); 400 } 401 402 /* 403 * Copy in the new mailbox command and send it on its way ... 404 */ 405 memset(cmd_rpl, 0, sizeof(cmd_rpl)); 406 memcpy(cmd_rpl, cmd, size); 407 CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false); 408 for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++) 409 t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i])); 410 411 if (adap->flags & IS_VF) { 412 /* 413 * For the VFs, the Mailbox Data "registers" are 414 * actually backed by T4's "MA" interface rather than 415 * PL Registers (as is the case for the PFs). Because 416 * these are in different coherency domains, the write 417 * to the VF's PL-register-backed Mailbox Control can 418 * race in front of the writes to the MA-backed VF 419 * Mailbox Data "registers". So we need to do a 420 * read-back on at least one byte of the VF Mailbox 421 * Data registers before doing the write to the VF 422 * Mailbox Control register. 423 */ 424 t4_read_reg(adap, data_reg); 425 } 426 427 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); 428 read_tx_state(adap, &tx_state[0]); /* also flushes the write_reg */ 429 next_tx_check = 1000; 430 delay_idx = 0; 431 ms = delay[0]; 432 433 /* 434 * Loop waiting for the reply; bail out if we time out or the firmware 435 * reports an error. 
436 */ 437 pcie_fw = 0; 438 for (i = 0; i < timeout; i += ms) { 439 if (!(adap->flags & IS_VF)) { 440 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 441 if (pcie_fw & F_PCIE_FW_ERR) 442 break; 443 } 444 445 if (i >= next_tx_check) { 446 check_tx_state(adap, &tx_state[0]); 447 next_tx_check = i + 1000; 448 } 449 450 if (sleep_ok) { 451 ms = delay[delay_idx]; /* last element may repeat */ 452 if (delay_idx < ARRAY_SIZE(delay) - 1) 453 delay_idx++; 454 msleep(ms); 455 } else { 456 mdelay(ms); 457 } 458 459 v = t4_read_reg(adap, ctl_reg); 460 if (v == X_CIM_PF_NOACCESS) 461 continue; 462 if (G_MBOWNER(v) == X_MBOWNER_PL) { 463 if (!(v & F_MBMSGVALID)) { 464 t4_write_reg(adap, ctl_reg, 465 V_MBOWNER(X_MBOWNER_NONE)); 466 continue; 467 } 468 469 /* 470 * Retrieve the command reply and release the mailbox. 471 */ 472 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg); 473 CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false); 474 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); 475 476 res = be64_to_cpu(cmd_rpl[0]); 477 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { 478 fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl); 479 res = V_FW_CMD_RETVAL(EIO); 480 } else if (rpl) 481 memcpy(rpl, cmd_rpl, size); 482 return -G_FW_CMD_RETVAL((int)res); 483 } 484 } 485 486 /* 487 * We timed out waiting for a reply to our mailbox command. Report 488 * the error and also check to see if the firmware reported any 489 * errors ... 
490 */ 491 CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n", 492 *(const u8 *)cmd, mbox, pcie_fw); 493 CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true); 494 CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true); 495 496 if (pcie_fw & F_PCIE_FW_ERR) { 497 ret = -ENXIO; 498 t4_report_fw_error(adap); 499 } else { 500 ret = -ETIMEDOUT; 501 t4_os_dump_devlog(adap); 502 } 503 504 t4_fatal_err(adap, true); 505 return ret; 506} 507 508int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 509 void *rpl, bool sleep_ok) 510{ 511 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, 512 sleep_ok, FW_CMD_MAX_TIMEOUT); 513 514} 515 516static int t4_edc_err_read(struct adapter *adap, int idx) 517{ 518 u32 edc_ecc_err_addr_reg; 519 u32 edc_bist_status_rdata_reg; 520 521 if (is_t4(adap)) { 522 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__); 523 return 0; 524 } 525 if (idx != MEM_EDC0 && idx != MEM_EDC1) { 526 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx); 527 return 0; 528 } 529 530 edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx); 531 edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx); 532 533 CH_WARN(adap, 534 "edc%d err addr 0x%x: 0x%x.\n", 535 idx, edc_ecc_err_addr_reg, 536 t4_read_reg(adap, edc_ecc_err_addr_reg)); 537 CH_WARN(adap, 538 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n", 539 edc_bist_status_rdata_reg, 540 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg), 541 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8), 542 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16), 543 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24), 544 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32), 545 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40), 546 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48), 547 (unsigned long 
long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56), 548 (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64)); 549 550 return 0; 551} 552 553/** 554 * t4_mc_read - read from MC through backdoor accesses 555 * @adap: the adapter 556 * @idx: which MC to access 557 * @addr: address of first byte requested 558 * @data: 64 bytes of data containing the requested address 559 * @ecc: where to store the corresponding 64-bit ECC word 560 * 561 * Read 64 bytes of data from MC starting at a 64-byte-aligned address 562 * that covers the requested address @addr. If @parity is not %NULL it 563 * is assigned the 64-bit ECC word for the read data. 564 */ 565int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 566{ 567 int i; 568 u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg; 569 u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg; 570 571 if (is_t4(adap)) { 572 mc_bist_cmd_reg = A_MC_BIST_CMD; 573 mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR; 574 mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN; 575 mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA; 576 mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN; 577 } else { 578 mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx); 579 mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx); 580 mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx); 581 mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA, 582 idx); 583 mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN, 584 idx); 585 } 586 587 if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST) 588 return -EBUSY; 589 t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU); 590 t4_write_reg(adap, mc_bist_cmd_len_reg, 64); 591 t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc); 592 t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) | 593 F_START_BIST | V_BIST_CMD_GAP(1)); 594 i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 595 if (i) 596 return i; 597 598#define MC_DATA(i) 
MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i) 599 600 for (i = 15; i >= 0; i--) 601 *data++ = ntohl(t4_read_reg(adap, MC_DATA(i))); 602 if (ecc) 603 *ecc = t4_read_reg64(adap, MC_DATA(16)); 604#undef MC_DATA 605 return 0; 606} 607 608/** 609 * t4_edc_read - read from EDC through backdoor accesses 610 * @adap: the adapter 611 * @idx: which EDC to access 612 * @addr: address of first byte requested 613 * @data: 64 bytes of data containing the requested address 614 * @ecc: where to store the corresponding 64-bit ECC word 615 * 616 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address 617 * that covers the requested address @addr. If @parity is not %NULL it 618 * is assigned the 64-bit ECC word for the read data. 619 */ 620int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 621{ 622 int i; 623 u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg; 624 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg; 625 626 if (is_t4(adap)) { 627 edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx); 628 edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx); 629 edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx); 630 edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN, 631 idx); 632 edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA, 633 idx); 634 } else { 635/* 636 * These macro are missing in t4_regs.h file. 637 * Added temporarily for testing. 
638 */ 639#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 640#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 641 edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx); 642 edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx); 643 edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx); 644 edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN, 645 idx); 646 edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA, 647 idx); 648#undef EDC_REG_T5 649#undef EDC_STRIDE_T5 650 } 651 652 if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST) 653 return -EBUSY; 654 t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU); 655 t4_write_reg(adap, edc_bist_cmd_len_reg, 64); 656 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 657 t4_write_reg(adap, edc_bist_cmd_reg, 658 V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST); 659 i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1); 660 if (i) 661 return i; 662 663#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i) 664 665 for (i = 15; i >= 0; i--) 666 *data++ = ntohl(t4_read_reg(adap, EDC_DATA(i))); 667 if (ecc) 668 *ecc = t4_read_reg64(adap, EDC_DATA(16)); 669#undef EDC_DATA 670 return 0; 671} 672 673/** 674 * t4_mem_read - read EDC 0, EDC 1 or MC into buffer 675 * @adap: the adapter 676 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 677 * @addr: address within indicated memory type 678 * @len: amount of memory to read 679 * @buf: host memory buffer 680 * 681 * Reads an [almost] arbitrary memory region in the firmware: the 682 * firmware memory address, length and host buffer must be aligned on 683 * 32-bit boudaries. The memory is returned as a raw byte sequence from 684 * the firmware's memory. If this memory contains data structures which 685 * contain multi-byte integers, it's the callers responsibility to 686 * perform appropriate byte order conversions. 
687 */ 688int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, 689 __be32 *buf) 690{ 691 u32 pos, start, end, offset; 692 int ret; 693 694 /* 695 * Argument sanity checks ... 696 */ 697 if ((addr & 0x3) || (len & 0x3)) 698 return -EINVAL; 699 700 /* 701 * The underlaying EDC/MC read routines read 64 bytes at a time so we 702 * need to round down the start and round up the end. We'll start 703 * copying out of the first line at (addr - start) a word at a time. 704 */ 705 start = rounddown2(addr, 64); 706 end = roundup2(addr + len, 64); 707 offset = (addr - start)/sizeof(__be32); 708 709 for (pos = start; pos < end; pos += 64, offset = 0) { 710 __be32 data[16]; 711 712 /* 713 * Read the chip's memory block and bail if there's an error. 714 */ 715 if ((mtype == MEM_MC) || (mtype == MEM_MC1)) 716 ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL); 717 else 718 ret = t4_edc_read(adap, mtype, pos, data, NULL); 719 if (ret) 720 return ret; 721 722 /* 723 * Copy the data into the caller's memory buffer. 724 */ 725 while (offset < 16 && len > 0) { 726 *buf++ = data[offset++]; 727 len -= sizeof(__be32); 728 } 729 } 730 731 return 0; 732} 733 734/* 735 * Return the specified PCI-E Configuration Space register from our Physical 736 * Function. We try first via a Firmware LDST Command (if fw_attach != 0) 737 * since we prefer to let the firmware own all of these registers, but if that 738 * fails we go for it directly ourselves. 739 */ 740u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach) 741{ 742 743 /* 744 * If fw_attach != 0, construct and send the Firmware LDST Command to 745 * retrieve the specified PCI-E Configuration Space register. 
746 */ 747 if (drv_fw_attach != 0) { 748 struct fw_ldst_cmd ldst_cmd; 749 int ret; 750 751 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 752 ldst_cmd.op_to_addrspace = 753 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 754 F_FW_CMD_REQUEST | 755 F_FW_CMD_READ | 756 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE)); 757 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); 758 ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1); 759 ldst_cmd.u.pcie.ctrl_to_fn = 760 (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf)); 761 ldst_cmd.u.pcie.r = reg; 762 763 /* 764 * If the LDST Command succeeds, return the result, otherwise 765 * fall through to reading it directly ourselves ... 766 */ 767 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), 768 &ldst_cmd); 769 if (ret == 0) 770 return be32_to_cpu(ldst_cmd.u.pcie.data[0]); 771 772 CH_WARN(adap, "Firmware failed to return " 773 "Configuration Space register %d, err = %d\n", 774 reg, -ret); 775 } 776 777 /* 778 * Read the desired Configuration Space register via the PCI-E 779 * Backdoor mechanism. 780 */ 781 return t4_hw_pci_read_cfg4(adap, reg); 782} 783 784/** 785 * t4_get_regs_len - return the size of the chips register set 786 * @adapter: the adapter 787 * 788 * Returns the size of the chip's BAR0 register space. 
789 */ 790unsigned int t4_get_regs_len(struct adapter *adapter) 791{ 792 unsigned int chip_version = chip_id(adapter); 793 794 switch (chip_version) { 795 case CHELSIO_T4: 796 if (adapter->flags & IS_VF) 797 return FW_T4VF_REGMAP_SIZE; 798 return T4_REGMAP_SIZE; 799 800 case CHELSIO_T5: 801 case CHELSIO_T6: 802 if (adapter->flags & IS_VF) 803 return FW_T4VF_REGMAP_SIZE; 804 return T5_REGMAP_SIZE; 805 } 806 807 CH_ERR(adapter, 808 "Unsupported chip version %d\n", chip_version); 809 return 0; 810} 811 812/** 813 * t4_get_regs - read chip registers into provided buffer 814 * @adap: the adapter 815 * @buf: register buffer 816 * @buf_size: size (in bytes) of register buffer 817 * 818 * If the provided register buffer isn't large enough for the chip's 819 * full register range, the register dump will be truncated to the 820 * register buffer's size. 821 */ 822void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 823{ 824 static const unsigned int t4_reg_ranges[] = { 825 0x1008, 0x1108, 826 0x1180, 0x1184, 827 0x1190, 0x1194, 828 0x11a0, 0x11a4, 829 0x11b0, 0x11b4, 830 0x11fc, 0x123c, 831 0x1300, 0x173c, 832 0x1800, 0x18fc, 833 0x3000, 0x30d8, 834 0x30e0, 0x30e4, 835 0x30ec, 0x5910, 836 0x5920, 0x5924, 837 0x5960, 0x5960, 838 0x5968, 0x5968, 839 0x5970, 0x5970, 840 0x5978, 0x5978, 841 0x5980, 0x5980, 842 0x5988, 0x5988, 843 0x5990, 0x5990, 844 0x5998, 0x5998, 845 0x59a0, 0x59d4, 846 0x5a00, 0x5ae0, 847 0x5ae8, 0x5ae8, 848 0x5af0, 0x5af0, 849 0x5af8, 0x5af8, 850 0x6000, 0x6098, 851 0x6100, 0x6150, 852 0x6200, 0x6208, 853 0x6240, 0x6248, 854 0x6280, 0x62b0, 855 0x62c0, 0x6338, 856 0x6370, 0x638c, 857 0x6400, 0x643c, 858 0x6500, 0x6524, 859 0x6a00, 0x6a04, 860 0x6a14, 0x6a38, 861 0x6a60, 0x6a70, 862 0x6a78, 0x6a78, 863 0x6b00, 0x6b0c, 864 0x6b1c, 0x6b84, 865 0x6bf0, 0x6bf8, 866 0x6c00, 0x6c0c, 867 0x6c1c, 0x6c84, 868 0x6cf0, 0x6cf8, 869 0x6d00, 0x6d0c, 870 0x6d1c, 0x6d84, 871 0x6df0, 0x6df8, 872 0x6e00, 0x6e0c, 873 0x6e1c, 0x6e84, 874 0x6ef0, 0x6ef8, 875 0x6f00, 
0x6f0c, 876 0x6f1c, 0x6f84, 877 0x6ff0, 0x6ff8, 878 0x7000, 0x700c, 879 0x701c, 0x7084, 880 0x70f0, 0x70f8, 881 0x7100, 0x710c, 882 0x711c, 0x7184, 883 0x71f0, 0x71f8, 884 0x7200, 0x720c, 885 0x721c, 0x7284, 886 0x72f0, 0x72f8, 887 0x7300, 0x730c, 888 0x731c, 0x7384, 889 0x73f0, 0x73f8, 890 0x7400, 0x7450, 891 0x7500, 0x7530, 892 0x7600, 0x760c, 893 0x7614, 0x761c, 894 0x7680, 0x76cc, 895 0x7700, 0x7798, 896 0x77c0, 0x77fc, 897 0x7900, 0x79fc, 898 0x7b00, 0x7b58, 899 0x7b60, 0x7b84, 900 0x7b8c, 0x7c38, 901 0x7d00, 0x7d38, 902 0x7d40, 0x7d80, 903 0x7d8c, 0x7ddc, 904 0x7de4, 0x7e04, 905 0x7e10, 0x7e1c, 906 0x7e24, 0x7e38, 907 0x7e40, 0x7e44, 908 0x7e4c, 0x7e78, 909 0x7e80, 0x7ea4, 910 0x7eac, 0x7edc, 911 0x7ee8, 0x7efc, 912 0x8dc0, 0x8e04, 913 0x8e10, 0x8e1c, 914 0x8e30, 0x8e78, 915 0x8ea0, 0x8eb8, 916 0x8ec0, 0x8f6c, 917 0x8fc0, 0x9008, 918 0x9010, 0x9058, 919 0x9060, 0x9060, 920 0x9068, 0x9074, 921 0x90fc, 0x90fc, 922 0x9400, 0x9408, 923 0x9410, 0x9458, 924 0x9600, 0x9600, 925 0x9608, 0x9638, 926 0x9640, 0x96bc, 927 0x9800, 0x9808, 928 0x9820, 0x983c, 929 0x9850, 0x9864, 930 0x9c00, 0x9c6c, 931 0x9c80, 0x9cec, 932 0x9d00, 0x9d6c, 933 0x9d80, 0x9dec, 934 0x9e00, 0x9e6c, 935 0x9e80, 0x9eec, 936 0x9f00, 0x9f6c, 937 0x9f80, 0x9fec, 938 0xd004, 0xd004, 939 0xd010, 0xd03c, 940 0xdfc0, 0xdfe0, 941 0xe000, 0xea7c, 942 0xf000, 0x11110, 943 0x11118, 0x11190, 944 0x19040, 0x1906c, 945 0x19078, 0x19080, 946 0x1908c, 0x190e4, 947 0x190f0, 0x190f8, 948 0x19100, 0x19110, 949 0x19120, 0x19124, 950 0x19150, 0x19194, 951 0x1919c, 0x191b0, 952 0x191d0, 0x191e8, 953 0x19238, 0x1924c, 954 0x193f8, 0x1943c, 955 0x1944c, 0x19474, 956 0x19490, 0x194e0, 957 0x194f0, 0x194f8, 958 0x19800, 0x19c08, 959 0x19c10, 0x19c90, 960 0x19ca0, 0x19ce4, 961 0x19cf0, 0x19d40, 962 0x19d50, 0x19d94, 963 0x19da0, 0x19de8, 964 0x19df0, 0x19e40, 965 0x19e50, 0x19e90, 966 0x19ea0, 0x19f4c, 967 0x1a000, 0x1a004, 968 0x1a010, 0x1a06c, 969 0x1a0b0, 0x1a0e4, 970 0x1a0ec, 0x1a0f4, 971 0x1a100, 0x1a108, 972 0x1a114, 
0x1a120, 973 0x1a128, 0x1a130, 974 0x1a138, 0x1a138, 975 0x1a190, 0x1a1c4, 976 0x1a1fc, 0x1a1fc, 977 0x1e040, 0x1e04c, 978 0x1e284, 0x1e28c, 979 0x1e2c0, 0x1e2c0, 980 0x1e2e0, 0x1e2e0, 981 0x1e300, 0x1e384, 982 0x1e3c0, 0x1e3c8, 983 0x1e440, 0x1e44c, 984 0x1e684, 0x1e68c, 985 0x1e6c0, 0x1e6c0, 986 0x1e6e0, 0x1e6e0, 987 0x1e700, 0x1e784, 988 0x1e7c0, 0x1e7c8, 989 0x1e840, 0x1e84c, 990 0x1ea84, 0x1ea8c, 991 0x1eac0, 0x1eac0, 992 0x1eae0, 0x1eae0, 993 0x1eb00, 0x1eb84, 994 0x1ebc0, 0x1ebc8, 995 0x1ec40, 0x1ec4c, 996 0x1ee84, 0x1ee8c, 997 0x1eec0, 0x1eec0, 998 0x1eee0, 0x1eee0, 999 0x1ef00, 0x1ef84, 1000 0x1efc0, 0x1efc8, 1001 0x1f040, 0x1f04c, 1002 0x1f284, 0x1f28c, 1003 0x1f2c0, 0x1f2c0, 1004 0x1f2e0, 0x1f2e0, 1005 0x1f300, 0x1f384, 1006 0x1f3c0, 0x1f3c8, 1007 0x1f440, 0x1f44c, 1008 0x1f684, 0x1f68c, 1009 0x1f6c0, 0x1f6c0, 1010 0x1f6e0, 0x1f6e0, 1011 0x1f700, 0x1f784, 1012 0x1f7c0, 0x1f7c8, 1013 0x1f840, 0x1f84c, 1014 0x1fa84, 0x1fa8c, 1015 0x1fac0, 0x1fac0, 1016 0x1fae0, 0x1fae0, 1017 0x1fb00, 0x1fb84, 1018 0x1fbc0, 0x1fbc8, 1019 0x1fc40, 0x1fc4c, 1020 0x1fe84, 0x1fe8c, 1021 0x1fec0, 0x1fec0, 1022 0x1fee0, 0x1fee0, 1023 0x1ff00, 0x1ff84, 1024 0x1ffc0, 0x1ffc8, 1025 0x20000, 0x2002c, 1026 0x20100, 0x2013c, 1027 0x20190, 0x201a0, 1028 0x201a8, 0x201b8, 1029 0x201c4, 0x201c8, 1030 0x20200, 0x20318, 1031 0x20400, 0x204b4, 1032 0x204c0, 0x20528, 1033 0x20540, 0x20614, 1034 0x21000, 0x21040, 1035 0x2104c, 0x21060, 1036 0x210c0, 0x210ec, 1037 0x21200, 0x21268, 1038 0x21270, 0x21284, 1039 0x212fc, 0x21388, 1040 0x21400, 0x21404, 1041 0x21500, 0x21500, 1042 0x21510, 0x21518, 1043 0x2152c, 0x21530, 1044 0x2153c, 0x2153c, 1045 0x21550, 0x21554, 1046 0x21600, 0x21600, 1047 0x21608, 0x2161c, 1048 0x21624, 0x21628, 1049 0x21630, 0x21634, 1050 0x2163c, 0x2163c, 1051 0x21700, 0x2171c, 1052 0x21780, 0x2178c, 1053 0x21800, 0x21818, 1054 0x21820, 0x21828, 1055 0x21830, 0x21848, 1056 0x21850, 0x21854, 1057 0x21860, 0x21868, 1058 0x21870, 0x21870, 1059 0x21878, 0x21898, 1060 0x218a0, 
0x218a8, 1061 0x218b0, 0x218c8, 1062 0x218d0, 0x218d4, 1063 0x218e0, 0x218e8, 1064 0x218f0, 0x218f0, 1065 0x218f8, 0x21a18, 1066 0x21a20, 0x21a28, 1067 0x21a30, 0x21a48, 1068 0x21a50, 0x21a54, 1069 0x21a60, 0x21a68, 1070 0x21a70, 0x21a70, 1071 0x21a78, 0x21a98, 1072 0x21aa0, 0x21aa8, 1073 0x21ab0, 0x21ac8, 1074 0x21ad0, 0x21ad4, 1075 0x21ae0, 0x21ae8, 1076 0x21af0, 0x21af0, 1077 0x21af8, 0x21c18, 1078 0x21c20, 0x21c20, 1079 0x21c28, 0x21c30, 1080 0x21c38, 0x21c38, 1081 0x21c80, 0x21c98, 1082 0x21ca0, 0x21ca8, 1083 0x21cb0, 0x21cc8, 1084 0x21cd0, 0x21cd4, 1085 0x21ce0, 0x21ce8, 1086 0x21cf0, 0x21cf0, 1087 0x21cf8, 0x21d7c, 1088 0x21e00, 0x21e04, 1089 0x22000, 0x2202c, 1090 0x22100, 0x2213c, 1091 0x22190, 0x221a0, 1092 0x221a8, 0x221b8, 1093 0x221c4, 0x221c8, 1094 0x22200, 0x22318, 1095 0x22400, 0x224b4, 1096 0x224c0, 0x22528, 1097 0x22540, 0x22614, 1098 0x23000, 0x23040, 1099 0x2304c, 0x23060, 1100 0x230c0, 0x230ec, 1101 0x23200, 0x23268, 1102 0x23270, 0x23284, 1103 0x232fc, 0x23388, 1104 0x23400, 0x23404, 1105 0x23500, 0x23500, 1106 0x23510, 0x23518, 1107 0x2352c, 0x23530, 1108 0x2353c, 0x2353c, 1109 0x23550, 0x23554, 1110 0x23600, 0x23600, 1111 0x23608, 0x2361c, 1112 0x23624, 0x23628, 1113 0x23630, 0x23634, 1114 0x2363c, 0x2363c, 1115 0x23700, 0x2371c, 1116 0x23780, 0x2378c, 1117 0x23800, 0x23818, 1118 0x23820, 0x23828, 1119 0x23830, 0x23848, 1120 0x23850, 0x23854, 1121 0x23860, 0x23868, 1122 0x23870, 0x23870, 1123 0x23878, 0x23898, 1124 0x238a0, 0x238a8, 1125 0x238b0, 0x238c8, 1126 0x238d0, 0x238d4, 1127 0x238e0, 0x238e8, 1128 0x238f0, 0x238f0, 1129 0x238f8, 0x23a18, 1130 0x23a20, 0x23a28, 1131 0x23a30, 0x23a48, 1132 0x23a50, 0x23a54, 1133 0x23a60, 0x23a68, 1134 0x23a70, 0x23a70, 1135 0x23a78, 0x23a98, 1136 0x23aa0, 0x23aa8, 1137 0x23ab0, 0x23ac8, 1138 0x23ad0, 0x23ad4, 1139 0x23ae0, 0x23ae8, 1140 0x23af0, 0x23af0, 1141 0x23af8, 0x23c18, 1142 0x23c20, 0x23c20, 1143 0x23c28, 0x23c30, 1144 0x23c38, 0x23c38, 1145 0x23c80, 0x23c98, 1146 0x23ca0, 0x23ca8, 1147 
0x23cb0, 0x23cc8, 1148 0x23cd0, 0x23cd4, 1149 0x23ce0, 0x23ce8, 1150 0x23cf0, 0x23cf0, 1151 0x23cf8, 0x23d7c, 1152 0x23e00, 0x23e04, 1153 0x24000, 0x2402c, 1154 0x24100, 0x2413c, 1155 0x24190, 0x241a0, 1156 0x241a8, 0x241b8, 1157 0x241c4, 0x241c8, 1158 0x24200, 0x24318, 1159 0x24400, 0x244b4, 1160 0x244c0, 0x24528, 1161 0x24540, 0x24614, 1162 0x25000, 0x25040, 1163 0x2504c, 0x25060, 1164 0x250c0, 0x250ec, 1165 0x25200, 0x25268, 1166 0x25270, 0x25284, 1167 0x252fc, 0x25388, 1168 0x25400, 0x25404, 1169 0x25500, 0x25500, 1170 0x25510, 0x25518, 1171 0x2552c, 0x25530, 1172 0x2553c, 0x2553c, 1173 0x25550, 0x25554, 1174 0x25600, 0x25600, 1175 0x25608, 0x2561c, 1176 0x25624, 0x25628, 1177 0x25630, 0x25634, 1178 0x2563c, 0x2563c, 1179 0x25700, 0x2571c, 1180 0x25780, 0x2578c, 1181 0x25800, 0x25818, 1182 0x25820, 0x25828, 1183 0x25830, 0x25848, 1184 0x25850, 0x25854, 1185 0x25860, 0x25868, 1186 0x25870, 0x25870, 1187 0x25878, 0x25898, 1188 0x258a0, 0x258a8, 1189 0x258b0, 0x258c8, 1190 0x258d0, 0x258d4, 1191 0x258e0, 0x258e8, 1192 0x258f0, 0x258f0, 1193 0x258f8, 0x25a18, 1194 0x25a20, 0x25a28, 1195 0x25a30, 0x25a48, 1196 0x25a50, 0x25a54, 1197 0x25a60, 0x25a68, 1198 0x25a70, 0x25a70, 1199 0x25a78, 0x25a98, 1200 0x25aa0, 0x25aa8, 1201 0x25ab0, 0x25ac8, 1202 0x25ad0, 0x25ad4, 1203 0x25ae0, 0x25ae8, 1204 0x25af0, 0x25af0, 1205 0x25af8, 0x25c18, 1206 0x25c20, 0x25c20, 1207 0x25c28, 0x25c30, 1208 0x25c38, 0x25c38, 1209 0x25c80, 0x25c98, 1210 0x25ca0, 0x25ca8, 1211 0x25cb0, 0x25cc8, 1212 0x25cd0, 0x25cd4, 1213 0x25ce0, 0x25ce8, 1214 0x25cf0, 0x25cf0, 1215 0x25cf8, 0x25d7c, 1216 0x25e00, 0x25e04, 1217 0x26000, 0x2602c, 1218 0x26100, 0x2613c, 1219 0x26190, 0x261a0, 1220 0x261a8, 0x261b8, 1221 0x261c4, 0x261c8, 1222 0x26200, 0x26318, 1223 0x26400, 0x264b4, 1224 0x264c0, 0x26528, 1225 0x26540, 0x26614, 1226 0x27000, 0x27040, 1227 0x2704c, 0x27060, 1228 0x270c0, 0x270ec, 1229 0x27200, 0x27268, 1230 0x27270, 0x27284, 1231 0x272fc, 0x27388, 1232 0x27400, 0x27404, 1233 0x27500, 0x27500, 
1234 0x27510, 0x27518, 1235 0x2752c, 0x27530, 1236 0x2753c, 0x2753c, 1237 0x27550, 0x27554, 1238 0x27600, 0x27600, 1239 0x27608, 0x2761c, 1240 0x27624, 0x27628, 1241 0x27630, 0x27634, 1242 0x2763c, 0x2763c, 1243 0x27700, 0x2771c, 1244 0x27780, 0x2778c, 1245 0x27800, 0x27818, 1246 0x27820, 0x27828, 1247 0x27830, 0x27848, 1248 0x27850, 0x27854, 1249 0x27860, 0x27868, 1250 0x27870, 0x27870, 1251 0x27878, 0x27898, 1252 0x278a0, 0x278a8, 1253 0x278b0, 0x278c8, 1254 0x278d0, 0x278d4, 1255 0x278e0, 0x278e8, 1256 0x278f0, 0x278f0, 1257 0x278f8, 0x27a18, 1258 0x27a20, 0x27a28, 1259 0x27a30, 0x27a48, 1260 0x27a50, 0x27a54, 1261 0x27a60, 0x27a68, 1262 0x27a70, 0x27a70, 1263 0x27a78, 0x27a98, 1264 0x27aa0, 0x27aa8, 1265 0x27ab0, 0x27ac8, 1266 0x27ad0, 0x27ad4, 1267 0x27ae0, 0x27ae8, 1268 0x27af0, 0x27af0, 1269 0x27af8, 0x27c18, 1270 0x27c20, 0x27c20, 1271 0x27c28, 0x27c30, 1272 0x27c38, 0x27c38, 1273 0x27c80, 0x27c98, 1274 0x27ca0, 0x27ca8, 1275 0x27cb0, 0x27cc8, 1276 0x27cd0, 0x27cd4, 1277 0x27ce0, 0x27ce8, 1278 0x27cf0, 0x27cf0, 1279 0x27cf8, 0x27d7c, 1280 0x27e00, 0x27e04, 1281 }; 1282 1283 static const unsigned int t4vf_reg_ranges[] = { 1284 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 1285 VF_MPS_REG(A_MPS_VF_CTL), 1286 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 1287 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), 1288 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 1289 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 1290 FW_T4VF_MBDATA_BASE_ADDR, 1291 FW_T4VF_MBDATA_BASE_ADDR + 1292 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 1293 }; 1294 1295 static const unsigned int t5_reg_ranges[] = { 1296 0x1008, 0x10c0, 1297 0x10cc, 0x10f8, 1298 0x1100, 0x1100, 1299 0x110c, 0x1148, 1300 0x1180, 0x1184, 1301 0x1190, 0x1194, 1302 0x11a0, 0x11a4, 1303 0x11b0, 0x11b4, 1304 0x11fc, 0x123c, 1305 0x1280, 0x173c, 1306 0x1800, 0x18fc, 1307 0x3000, 0x3028, 1308 0x3060, 0x30b0, 1309 0x30b8, 0x30d8, 1310 0x30e0, 0x30fc, 1311 0x3140, 0x357c, 1312 0x35a8, 0x35cc, 1313 0x35ec, 0x35ec, 
1314 0x3600, 0x5624, 1315 0x56cc, 0x56ec, 1316 0x56f4, 0x5720, 1317 0x5728, 0x575c, 1318 0x580c, 0x5814, 1319 0x5890, 0x589c, 1320 0x58a4, 0x58ac, 1321 0x58b8, 0x58bc, 1322 0x5940, 0x59c8, 1323 0x59d0, 0x59dc, 1324 0x59fc, 0x5a18, 1325 0x5a60, 0x5a70, 1326 0x5a80, 0x5a9c, 1327 0x5b94, 0x5bfc, 1328 0x6000, 0x6020, 1329 0x6028, 0x6040, 1330 0x6058, 0x609c, 1331 0x60a8, 0x614c, 1332 0x7700, 0x7798, 1333 0x77c0, 0x78fc, 1334 0x7b00, 0x7b58, 1335 0x7b60, 0x7b84, 1336 0x7b8c, 0x7c54, 1337 0x7d00, 0x7d38, 1338 0x7d40, 0x7d80, 1339 0x7d8c, 0x7ddc, 1340 0x7de4, 0x7e04, 1341 0x7e10, 0x7e1c, 1342 0x7e24, 0x7e38, 1343 0x7e40, 0x7e44, 1344 0x7e4c, 0x7e78, 1345 0x7e80, 0x7edc, 1346 0x7ee8, 0x7efc, 1347 0x8dc0, 0x8de0, 1348 0x8df8, 0x8e04, 1349 0x8e10, 0x8e84, 1350 0x8ea0, 0x8f84, 1351 0x8fc0, 0x9058, 1352 0x9060, 0x9060, 1353 0x9068, 0x90f8, 1354 0x9400, 0x9408, 1355 0x9410, 0x9470, 1356 0x9600, 0x9600, 1357 0x9608, 0x9638, 1358 0x9640, 0x96f4, 1359 0x9800, 0x9808, 1360 0x9820, 0x983c, 1361 0x9850, 0x9864, 1362 0x9c00, 0x9c6c, 1363 0x9c80, 0x9cec, 1364 0x9d00, 0x9d6c, 1365 0x9d80, 0x9dec, 1366 0x9e00, 0x9e6c, 1367 0x9e80, 0x9eec, 1368 0x9f00, 0x9f6c, 1369 0x9f80, 0xa020, 1370 0xd004, 0xd004, 1371 0xd010, 0xd03c, 1372 0xdfc0, 0xdfe0, 1373 0xe000, 0x1106c, 1374 0x11074, 0x11088, 1375 0x1109c, 0x1117c, 1376 0x11190, 0x11204, 1377 0x19040, 0x1906c, 1378 0x19078, 0x19080, 1379 0x1908c, 0x190e8, 1380 0x190f0, 0x190f8, 1381 0x19100, 0x19110, 1382 0x19120, 0x19124, 1383 0x19150, 0x19194, 1384 0x1919c, 0x191b0, 1385 0x191d0, 0x191e8, 1386 0x19238, 0x19290, 1387 0x193f8, 0x19428, 1388 0x19430, 0x19444, 1389 0x1944c, 0x1946c, 1390 0x19474, 0x19474, 1391 0x19490, 0x194cc, 1392 0x194f0, 0x194f8, 1393 0x19c00, 0x19c08, 1394 0x19c10, 0x19c60, 1395 0x19c94, 0x19ce4, 1396 0x19cf0, 0x19d40, 1397 0x19d50, 0x19d94, 1398 0x19da0, 0x19de8, 1399 0x19df0, 0x19e10, 1400 0x19e50, 0x19e90, 1401 0x19ea0, 0x19f24, 1402 0x19f34, 0x19f34, 1403 0x19f40, 0x19f50, 1404 0x19f90, 0x19fb4, 1405 0x19fc4, 0x19fe4, 
1406 0x1a000, 0x1a004, 1407 0x1a010, 0x1a06c, 1408 0x1a0b0, 0x1a0e4, 1409 0x1a0ec, 0x1a0f8, 1410 0x1a100, 0x1a108, 1411 0x1a114, 0x1a120, 1412 0x1a128, 0x1a130, 1413 0x1a138, 0x1a138, 1414 0x1a190, 0x1a1c4, 1415 0x1a1fc, 0x1a1fc, 1416 0x1e008, 0x1e00c, 1417 0x1e040, 0x1e044, 1418 0x1e04c, 0x1e04c, 1419 0x1e284, 0x1e290, 1420 0x1e2c0, 0x1e2c0, 1421 0x1e2e0, 0x1e2e0, 1422 0x1e300, 0x1e384, 1423 0x1e3c0, 0x1e3c8, 1424 0x1e408, 0x1e40c, 1425 0x1e440, 0x1e444, 1426 0x1e44c, 0x1e44c, 1427 0x1e684, 0x1e690, 1428 0x1e6c0, 0x1e6c0, 1429 0x1e6e0, 0x1e6e0, 1430 0x1e700, 0x1e784, 1431 0x1e7c0, 0x1e7c8, 1432 0x1e808, 0x1e80c, 1433 0x1e840, 0x1e844, 1434 0x1e84c, 0x1e84c, 1435 0x1ea84, 0x1ea90, 1436 0x1eac0, 0x1eac0, 1437 0x1eae0, 0x1eae0, 1438 0x1eb00, 0x1eb84, 1439 0x1ebc0, 0x1ebc8, 1440 0x1ec08, 0x1ec0c, 1441 0x1ec40, 0x1ec44, 1442 0x1ec4c, 0x1ec4c, 1443 0x1ee84, 0x1ee90, 1444 0x1eec0, 0x1eec0, 1445 0x1eee0, 0x1eee0, 1446 0x1ef00, 0x1ef84, 1447 0x1efc0, 0x1efc8, 1448 0x1f008, 0x1f00c, 1449 0x1f040, 0x1f044, 1450 0x1f04c, 0x1f04c, 1451 0x1f284, 0x1f290, 1452 0x1f2c0, 0x1f2c0, 1453 0x1f2e0, 0x1f2e0, 1454 0x1f300, 0x1f384, 1455 0x1f3c0, 0x1f3c8, 1456 0x1f408, 0x1f40c, 1457 0x1f440, 0x1f444, 1458 0x1f44c, 0x1f44c, 1459 0x1f684, 0x1f690, 1460 0x1f6c0, 0x1f6c0, 1461 0x1f6e0, 0x1f6e0, 1462 0x1f700, 0x1f784, 1463 0x1f7c0, 0x1f7c8, 1464 0x1f808, 0x1f80c, 1465 0x1f840, 0x1f844, 1466 0x1f84c, 0x1f84c, 1467 0x1fa84, 0x1fa90, 1468 0x1fac0, 0x1fac0, 1469 0x1fae0, 0x1fae0, 1470 0x1fb00, 0x1fb84, 1471 0x1fbc0, 0x1fbc8, 1472 0x1fc08, 0x1fc0c, 1473 0x1fc40, 0x1fc44, 1474 0x1fc4c, 0x1fc4c, 1475 0x1fe84, 0x1fe90, 1476 0x1fec0, 0x1fec0, 1477 0x1fee0, 0x1fee0, 1478 0x1ff00, 0x1ff84, 1479 0x1ffc0, 0x1ffc8, 1480 0x30000, 0x30030, 1481 0x30100, 0x30144, 1482 0x30190, 0x301a0, 1483 0x301a8, 0x301b8, 1484 0x301c4, 0x301c8, 1485 0x301d0, 0x301d0, 1486 0x30200, 0x30318, 1487 0x30400, 0x304b4, 1488 0x304c0, 0x3052c, 1489 0x30540, 0x3061c, 1490 0x30800, 0x30828, 1491 0x30834, 0x30834, 1492 0x308c0, 
0x30908, 1493 0x30910, 0x309ac, 1494 0x30a00, 0x30a14, 1495 0x30a1c, 0x30a2c, 1496 0x30a44, 0x30a50, 1497 0x30a74, 0x30a74, 1498 0x30a7c, 0x30afc, 1499 0x30b08, 0x30c24, 1500 0x30d00, 0x30d00, 1501 0x30d08, 0x30d14, 1502 0x30d1c, 0x30d20, 1503 0x30d3c, 0x30d3c, 1504 0x30d48, 0x30d50, 1505 0x31200, 0x3120c, 1506 0x31220, 0x31220, 1507 0x31240, 0x31240, 1508 0x31600, 0x3160c, 1509 0x31a00, 0x31a1c, 1510 0x31e00, 0x31e20, 1511 0x31e38, 0x31e3c, 1512 0x31e80, 0x31e80, 1513 0x31e88, 0x31ea8, 1514 0x31eb0, 0x31eb4, 1515 0x31ec8, 0x31ed4, 1516 0x31fb8, 0x32004, 1517 0x32200, 0x32200, 1518 0x32208, 0x32240, 1519 0x32248, 0x32280, 1520 0x32288, 0x322c0, 1521 0x322c8, 0x322fc, 1522 0x32600, 0x32630, 1523 0x32a00, 0x32abc, 1524 0x32b00, 0x32b10, 1525 0x32b20, 0x32b30, 1526 0x32b40, 0x32b50, 1527 0x32b60, 0x32b70, 1528 0x33000, 0x33028, 1529 0x33030, 0x33048, 1530 0x33060, 0x33068, 1531 0x33070, 0x3309c, 1532 0x330f0, 0x33128, 1533 0x33130, 0x33148, 1534 0x33160, 0x33168, 1535 0x33170, 0x3319c, 1536 0x331f0, 0x33238, 1537 0x33240, 0x33240, 1538 0x33248, 0x33250, 1539 0x3325c, 0x33264, 1540 0x33270, 0x332b8, 1541 0x332c0, 0x332e4, 1542 0x332f8, 0x33338, 1543 0x33340, 0x33340, 1544 0x33348, 0x33350, 1545 0x3335c, 0x33364, 1546 0x33370, 0x333b8, 1547 0x333c0, 0x333e4, 1548 0x333f8, 0x33428, 1549 0x33430, 0x33448, 1550 0x33460, 0x33468, 1551 0x33470, 0x3349c, 1552 0x334f0, 0x33528, 1553 0x33530, 0x33548, 1554 0x33560, 0x33568, 1555 0x33570, 0x3359c, 1556 0x335f0, 0x33638, 1557 0x33640, 0x33640, 1558 0x33648, 0x33650, 1559 0x3365c, 0x33664, 1560 0x33670, 0x336b8, 1561 0x336c0, 0x336e4, 1562 0x336f8, 0x33738, 1563 0x33740, 0x33740, 1564 0x33748, 0x33750, 1565 0x3375c, 0x33764, 1566 0x33770, 0x337b8, 1567 0x337c0, 0x337e4, 1568 0x337f8, 0x337fc, 1569 0x33814, 0x33814, 1570 0x3382c, 0x3382c, 1571 0x33880, 0x3388c, 1572 0x338e8, 0x338ec, 1573 0x33900, 0x33928, 1574 0x33930, 0x33948, 1575 0x33960, 0x33968, 1576 0x33970, 0x3399c, 1577 0x339f0, 0x33a38, 1578 0x33a40, 0x33a40, 1579 
0x33a48, 0x33a50, 1580 0x33a5c, 0x33a64, 1581 0x33a70, 0x33ab8, 1582 0x33ac0, 0x33ae4, 1583 0x33af8, 0x33b10, 1584 0x33b28, 0x33b28, 1585 0x33b3c, 0x33b50, 1586 0x33bf0, 0x33c10, 1587 0x33c28, 0x33c28, 1588 0x33c3c, 0x33c50, 1589 0x33cf0, 0x33cfc, 1590 0x34000, 0x34030, 1591 0x34100, 0x34144, 1592 0x34190, 0x341a0, 1593 0x341a8, 0x341b8, 1594 0x341c4, 0x341c8, 1595 0x341d0, 0x341d0, 1596 0x34200, 0x34318, 1597 0x34400, 0x344b4, 1598 0x344c0, 0x3452c, 1599 0x34540, 0x3461c, 1600 0x34800, 0x34828, 1601 0x34834, 0x34834, 1602 0x348c0, 0x34908, 1603 0x34910, 0x349ac, 1604 0x34a00, 0x34a14, 1605 0x34a1c, 0x34a2c, 1606 0x34a44, 0x34a50, 1607 0x34a74, 0x34a74, 1608 0x34a7c, 0x34afc, 1609 0x34b08, 0x34c24, 1610 0x34d00, 0x34d00, 1611 0x34d08, 0x34d14, 1612 0x34d1c, 0x34d20, 1613 0x34d3c, 0x34d3c, 1614 0x34d48, 0x34d50, 1615 0x35200, 0x3520c, 1616 0x35220, 0x35220, 1617 0x35240, 0x35240, 1618 0x35600, 0x3560c, 1619 0x35a00, 0x35a1c, 1620 0x35e00, 0x35e20, 1621 0x35e38, 0x35e3c, 1622 0x35e80, 0x35e80, 1623 0x35e88, 0x35ea8, 1624 0x35eb0, 0x35eb4, 1625 0x35ec8, 0x35ed4, 1626 0x35fb8, 0x36004, 1627 0x36200, 0x36200, 1628 0x36208, 0x36240, 1629 0x36248, 0x36280, 1630 0x36288, 0x362c0, 1631 0x362c8, 0x362fc, 1632 0x36600, 0x36630, 1633 0x36a00, 0x36abc, 1634 0x36b00, 0x36b10, 1635 0x36b20, 0x36b30, 1636 0x36b40, 0x36b50, 1637 0x36b60, 0x36b70, 1638 0x37000, 0x37028, 1639 0x37030, 0x37048, 1640 0x37060, 0x37068, 1641 0x37070, 0x3709c, 1642 0x370f0, 0x37128, 1643 0x37130, 0x37148, 1644 0x37160, 0x37168, 1645 0x37170, 0x3719c, 1646 0x371f0, 0x37238, 1647 0x37240, 0x37240, 1648 0x37248, 0x37250, 1649 0x3725c, 0x37264, 1650 0x37270, 0x372b8, 1651 0x372c0, 0x372e4, 1652 0x372f8, 0x37338, 1653 0x37340, 0x37340, 1654 0x37348, 0x37350, 1655 0x3735c, 0x37364, 1656 0x37370, 0x373b8, 1657 0x373c0, 0x373e4, 1658 0x373f8, 0x37428, 1659 0x37430, 0x37448, 1660 0x37460, 0x37468, 1661 0x37470, 0x3749c, 1662 0x374f0, 0x37528, 1663 0x37530, 0x37548, 1664 0x37560, 0x37568, 1665 0x37570, 0x3759c, 
1666 0x375f0, 0x37638, 1667 0x37640, 0x37640, 1668 0x37648, 0x37650, 1669 0x3765c, 0x37664, 1670 0x37670, 0x376b8, 1671 0x376c0, 0x376e4, 1672 0x376f8, 0x37738, 1673 0x37740, 0x37740, 1674 0x37748, 0x37750, 1675 0x3775c, 0x37764, 1676 0x37770, 0x377b8, 1677 0x377c0, 0x377e4, 1678 0x377f8, 0x377fc, 1679 0x37814, 0x37814, 1680 0x3782c, 0x3782c, 1681 0x37880, 0x3788c, 1682 0x378e8, 0x378ec, 1683 0x37900, 0x37928, 1684 0x37930, 0x37948, 1685 0x37960, 0x37968, 1686 0x37970, 0x3799c, 1687 0x379f0, 0x37a38, 1688 0x37a40, 0x37a40, 1689 0x37a48, 0x37a50, 1690 0x37a5c, 0x37a64, 1691 0x37a70, 0x37ab8, 1692 0x37ac0, 0x37ae4, 1693 0x37af8, 0x37b10, 1694 0x37b28, 0x37b28, 1695 0x37b3c, 0x37b50, 1696 0x37bf0, 0x37c10, 1697 0x37c28, 0x37c28, 1698 0x37c3c, 0x37c50, 1699 0x37cf0, 0x37cfc, 1700 0x38000, 0x38030, 1701 0x38100, 0x38144, 1702 0x38190, 0x381a0, 1703 0x381a8, 0x381b8, 1704 0x381c4, 0x381c8, 1705 0x381d0, 0x381d0, 1706 0x38200, 0x38318, 1707 0x38400, 0x384b4, 1708 0x384c0, 0x3852c, 1709 0x38540, 0x3861c, 1710 0x38800, 0x38828, 1711 0x38834, 0x38834, 1712 0x388c0, 0x38908, 1713 0x38910, 0x389ac, 1714 0x38a00, 0x38a14, 1715 0x38a1c, 0x38a2c, 1716 0x38a44, 0x38a50, 1717 0x38a74, 0x38a74, 1718 0x38a7c, 0x38afc, 1719 0x38b08, 0x38c24, 1720 0x38d00, 0x38d00, 1721 0x38d08, 0x38d14, 1722 0x38d1c, 0x38d20, 1723 0x38d3c, 0x38d3c, 1724 0x38d48, 0x38d50, 1725 0x39200, 0x3920c, 1726 0x39220, 0x39220, 1727 0x39240, 0x39240, 1728 0x39600, 0x3960c, 1729 0x39a00, 0x39a1c, 1730 0x39e00, 0x39e20, 1731 0x39e38, 0x39e3c, 1732 0x39e80, 0x39e80, 1733 0x39e88, 0x39ea8, 1734 0x39eb0, 0x39eb4, 1735 0x39ec8, 0x39ed4, 1736 0x39fb8, 0x3a004, 1737 0x3a200, 0x3a200, 1738 0x3a208, 0x3a240, 1739 0x3a248, 0x3a280, 1740 0x3a288, 0x3a2c0, 1741 0x3a2c8, 0x3a2fc, 1742 0x3a600, 0x3a630, 1743 0x3aa00, 0x3aabc, 1744 0x3ab00, 0x3ab10, 1745 0x3ab20, 0x3ab30, 1746 0x3ab40, 0x3ab50, 1747 0x3ab60, 0x3ab70, 1748 0x3b000, 0x3b028, 1749 0x3b030, 0x3b048, 1750 0x3b060, 0x3b068, 1751 0x3b070, 0x3b09c, 1752 0x3b0f0, 
0x3b128, 1753 0x3b130, 0x3b148, 1754 0x3b160, 0x3b168, 1755 0x3b170, 0x3b19c, 1756 0x3b1f0, 0x3b238, 1757 0x3b240, 0x3b240, 1758 0x3b248, 0x3b250, 1759 0x3b25c, 0x3b264, 1760 0x3b270, 0x3b2b8, 1761 0x3b2c0, 0x3b2e4, 1762 0x3b2f8, 0x3b338, 1763 0x3b340, 0x3b340, 1764 0x3b348, 0x3b350, 1765 0x3b35c, 0x3b364, 1766 0x3b370, 0x3b3b8, 1767 0x3b3c0, 0x3b3e4, 1768 0x3b3f8, 0x3b428, 1769 0x3b430, 0x3b448, 1770 0x3b460, 0x3b468, 1771 0x3b470, 0x3b49c, 1772 0x3b4f0, 0x3b528, 1773 0x3b530, 0x3b548, 1774 0x3b560, 0x3b568, 1775 0x3b570, 0x3b59c, 1776 0x3b5f0, 0x3b638, 1777 0x3b640, 0x3b640, 1778 0x3b648, 0x3b650, 1779 0x3b65c, 0x3b664, 1780 0x3b670, 0x3b6b8, 1781 0x3b6c0, 0x3b6e4, 1782 0x3b6f8, 0x3b738, 1783 0x3b740, 0x3b740, 1784 0x3b748, 0x3b750, 1785 0x3b75c, 0x3b764, 1786 0x3b770, 0x3b7b8, 1787 0x3b7c0, 0x3b7e4, 1788 0x3b7f8, 0x3b7fc, 1789 0x3b814, 0x3b814, 1790 0x3b82c, 0x3b82c, 1791 0x3b880, 0x3b88c, 1792 0x3b8e8, 0x3b8ec, 1793 0x3b900, 0x3b928, 1794 0x3b930, 0x3b948, 1795 0x3b960, 0x3b968, 1796 0x3b970, 0x3b99c, 1797 0x3b9f0, 0x3ba38, 1798 0x3ba40, 0x3ba40, 1799 0x3ba48, 0x3ba50, 1800 0x3ba5c, 0x3ba64, 1801 0x3ba70, 0x3bab8, 1802 0x3bac0, 0x3bae4, 1803 0x3baf8, 0x3bb10, 1804 0x3bb28, 0x3bb28, 1805 0x3bb3c, 0x3bb50, 1806 0x3bbf0, 0x3bc10, 1807 0x3bc28, 0x3bc28, 1808 0x3bc3c, 0x3bc50, 1809 0x3bcf0, 0x3bcfc, 1810 0x3c000, 0x3c030, 1811 0x3c100, 0x3c144, 1812 0x3c190, 0x3c1a0, 1813 0x3c1a8, 0x3c1b8, 1814 0x3c1c4, 0x3c1c8, 1815 0x3c1d0, 0x3c1d0, 1816 0x3c200, 0x3c318, 1817 0x3c400, 0x3c4b4, 1818 0x3c4c0, 0x3c52c, 1819 0x3c540, 0x3c61c, 1820 0x3c800, 0x3c828, 1821 0x3c834, 0x3c834, 1822 0x3c8c0, 0x3c908, 1823 0x3c910, 0x3c9ac, 1824 0x3ca00, 0x3ca14, 1825 0x3ca1c, 0x3ca2c, 1826 0x3ca44, 0x3ca50, 1827 0x3ca74, 0x3ca74, 1828 0x3ca7c, 0x3cafc, 1829 0x3cb08, 0x3cc24, 1830 0x3cd00, 0x3cd00, 1831 0x3cd08, 0x3cd14, 1832 0x3cd1c, 0x3cd20, 1833 0x3cd3c, 0x3cd3c, 1834 0x3cd48, 0x3cd50, 1835 0x3d200, 0x3d20c, 1836 0x3d220, 0x3d220, 1837 0x3d240, 0x3d240, 1838 0x3d600, 0x3d60c, 1839 
0x3da00, 0x3da1c, 1840 0x3de00, 0x3de20, 1841 0x3de38, 0x3de3c, 1842 0x3de80, 0x3de80, 1843 0x3de88, 0x3dea8, 1844 0x3deb0, 0x3deb4, 1845 0x3dec8, 0x3ded4, 1846 0x3dfb8, 0x3e004, 1847 0x3e200, 0x3e200, 1848 0x3e208, 0x3e240, 1849 0x3e248, 0x3e280, 1850 0x3e288, 0x3e2c0, 1851 0x3e2c8, 0x3e2fc, 1852 0x3e600, 0x3e630, 1853 0x3ea00, 0x3eabc, 1854 0x3eb00, 0x3eb10, 1855 0x3eb20, 0x3eb30, 1856 0x3eb40, 0x3eb50, 1857 0x3eb60, 0x3eb70, 1858 0x3f000, 0x3f028, 1859 0x3f030, 0x3f048, 1860 0x3f060, 0x3f068, 1861 0x3f070, 0x3f09c, 1862 0x3f0f0, 0x3f128, 1863 0x3f130, 0x3f148, 1864 0x3f160, 0x3f168, 1865 0x3f170, 0x3f19c, 1866 0x3f1f0, 0x3f238, 1867 0x3f240, 0x3f240, 1868 0x3f248, 0x3f250, 1869 0x3f25c, 0x3f264, 1870 0x3f270, 0x3f2b8, 1871 0x3f2c0, 0x3f2e4, 1872 0x3f2f8, 0x3f338, 1873 0x3f340, 0x3f340, 1874 0x3f348, 0x3f350, 1875 0x3f35c, 0x3f364, 1876 0x3f370, 0x3f3b8, 1877 0x3f3c0, 0x3f3e4, 1878 0x3f3f8, 0x3f428, 1879 0x3f430, 0x3f448, 1880 0x3f460, 0x3f468, 1881 0x3f470, 0x3f49c, 1882 0x3f4f0, 0x3f528, 1883 0x3f530, 0x3f548, 1884 0x3f560, 0x3f568, 1885 0x3f570, 0x3f59c, 1886 0x3f5f0, 0x3f638, 1887 0x3f640, 0x3f640, 1888 0x3f648, 0x3f650, 1889 0x3f65c, 0x3f664, 1890 0x3f670, 0x3f6b8, 1891 0x3f6c0, 0x3f6e4, 1892 0x3f6f8, 0x3f738, 1893 0x3f740, 0x3f740, 1894 0x3f748, 0x3f750, 1895 0x3f75c, 0x3f764, 1896 0x3f770, 0x3f7b8, 1897 0x3f7c0, 0x3f7e4, 1898 0x3f7f8, 0x3f7fc, 1899 0x3f814, 0x3f814, 1900 0x3f82c, 0x3f82c, 1901 0x3f880, 0x3f88c, 1902 0x3f8e8, 0x3f8ec, 1903 0x3f900, 0x3f928, 1904 0x3f930, 0x3f948, 1905 0x3f960, 0x3f968, 1906 0x3f970, 0x3f99c, 1907 0x3f9f0, 0x3fa38, 1908 0x3fa40, 0x3fa40, 1909 0x3fa48, 0x3fa50, 1910 0x3fa5c, 0x3fa64, 1911 0x3fa70, 0x3fab8, 1912 0x3fac0, 0x3fae4, 1913 0x3faf8, 0x3fb10, 1914 0x3fb28, 0x3fb28, 1915 0x3fb3c, 0x3fb50, 1916 0x3fbf0, 0x3fc10, 1917 0x3fc28, 0x3fc28, 1918 0x3fc3c, 0x3fc50, 1919 0x3fcf0, 0x3fcfc, 1920 0x40000, 0x4000c, 1921 0x40040, 0x40050, 1922 0x40060, 0x40068, 1923 0x4007c, 0x4008c, 1924 0x40094, 0x400b0, 1925 0x400c0, 0x40144, 
1926 0x40180, 0x4018c, 1927 0x40200, 0x40254, 1928 0x40260, 0x40264, 1929 0x40270, 0x40288, 1930 0x40290, 0x40298, 1931 0x402ac, 0x402c8, 1932 0x402d0, 0x402e0, 1933 0x402f0, 0x402f0, 1934 0x40300, 0x4033c, 1935 0x403f8, 0x403fc, 1936 0x41304, 0x413c4, 1937 0x41400, 0x4140c, 1938 0x41414, 0x4141c, 1939 0x41480, 0x414d0, 1940 0x44000, 0x44054, 1941 0x4405c, 0x44078, 1942 0x440c0, 0x44174, 1943 0x44180, 0x441ac, 1944 0x441b4, 0x441b8, 1945 0x441c0, 0x44254, 1946 0x4425c, 0x44278, 1947 0x442c0, 0x44374, 1948 0x44380, 0x443ac, 1949 0x443b4, 0x443b8, 1950 0x443c0, 0x44454, 1951 0x4445c, 0x44478, 1952 0x444c0, 0x44574, 1953 0x44580, 0x445ac, 1954 0x445b4, 0x445b8, 1955 0x445c0, 0x44654, 1956 0x4465c, 0x44678, 1957 0x446c0, 0x44774, 1958 0x44780, 0x447ac, 1959 0x447b4, 0x447b8, 1960 0x447c0, 0x44854, 1961 0x4485c, 0x44878, 1962 0x448c0, 0x44974, 1963 0x44980, 0x449ac, 1964 0x449b4, 0x449b8, 1965 0x449c0, 0x449fc, 1966 0x45000, 0x45004, 1967 0x45010, 0x45030, 1968 0x45040, 0x45060, 1969 0x45068, 0x45068, 1970 0x45080, 0x45084, 1971 0x450a0, 0x450b0, 1972 0x45200, 0x45204, 1973 0x45210, 0x45230, 1974 0x45240, 0x45260, 1975 0x45268, 0x45268, 1976 0x45280, 0x45284, 1977 0x452a0, 0x452b0, 1978 0x460c0, 0x460e4, 1979 0x47000, 0x4703c, 1980 0x47044, 0x4708c, 1981 0x47200, 0x47250, 1982 0x47400, 0x47408, 1983 0x47414, 0x47420, 1984 0x47600, 0x47618, 1985 0x47800, 0x47814, 1986 0x48000, 0x4800c, 1987 0x48040, 0x48050, 1988 0x48060, 0x48068, 1989 0x4807c, 0x4808c, 1990 0x48094, 0x480b0, 1991 0x480c0, 0x48144, 1992 0x48180, 0x4818c, 1993 0x48200, 0x48254, 1994 0x48260, 0x48264, 1995 0x48270, 0x48288, 1996 0x48290, 0x48298, 1997 0x482ac, 0x482c8, 1998 0x482d0, 0x482e0, 1999 0x482f0, 0x482f0, 2000 0x48300, 0x4833c, 2001 0x483f8, 0x483fc, 2002 0x49304, 0x493c4, 2003 0x49400, 0x4940c, 2004 0x49414, 0x4941c, 2005 0x49480, 0x494d0, 2006 0x4c000, 0x4c054, 2007 0x4c05c, 0x4c078, 2008 0x4c0c0, 0x4c174, 2009 0x4c180, 0x4c1ac, 2010 0x4c1b4, 0x4c1b8, 2011 0x4c1c0, 0x4c254, 2012 0x4c25c, 
0x4c278, 2013 0x4c2c0, 0x4c374, 2014 0x4c380, 0x4c3ac, 2015 0x4c3b4, 0x4c3b8, 2016 0x4c3c0, 0x4c454, 2017 0x4c45c, 0x4c478, 2018 0x4c4c0, 0x4c574, 2019 0x4c580, 0x4c5ac, 2020 0x4c5b4, 0x4c5b8, 2021 0x4c5c0, 0x4c654, 2022 0x4c65c, 0x4c678, 2023 0x4c6c0, 0x4c774, 2024 0x4c780, 0x4c7ac, 2025 0x4c7b4, 0x4c7b8, 2026 0x4c7c0, 0x4c854, 2027 0x4c85c, 0x4c878, 2028 0x4c8c0, 0x4c974, 2029 0x4c980, 0x4c9ac, 2030 0x4c9b4, 0x4c9b8, 2031 0x4c9c0, 0x4c9fc, 2032 0x4d000, 0x4d004, 2033 0x4d010, 0x4d030, 2034 0x4d040, 0x4d060, 2035 0x4d068, 0x4d068, 2036 0x4d080, 0x4d084, 2037 0x4d0a0, 0x4d0b0, 2038 0x4d200, 0x4d204, 2039 0x4d210, 0x4d230, 2040 0x4d240, 0x4d260, 2041 0x4d268, 0x4d268, 2042 0x4d280, 0x4d284, 2043 0x4d2a0, 0x4d2b0, 2044 0x4e0c0, 0x4e0e4, 2045 0x4f000, 0x4f03c, 2046 0x4f044, 0x4f08c, 2047 0x4f200, 0x4f250, 2048 0x4f400, 0x4f408, 2049 0x4f414, 0x4f420, 2050 0x4f600, 0x4f618, 2051 0x4f800, 0x4f814, 2052 0x50000, 0x50084, 2053 0x50090, 0x500cc, 2054 0x50400, 0x50400, 2055 0x50800, 0x50884, 2056 0x50890, 0x508cc, 2057 0x50c00, 0x50c00, 2058 0x51000, 0x5101c, 2059 0x51300, 0x51308, 2060 }; 2061 2062 static const unsigned int t5vf_reg_ranges[] = { 2063 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2064 VF_MPS_REG(A_MPS_VF_CTL), 2065 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2066 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2067 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2068 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2069 FW_T4VF_MBDATA_BASE_ADDR, 2070 FW_T4VF_MBDATA_BASE_ADDR + 2071 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2072 }; 2073 2074 static const unsigned int t6_reg_ranges[] = { 2075 0x1008, 0x101c, 2076 0x1024, 0x10a8, 2077 0x10b4, 0x10f8, 2078 0x1100, 0x1114, 2079 0x111c, 0x112c, 2080 0x1138, 0x113c, 2081 0x1144, 0x114c, 2082 0x1180, 0x1184, 2083 0x1190, 0x1194, 2084 0x11a0, 0x11a4, 2085 0x11b0, 0x11b4, 2086 0x11fc, 0x1274, 2087 0x1280, 0x133c, 2088 0x1800, 0x18fc, 2089 0x3000, 0x302c, 2090 0x3060, 0x30b0, 2091 0x30b8, 0x30d8, 2092 
0x30e0, 0x30fc, 2093 0x3140, 0x357c, 2094 0x35a8, 0x35cc, 2095 0x35ec, 0x35ec, 2096 0x3600, 0x5624, 2097 0x56cc, 0x56ec, 2098 0x56f4, 0x5720, 2099 0x5728, 0x575c, 2100 0x580c, 0x5814, 2101 0x5890, 0x589c, 2102 0x58a4, 0x58ac, 2103 0x58b8, 0x58bc, 2104 0x5940, 0x595c, 2105 0x5980, 0x598c, 2106 0x59b0, 0x59c8, 2107 0x59d0, 0x59dc, 2108 0x59fc, 0x5a18, 2109 0x5a60, 0x5a6c, 2110 0x5a80, 0x5a8c, 2111 0x5a94, 0x5a9c, 2112 0x5b94, 0x5bfc, 2113 0x5c10, 0x5e48, 2114 0x5e50, 0x5e94, 2115 0x5ea0, 0x5eb0, 2116 0x5ec0, 0x5ec0, 2117 0x5ec8, 0x5ed0, 2118 0x5ee0, 0x5ee0, 2119 0x5ef0, 0x5ef0, 2120 0x5f00, 0x5f00, 2121 0x6000, 0x6020, 2122 0x6028, 0x6040, 2123 0x6058, 0x609c, 2124 0x60a8, 0x619c, 2125 0x7700, 0x7798, 2126 0x77c0, 0x7880, 2127 0x78cc, 0x78fc, 2128 0x7b00, 0x7b58, 2129 0x7b60, 0x7b84, 2130 0x7b8c, 0x7c54, 2131 0x7d00, 0x7d38, 2132 0x7d40, 0x7d84, 2133 0x7d8c, 0x7ddc, 2134 0x7de4, 0x7e04, 2135 0x7e10, 0x7e1c, 2136 0x7e24, 0x7e38, 2137 0x7e40, 0x7e44, 2138 0x7e4c, 0x7e78, 2139 0x7e80, 0x7edc, 2140 0x7ee8, 0x7efc, 2141 0x8dc0, 0x8de4, 2142 0x8df8, 0x8e04, 2143 0x8e10, 0x8e84, 2144 0x8ea0, 0x8f88, 2145 0x8fb8, 0x9058, 2146 0x9060, 0x9060, 2147 0x9068, 0x90f8, 2148 0x9100, 0x9124, 2149 0x9400, 0x9470, 2150 0x9600, 0x9600, 2151 0x9608, 0x9638, 2152 0x9640, 0x9704, 2153 0x9710, 0x971c, 2154 0x9800, 0x9808, 2155 0x9820, 0x983c, 2156 0x9850, 0x9864, 2157 0x9c00, 0x9c6c, 2158 0x9c80, 0x9cec, 2159 0x9d00, 0x9d6c, 2160 0x9d80, 0x9dec, 2161 0x9e00, 0x9e6c, 2162 0x9e80, 0x9eec, 2163 0x9f00, 0x9f6c, 2164 0x9f80, 0xa020, 2165 0xd004, 0xd03c, 2166 0xd100, 0xd118, 2167 0xd200, 0xd214, 2168 0xd220, 0xd234, 2169 0xd240, 0xd254, 2170 0xd260, 0xd274, 2171 0xd280, 0xd294, 2172 0xd2a0, 0xd2b4, 2173 0xd2c0, 0xd2d4, 2174 0xd2e0, 0xd2f4, 2175 0xd300, 0xd31c, 2176 0xdfc0, 0xdfe0, 2177 0xe000, 0xf008, 2178 0xf010, 0xf018, 2179 0xf020, 0xf028, 2180 0x11000, 0x11014, 2181 0x11048, 0x1106c, 2182 0x11074, 0x11088, 2183 0x11098, 0x11120, 2184 0x1112c, 0x1117c, 2185 0x11190, 0x112e0, 2186 0x11300, 
0x1130c, 2187 0x12000, 0x1206c, 2188 0x19040, 0x1906c, 2189 0x19078, 0x19080, 2190 0x1908c, 0x190e8, 2191 0x190f0, 0x190f8, 2192 0x19100, 0x19110, 2193 0x19120, 0x19124, 2194 0x19150, 0x19194, 2195 0x1919c, 0x191b0, 2196 0x191d0, 0x191e8, 2197 0x19238, 0x19290, 2198 0x192a4, 0x192b0, 2199 0x192bc, 0x192bc, 2200 0x19348, 0x1934c, 2201 0x193f8, 0x19418, 2202 0x19420, 0x19428, 2203 0x19430, 0x19444, 2204 0x1944c, 0x1946c, 2205 0x19474, 0x19474, 2206 0x19490, 0x194cc, 2207 0x194f0, 0x194f8, 2208 0x19c00, 0x19c48, 2209 0x19c50, 0x19c80, 2210 0x19c94, 0x19c98, 2211 0x19ca0, 0x19cbc, 2212 0x19ce4, 0x19ce4, 2213 0x19cf0, 0x19cf8, 2214 0x19d00, 0x19d28, 2215 0x19d50, 0x19d78, 2216 0x19d94, 0x19d98, 2217 0x19da0, 0x19dc8, 2218 0x19df0, 0x19e10, 2219 0x19e50, 0x19e6c, 2220 0x19ea0, 0x19ebc, 2221 0x19ec4, 0x19ef4, 2222 0x19f04, 0x19f2c, 2223 0x19f34, 0x19f34, 2224 0x19f40, 0x19f50, 2225 0x19f90, 0x19fac, 2226 0x19fc4, 0x19fc8, 2227 0x19fd0, 0x19fe4, 2228 0x1a000, 0x1a004, 2229 0x1a010, 0x1a06c, 2230 0x1a0b0, 0x1a0e4, 2231 0x1a0ec, 0x1a0f8, 2232 0x1a100, 0x1a108, 2233 0x1a114, 0x1a120, 2234 0x1a128, 0x1a130, 2235 0x1a138, 0x1a138, 2236 0x1a190, 0x1a1c4, 2237 0x1a1fc, 0x1a1fc, 2238 0x1e008, 0x1e00c, 2239 0x1e040, 0x1e044, 2240 0x1e04c, 0x1e04c, 2241 0x1e284, 0x1e290, 2242 0x1e2c0, 0x1e2c0, 2243 0x1e2e0, 0x1e2e0, 2244 0x1e300, 0x1e384, 2245 0x1e3c0, 0x1e3c8, 2246 0x1e408, 0x1e40c, 2247 0x1e440, 0x1e444, 2248 0x1e44c, 0x1e44c, 2249 0x1e684, 0x1e690, 2250 0x1e6c0, 0x1e6c0, 2251 0x1e6e0, 0x1e6e0, 2252 0x1e700, 0x1e784, 2253 0x1e7c0, 0x1e7c8, 2254 0x1e808, 0x1e80c, 2255 0x1e840, 0x1e844, 2256 0x1e84c, 0x1e84c, 2257 0x1ea84, 0x1ea90, 2258 0x1eac0, 0x1eac0, 2259 0x1eae0, 0x1eae0, 2260 0x1eb00, 0x1eb84, 2261 0x1ebc0, 0x1ebc8, 2262 0x1ec08, 0x1ec0c, 2263 0x1ec40, 0x1ec44, 2264 0x1ec4c, 0x1ec4c, 2265 0x1ee84, 0x1ee90, 2266 0x1eec0, 0x1eec0, 2267 0x1eee0, 0x1eee0, 2268 0x1ef00, 0x1ef84, 2269 0x1efc0, 0x1efc8, 2270 0x1f008, 0x1f00c, 2271 0x1f040, 0x1f044, 2272 0x1f04c, 0x1f04c, 2273 
0x1f284, 0x1f290, 2274 0x1f2c0, 0x1f2c0, 2275 0x1f2e0, 0x1f2e0, 2276 0x1f300, 0x1f384, 2277 0x1f3c0, 0x1f3c8, 2278 0x1f408, 0x1f40c, 2279 0x1f440, 0x1f444, 2280 0x1f44c, 0x1f44c, 2281 0x1f684, 0x1f690, 2282 0x1f6c0, 0x1f6c0, 2283 0x1f6e0, 0x1f6e0, 2284 0x1f700, 0x1f784, 2285 0x1f7c0, 0x1f7c8, 2286 0x1f808, 0x1f80c, 2287 0x1f840, 0x1f844, 2288 0x1f84c, 0x1f84c, 2289 0x1fa84, 0x1fa90, 2290 0x1fac0, 0x1fac0, 2291 0x1fae0, 0x1fae0, 2292 0x1fb00, 0x1fb84, 2293 0x1fbc0, 0x1fbc8, 2294 0x1fc08, 0x1fc0c, 2295 0x1fc40, 0x1fc44, 2296 0x1fc4c, 0x1fc4c, 2297 0x1fe84, 0x1fe90, 2298 0x1fec0, 0x1fec0, 2299 0x1fee0, 0x1fee0, 2300 0x1ff00, 0x1ff84, 2301 0x1ffc0, 0x1ffc8, 2302 0x30000, 0x30030, 2303 0x30100, 0x30168, 2304 0x30190, 0x301a0, 2305 0x301a8, 0x301b8, 2306 0x301c4, 0x301c8, 2307 0x301d0, 0x301d0, 2308 0x30200, 0x30320, 2309 0x30400, 0x304b4, 2310 0x304c0, 0x3052c, 2311 0x30540, 0x3061c, 2312 0x30800, 0x308a0, 2313 0x308c0, 0x30908, 2314 0x30910, 0x309b8, 2315 0x30a00, 0x30a04, 2316 0x30a0c, 0x30a14, 2317 0x30a1c, 0x30a2c, 2318 0x30a44, 0x30a50, 2319 0x30a74, 0x30a74, 2320 0x30a7c, 0x30afc, 2321 0x30b08, 0x30c24, 2322 0x30d00, 0x30d14, 2323 0x30d1c, 0x30d3c, 2324 0x30d44, 0x30d4c, 2325 0x30d54, 0x30d74, 2326 0x30d7c, 0x30d7c, 2327 0x30de0, 0x30de0, 2328 0x30e00, 0x30ed4, 2329 0x30f00, 0x30fa4, 2330 0x30fc0, 0x30fc4, 2331 0x31000, 0x31004, 2332 0x31080, 0x310fc, 2333 0x31208, 0x31220, 2334 0x3123c, 0x31254, 2335 0x31300, 0x31300, 2336 0x31308, 0x3131c, 2337 0x31338, 0x3133c, 2338 0x31380, 0x31380, 2339 0x31388, 0x313a8, 2340 0x313b4, 0x313b4, 2341 0x31400, 0x31420, 2342 0x31438, 0x3143c, 2343 0x31480, 0x31480, 2344 0x314a8, 0x314a8, 2345 0x314b0, 0x314b4, 2346 0x314c8, 0x314d4, 2347 0x31a40, 0x31a4c, 2348 0x31af0, 0x31b20, 2349 0x31b38, 0x31b3c, 2350 0x31b80, 0x31b80, 2351 0x31ba8, 0x31ba8, 2352 0x31bb0, 0x31bb4, 2353 0x31bc8, 0x31bd4, 2354 0x32140, 0x3218c, 2355 0x321f0, 0x321f4, 2356 0x32200, 0x32200, 2357 0x32218, 0x32218, 2358 0x32400, 0x32400, 2359 0x32408, 0x3241c, 
2360 0x32618, 0x32620, 2361 0x32664, 0x32664, 2362 0x326a8, 0x326a8, 2363 0x326ec, 0x326ec, 2364 0x32a00, 0x32abc, 2365 0x32b00, 0x32b18, 2366 0x32b20, 0x32b38, 2367 0x32b40, 0x32b58, 2368 0x32b60, 0x32b78, 2369 0x32c00, 0x32c00, 2370 0x32c08, 0x32c3c, 2371 0x33000, 0x3302c, 2372 0x33034, 0x33050, 2373 0x33058, 0x33058, 2374 0x33060, 0x3308c, 2375 0x3309c, 0x330ac, 2376 0x330c0, 0x330c0, 2377 0x330c8, 0x330d0, 2378 0x330d8, 0x330e0, 2379 0x330ec, 0x3312c, 2380 0x33134, 0x33150, 2381 0x33158, 0x33158, 2382 0x33160, 0x3318c, 2383 0x3319c, 0x331ac, 2384 0x331c0, 0x331c0, 2385 0x331c8, 0x331d0, 2386 0x331d8, 0x331e0, 2387 0x331ec, 0x33290, 2388 0x33298, 0x332c4, 2389 0x332e4, 0x33390, 2390 0x33398, 0x333c4, 2391 0x333e4, 0x3342c, 2392 0x33434, 0x33450, 2393 0x33458, 0x33458, 2394 0x33460, 0x3348c, 2395 0x3349c, 0x334ac, 2396 0x334c0, 0x334c0, 2397 0x334c8, 0x334d0, 2398 0x334d8, 0x334e0, 2399 0x334ec, 0x3352c, 2400 0x33534, 0x33550, 2401 0x33558, 0x33558, 2402 0x33560, 0x3358c, 2403 0x3359c, 0x335ac, 2404 0x335c0, 0x335c0, 2405 0x335c8, 0x335d0, 2406 0x335d8, 0x335e0, 2407 0x335ec, 0x33690, 2408 0x33698, 0x336c4, 2409 0x336e4, 0x33790, 2410 0x33798, 0x337c4, 2411 0x337e4, 0x337fc, 2412 0x33814, 0x33814, 2413 0x33854, 0x33868, 2414 0x33880, 0x3388c, 2415 0x338c0, 0x338d0, 2416 0x338e8, 0x338ec, 2417 0x33900, 0x3392c, 2418 0x33934, 0x33950, 2419 0x33958, 0x33958, 2420 0x33960, 0x3398c, 2421 0x3399c, 0x339ac, 2422 0x339c0, 0x339c0, 2423 0x339c8, 0x339d0, 2424 0x339d8, 0x339e0, 2425 0x339ec, 0x33a90, 2426 0x33a98, 0x33ac4, 2427 0x33ae4, 0x33b10, 2428 0x33b24, 0x33b28, 2429 0x33b38, 0x33b50, 2430 0x33bf0, 0x33c10, 2431 0x33c24, 0x33c28, 2432 0x33c38, 0x33c50, 2433 0x33cf0, 0x33cfc, 2434 0x34000, 0x34030, 2435 0x34100, 0x34168, 2436 0x34190, 0x341a0, 2437 0x341a8, 0x341b8, 2438 0x341c4, 0x341c8, 2439 0x341d0, 0x341d0, 2440 0x34200, 0x34320, 2441 0x34400, 0x344b4, 2442 0x344c0, 0x3452c, 2443 0x34540, 0x3461c, 2444 0x34800, 0x348a0, 2445 0x348c0, 0x34908, 2446 0x34910, 
0x349b8, 2447 0x34a00, 0x34a04, 2448 0x34a0c, 0x34a14, 2449 0x34a1c, 0x34a2c, 2450 0x34a44, 0x34a50, 2451 0x34a74, 0x34a74, 2452 0x34a7c, 0x34afc, 2453 0x34b08, 0x34c24, 2454 0x34d00, 0x34d14, 2455 0x34d1c, 0x34d3c, 2456 0x34d44, 0x34d4c, 2457 0x34d54, 0x34d74, 2458 0x34d7c, 0x34d7c, 2459 0x34de0, 0x34de0, 2460 0x34e00, 0x34ed4, 2461 0x34f00, 0x34fa4, 2462 0x34fc0, 0x34fc4, 2463 0x35000, 0x35004, 2464 0x35080, 0x350fc, 2465 0x35208, 0x35220, 2466 0x3523c, 0x35254, 2467 0x35300, 0x35300, 2468 0x35308, 0x3531c, 2469 0x35338, 0x3533c, 2470 0x35380, 0x35380, 2471 0x35388, 0x353a8, 2472 0x353b4, 0x353b4, 2473 0x35400, 0x35420, 2474 0x35438, 0x3543c, 2475 0x35480, 0x35480, 2476 0x354a8, 0x354a8, 2477 0x354b0, 0x354b4, 2478 0x354c8, 0x354d4, 2479 0x35a40, 0x35a4c, 2480 0x35af0, 0x35b20, 2481 0x35b38, 0x35b3c, 2482 0x35b80, 0x35b80, 2483 0x35ba8, 0x35ba8, 2484 0x35bb0, 0x35bb4, 2485 0x35bc8, 0x35bd4, 2486 0x36140, 0x3618c, 2487 0x361f0, 0x361f4, 2488 0x36200, 0x36200, 2489 0x36218, 0x36218, 2490 0x36400, 0x36400, 2491 0x36408, 0x3641c, 2492 0x36618, 0x36620, 2493 0x36664, 0x36664, 2494 0x366a8, 0x366a8, 2495 0x366ec, 0x366ec, 2496 0x36a00, 0x36abc, 2497 0x36b00, 0x36b18, 2498 0x36b20, 0x36b38, 2499 0x36b40, 0x36b58, 2500 0x36b60, 0x36b78, 2501 0x36c00, 0x36c00, 2502 0x36c08, 0x36c3c, 2503 0x37000, 0x3702c, 2504 0x37034, 0x37050, 2505 0x37058, 0x37058, 2506 0x37060, 0x3708c, 2507 0x3709c, 0x370ac, 2508 0x370c0, 0x370c0, 2509 0x370c8, 0x370d0, 2510 0x370d8, 0x370e0, 2511 0x370ec, 0x3712c, 2512 0x37134, 0x37150, 2513 0x37158, 0x37158, 2514 0x37160, 0x3718c, 2515 0x3719c, 0x371ac, 2516 0x371c0, 0x371c0, 2517 0x371c8, 0x371d0, 2518 0x371d8, 0x371e0, 2519 0x371ec, 0x37290, 2520 0x37298, 0x372c4, 2521 0x372e4, 0x37390, 2522 0x37398, 0x373c4, 2523 0x373e4, 0x3742c, 2524 0x37434, 0x37450, 2525 0x37458, 0x37458, 2526 0x37460, 0x3748c, 2527 0x3749c, 0x374ac, 2528 0x374c0, 0x374c0, 2529 0x374c8, 0x374d0, 2530 0x374d8, 0x374e0, 2531 0x374ec, 0x3752c, 2532 0x37534, 0x37550, 2533 
0x37558, 0x37558, 2534 0x37560, 0x3758c, 2535 0x3759c, 0x375ac, 2536 0x375c0, 0x375c0, 2537 0x375c8, 0x375d0, 2538 0x375d8, 0x375e0, 2539 0x375ec, 0x37690, 2540 0x37698, 0x376c4, 2541 0x376e4, 0x37790, 2542 0x37798, 0x377c4, 2543 0x377e4, 0x377fc, 2544 0x37814, 0x37814, 2545 0x37854, 0x37868, 2546 0x37880, 0x3788c, 2547 0x378c0, 0x378d0, 2548 0x378e8, 0x378ec, 2549 0x37900, 0x3792c, 2550 0x37934, 0x37950, 2551 0x37958, 0x37958, 2552 0x37960, 0x3798c, 2553 0x3799c, 0x379ac, 2554 0x379c0, 0x379c0, 2555 0x379c8, 0x379d0, 2556 0x379d8, 0x379e0, 2557 0x379ec, 0x37a90, 2558 0x37a98, 0x37ac4, 2559 0x37ae4, 0x37b10, 2560 0x37b24, 0x37b28, 2561 0x37b38, 0x37b50, 2562 0x37bf0, 0x37c10, 2563 0x37c24, 0x37c28, 2564 0x37c38, 0x37c50, 2565 0x37cf0, 0x37cfc, 2566 0x40040, 0x40040, 2567 0x40080, 0x40084, 2568 0x40100, 0x40100, 2569 0x40140, 0x401bc, 2570 0x40200, 0x40214, 2571 0x40228, 0x40228, 2572 0x40240, 0x40258, 2573 0x40280, 0x40280, 2574 0x40304, 0x40304, 2575 0x40330, 0x4033c, 2576 0x41304, 0x413c8, 2577 0x413d0, 0x413dc, 2578 0x413f0, 0x413f0, 2579 0x41400, 0x4140c, 2580 0x41414, 0x4141c, 2581 0x41480, 0x414d0, 2582 0x44000, 0x4407c, 2583 0x440c0, 0x441ac, 2584 0x441b4, 0x4427c, 2585 0x442c0, 0x443ac, 2586 0x443b4, 0x4447c, 2587 0x444c0, 0x445ac, 2588 0x445b4, 0x4467c, 2589 0x446c0, 0x447ac, 2590 0x447b4, 0x4487c, 2591 0x448c0, 0x449ac, 2592 0x449b4, 0x44a7c, 2593 0x44ac0, 0x44bac, 2594 0x44bb4, 0x44c7c, 2595 0x44cc0, 0x44dac, 2596 0x44db4, 0x44e7c, 2597 0x44ec0, 0x44fac, 2598 0x44fb4, 0x4507c, 2599 0x450c0, 0x451ac, 2600 0x451b4, 0x451fc, 2601 0x45800, 0x45804, 2602 0x45810, 0x45830, 2603 0x45840, 0x45860, 2604 0x45868, 0x45868, 2605 0x45880, 0x45884, 2606 0x458a0, 0x458b0, 2607 0x45a00, 0x45a04, 2608 0x45a10, 0x45a30, 2609 0x45a40, 0x45a60, 2610 0x45a68, 0x45a68, 2611 0x45a80, 0x45a84, 2612 0x45aa0, 0x45ab0, 2613 0x460c0, 0x460e4, 2614 0x47000, 0x4703c, 2615 0x47044, 0x4708c, 2616 0x47200, 0x47250, 2617 0x47400, 0x47408, 2618 0x47414, 0x47420, 2619 0x47600, 0x47618, 
2620 0x47800, 0x47814, 2621 0x47820, 0x4782c, 2622 0x50000, 0x50084, 2623 0x50090, 0x500cc, 2624 0x50300, 0x50384, 2625 0x50400, 0x50400, 2626 0x50800, 0x50884, 2627 0x50890, 0x508cc, 2628 0x50b00, 0x50b84, 2629 0x50c00, 0x50c00, 2630 0x51000, 0x51020, 2631 0x51028, 0x510b0, 2632 0x51300, 0x51324, 2633 }; 2634 2635 static const unsigned int t6vf_reg_ranges[] = { 2636 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2637 VF_MPS_REG(A_MPS_VF_CTL), 2638 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2639 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2640 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2641 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2642 FW_T6VF_MBDATA_BASE_ADDR, 2643 FW_T6VF_MBDATA_BASE_ADDR + 2644 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2645 }; 2646 2647 u32 *buf_end = (u32 *)(buf + buf_size); 2648 const unsigned int *reg_ranges; 2649 int reg_ranges_size, range; 2650 unsigned int chip_version = chip_id(adap); 2651 2652 /* 2653 * Select the right set of register ranges to dump depending on the 2654 * adapter chip type. 
2655 */ 2656 switch (chip_version) { 2657 case CHELSIO_T4: 2658 if (adap->flags & IS_VF) { 2659 reg_ranges = t4vf_reg_ranges; 2660 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); 2661 } else { 2662 reg_ranges = t4_reg_ranges; 2663 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2664 } 2665 break; 2666 2667 case CHELSIO_T5: 2668 if (adap->flags & IS_VF) { 2669 reg_ranges = t5vf_reg_ranges; 2670 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); 2671 } else { 2672 reg_ranges = t5_reg_ranges; 2673 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2674 } 2675 break; 2676 2677 case CHELSIO_T6: 2678 if (adap->flags & IS_VF) { 2679 reg_ranges = t6vf_reg_ranges; 2680 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 2681 } else { 2682 reg_ranges = t6_reg_ranges; 2683 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2684 } 2685 break; 2686 2687 default: 2688 CH_ERR(adap, 2689 "Unsupported chip version %d\n", chip_version); 2690 return; 2691 } 2692 2693 /* 2694 * Clear the register buffer and insert the appropriate register 2695 * values selected by the above register ranges. 2696 */ 2697 memset(buf, 0, buf_size); 2698 for (range = 0; range < reg_ranges_size; range += 2) { 2699 unsigned int reg = reg_ranges[range]; 2700 unsigned int last_reg = reg_ranges[range + 1]; 2701 u32 *bufp = (u32 *)(buf + reg); 2702 2703 /* 2704 * Iterate across the register range filling in the register 2705 * buffer but don't write past the end of the register buffer. 2706 */ 2707 while (reg <= last_reg && bufp < buf_end) { 2708 *bufp++ = t4_read_reg(adap, reg); 2709 reg += sizeof(u32); 2710 } 2711 } 2712} 2713 2714/* 2715 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID 2716 * header followed by one or more VPD-R sections, each with its own header. 
2717 */ 2718struct t4_vpd_hdr { 2719 u8 id_tag; 2720 u8 id_len[2]; 2721 u8 id_data[ID_LEN]; 2722}; 2723 2724struct t4_vpdr_hdr { 2725 u8 vpdr_tag; 2726 u8 vpdr_len[2]; 2727}; 2728 2729/* 2730 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 2731 */ 2732#define EEPROM_DELAY 10 /* 10us per poll spin */ 2733#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ 2734 2735#define EEPROM_STAT_ADDR 0x7bfc 2736#define VPD_SIZE 0x800 2737#define VPD_BASE 0x400 2738#define VPD_BASE_OLD 0 2739#define VPD_LEN 1024 2740#define VPD_INFO_FLD_HDR_SIZE 3 2741#define CHELSIO_VPD_UNIQUE_ID 0x82 2742 2743/* 2744 * Small utility function to wait till any outstanding VPD Access is complete. 2745 * We have a per-adapter state variable "VPD Busy" to indicate when we have a 2746 * VPD Access in flight. This allows us to handle the problem of having a 2747 * previous VPD Access time out and prevent an attempt to inject a new VPD 2748 * Request before any in-flight VPD reguest has completed. 2749 */ 2750static int t4_seeprom_wait(struct adapter *adapter) 2751{ 2752 unsigned int base = adapter->params.pci.vpd_cap_addr; 2753 int max_poll; 2754 2755 /* 2756 * If no VPD Access is in flight, we can just return success right 2757 * away. 2758 */ 2759 if (!adapter->vpd_busy) 2760 return 0; 2761 2762 /* 2763 * Poll the VPD Capability Address/Flag register waiting for it 2764 * to indicate that the operation is complete. 2765 */ 2766 max_poll = EEPROM_MAX_POLL; 2767 do { 2768 u16 val; 2769 2770 udelay(EEPROM_DELAY); 2771 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); 2772 2773 /* 2774 * If the operation is complete, mark the VPD as no longer 2775 * busy and return success. 2776 */ 2777 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { 2778 adapter->vpd_busy = 0; 2779 return 0; 2780 } 2781 } while (--max_poll); 2782 2783 /* 2784 * Failure! 
Note that we leave the VPD Busy status set in order to 2785 * avoid pushing a new VPD Access request into the VPD Capability till 2786 * the current operation eventually succeeds. It's a bug to issue a 2787 * new request when an existing request is in flight and will result 2788 * in corrupt hardware state. 2789 */ 2790 return -ETIMEDOUT; 2791} 2792 2793/** 2794 * t4_seeprom_read - read a serial EEPROM location 2795 * @adapter: adapter to read 2796 * @addr: EEPROM virtual address 2797 * @data: where to store the read data 2798 * 2799 * Read a 32-bit word from a location in serial EEPROM using the card's PCI 2800 * VPD capability. Note that this function must be called with a virtual 2801 * address. 2802 */ 2803int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) 2804{ 2805 unsigned int base = adapter->params.pci.vpd_cap_addr; 2806 int ret; 2807 2808 /* 2809 * VPD Accesses must alway be 4-byte aligned! 2810 */ 2811 if (addr >= EEPROMVSIZE || (addr & 3)) 2812 return -EINVAL; 2813 2814 /* 2815 * Wait for any previous operation which may still be in flight to 2816 * complete. 2817 */ 2818 ret = t4_seeprom_wait(adapter); 2819 if (ret) { 2820 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2821 return ret; 2822 } 2823 2824 /* 2825 * Issue our new VPD Read request, mark the VPD as being busy and wait 2826 * for our request to complete. If it doesn't complete, note the 2827 * error and return it to our caller. Note that we do not reset the 2828 * VPD Busy status! 2829 */ 2830 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); 2831 adapter->vpd_busy = 1; 2832 adapter->vpd_flag = PCI_VPD_ADDR_F; 2833 ret = t4_seeprom_wait(adapter); 2834 if (ret) { 2835 CH_ERR(adapter, "VPD read of address %#x failed\n", addr); 2836 return ret; 2837 } 2838 2839 /* 2840 * Grab the returned data, swizzle it into our endianness and 2841 * return success. 
2842 */ 2843 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 2844 *data = le32_to_cpu(*data); 2845 return 0; 2846} 2847 2848/** 2849 * t4_seeprom_write - write a serial EEPROM location 2850 * @adapter: adapter to write 2851 * @addr: virtual EEPROM address 2852 * @data: value to write 2853 * 2854 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 2855 * VPD capability. Note that this function must be called with a virtual 2856 * address. 2857 */ 2858int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 2859{ 2860 unsigned int base = adapter->params.pci.vpd_cap_addr; 2861 int ret; 2862 u32 stats_reg; 2863 int max_poll; 2864 2865 /* 2866 * VPD Accesses must alway be 4-byte aligned! 2867 */ 2868 if (addr >= EEPROMVSIZE || (addr & 3)) 2869 return -EINVAL; 2870 2871 /* 2872 * Wait for any previous operation which may still be in flight to 2873 * complete. 2874 */ 2875 ret = t4_seeprom_wait(adapter); 2876 if (ret) { 2877 CH_ERR(adapter, "VPD still busy from previous operation\n"); 2878 return ret; 2879 } 2880 2881 /* 2882 * Issue our new VPD Read request, mark the VPD as being busy and wait 2883 * for our request to complete. If it doesn't complete, note the 2884 * error and return it to our caller. Note that we do not reset the 2885 * VPD Busy status! 2886 */ 2887 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 2888 cpu_to_le32(data)); 2889 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 2890 (u16)addr | PCI_VPD_ADDR_F); 2891 adapter->vpd_busy = 1; 2892 adapter->vpd_flag = 0; 2893 ret = t4_seeprom_wait(adapter); 2894 if (ret) { 2895 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 2896 return ret; 2897 } 2898 2899 /* 2900 * Reset PCI_VPD_DATA register after a transaction and wait for our 2901 * request to complete. If it doesn't complete, return error. 
2902 */ 2903 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 2904 max_poll = EEPROM_MAX_POLL; 2905 do { 2906 udelay(EEPROM_DELAY); 2907 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 2908 } while ((stats_reg & 0x1) && --max_poll); 2909 if (!max_poll) 2910 return -ETIMEDOUT; 2911 2912 /* Return success! */ 2913 return 0; 2914} 2915 2916/** 2917 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 2918 * @phys_addr: the physical EEPROM address 2919 * @fn: the PCI function number 2920 * @sz: size of function-specific area 2921 * 2922 * Translate a physical EEPROM address to virtual. The first 1K is 2923 * accessed through virtual addresses starting at 31K, the rest is 2924 * accessed through virtual addresses starting at 0. 2925 * 2926 * The mapping is as follows: 2927 * [0..1K) -> [31K..32K) 2928 * [1K..1K+A) -> [ES-A..ES) 2929 * [1K+A..ES) -> [0..ES-A-1K) 2930 * 2931 * where A = @fn * @sz, and ES = EEPROM size. 2932 */ 2933int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 2934{ 2935 fn *= sz; 2936 if (phys_addr < 1024) 2937 return phys_addr + (31 << 10); 2938 if (phys_addr < 1024 + fn) 2939 return EEPROMSIZE - fn + phys_addr - 1024; 2940 if (phys_addr < EEPROMSIZE) 2941 return phys_addr - 1024 - fn; 2942 return -EINVAL; 2943} 2944 2945/** 2946 * t4_seeprom_wp - enable/disable EEPROM write protection 2947 * @adapter: the adapter 2948 * @enable: whether to enable or disable write protection 2949 * 2950 * Enables or disables write protection on the serial EEPROM. 2951 */ 2952int t4_seeprom_wp(struct adapter *adapter, int enable) 2953{ 2954 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); 2955} 2956 2957/** 2958 * get_vpd_keyword_val - Locates an information field keyword in the VPD 2959 * @vpd: Pointer to buffered vpd data structure 2960 * @kw: The keyword to search for 2961 * @region: VPD region to search (starting from 0) 2962 * 2963 * Returns the value of the information field keyword or 2964 * -ENOENT otherwise. 2965 */ 2966static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region) 2967{ 2968 int i, tag; 2969 unsigned int offset, len; 2970 const struct t4_vpdr_hdr *vpdr; 2971 2972 offset = sizeof(struct t4_vpd_hdr); 2973 vpdr = (const void *)(vpd + offset); 2974 tag = vpdr->vpdr_tag; 2975 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8); 2976 while (region--) { 2977 offset += sizeof(struct t4_vpdr_hdr) + len; 2978 vpdr = (const void *)(vpd + offset); 2979 if (++tag != vpdr->vpdr_tag) 2980 return -ENOENT; 2981 len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8); 2982 } 2983 offset += sizeof(struct t4_vpdr_hdr); 2984 2985 if (offset + len > VPD_LEN) { 2986 return -ENOENT; 2987 } 2988 2989 for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) { 2990 if (memcmp(vpd + i , kw , 2) == 0){ 2991 i += VPD_INFO_FLD_HDR_SIZE; 2992 return i; 2993 } 2994 2995 i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2]; 2996 } 2997 2998 return -ENOENT; 2999} 3000 3001 3002/** 3003 * get_vpd_params - read VPD parameters from VPD EEPROM 3004 * @adapter: adapter to read 3005 * @p: where to store the parameters 3006 * @vpd: caller provided temporary space to read the VPD into 3007 * 3008 * Reads card parameters stored in VPD EEPROM. 3009 */ 3010static int get_vpd_params(struct adapter *adapter, struct vpd_params *p, 3011 uint16_t device_id, u32 *buf) 3012{ 3013 int i, ret, addr; 3014 int ec, sn, pn, na, md; 3015 u8 csum; 3016 const u8 *vpd = (const u8 *)buf; 3017 3018 /* 3019 * Card information normally starts at VPD_BASE but early cards had 3020 * it at 0. 
3021 */ 3022 ret = t4_seeprom_read(adapter, VPD_BASE, buf); 3023 if (ret) 3024 return (ret); 3025 3026 /* 3027 * The VPD shall have a unique identifier specified by the PCI SIG. 3028 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD 3029 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software 3030 * is expected to automatically put this entry at the 3031 * beginning of the VPD. 3032 */ 3033 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; 3034 3035 for (i = 0; i < VPD_LEN; i += 4) { 3036 ret = t4_seeprom_read(adapter, addr + i, buf++); 3037 if (ret) 3038 return ret; 3039 } 3040 3041#define FIND_VPD_KW(var,name) do { \ 3042 var = get_vpd_keyword_val(vpd, name, 0); \ 3043 if (var < 0) { \ 3044 CH_ERR(adapter, "missing VPD keyword " name "\n"); \ 3045 return -EINVAL; \ 3046 } \ 3047} while (0) 3048 3049 FIND_VPD_KW(i, "RV"); 3050 for (csum = 0; i >= 0; i--) 3051 csum += vpd[i]; 3052 3053 if (csum) { 3054 CH_ERR(adapter, 3055 "corrupted VPD EEPROM, actual csum %u\n", csum); 3056 return -EINVAL; 3057 } 3058 3059 FIND_VPD_KW(ec, "EC"); 3060 FIND_VPD_KW(sn, "SN"); 3061 FIND_VPD_KW(pn, "PN"); 3062 FIND_VPD_KW(na, "NA"); 3063#undef FIND_VPD_KW 3064 3065 memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN); 3066 strstrip(p->id); 3067 memcpy(p->ec, vpd + ec, EC_LEN); 3068 strstrip(p->ec); 3069 i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2]; 3070 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 3071 strstrip(p->sn); 3072 i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2]; 3073 memcpy(p->pn, vpd + pn, min(i, PN_LEN)); 3074 strstrip((char *)p->pn); 3075 i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2]; 3076 memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); 3077 strstrip((char *)p->na); 3078 3079 if (device_id & 0x80) 3080 return 0; /* Custom card */ 3081 3082 md = get_vpd_keyword_val(vpd, "VF", 1); 3083 if (md < 0) { 3084 snprintf(p->md, sizeof(p->md), "unknown"); 3085 } else { 3086 i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2]; 3087 memcpy(p->md, vpd + 
md, min(i, MD_LEN)); 3088 strstrip((char *)p->md); 3089 } 3090 3091 return 0; 3092} 3093 3094/* serial flash and firmware constants and flash config file constants */ 3095enum { 3096 SF_ATTEMPTS = 10, /* max retries for SF operations */ 3097 3098 /* flash command opcodes */ 3099 SF_PROG_PAGE = 2, /* program 256B page */ 3100 SF_WR_DISABLE = 4, /* disable writes */ 3101 SF_RD_STATUS = 5, /* read status register */ 3102 SF_WR_ENABLE = 6, /* enable writes */ 3103 SF_RD_DATA_FAST = 0xb, /* read flash */ 3104 SF_RD_ID = 0x9f, /* read ID */ 3105 SF_ERASE_SECTOR = 0xd8, /* erase 64KB sector */ 3106}; 3107 3108/** 3109 * sf1_read - read data from the serial flash 3110 * @adapter: the adapter 3111 * @byte_cnt: number of bytes to read 3112 * @cont: whether another operation will be chained 3113 * @lock: whether to lock SF for PL access only 3114 * @valp: where to store the read data 3115 * 3116 * Reads up to 4 bytes of data from the serial flash. The location of 3117 * the read needs to be specified prior to calling this by issuing the 3118 * appropriate commands to the serial flash. 3119 */ 3120static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 3121 int lock, u32 *valp) 3122{ 3123 int ret; 3124 3125 if (!byte_cnt || byte_cnt > 4) 3126 return -EINVAL; 3127 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3128 return -EBUSY; 3129 t4_write_reg(adapter, A_SF_OP, 3130 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); 3131 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3132 if (!ret) 3133 *valp = t4_read_reg(adapter, A_SF_DATA); 3134 return ret; 3135} 3136 3137/** 3138 * sf1_write - write data to the serial flash 3139 * @adapter: the adapter 3140 * @byte_cnt: number of bytes to write 3141 * @cont: whether another operation will be chained 3142 * @lock: whether to lock SF for PL access only 3143 * @val: value to write 3144 * 3145 * Writes up to 4 bytes of data to the serial flash. 
The location of 3146 * the write needs to be specified prior to calling this by issuing the 3147 * appropriate commands to the serial flash. 3148 */ 3149static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 3150 int lock, u32 val) 3151{ 3152 if (!byte_cnt || byte_cnt > 4) 3153 return -EINVAL; 3154 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) 3155 return -EBUSY; 3156 t4_write_reg(adapter, A_SF_DATA, val); 3157 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | 3158 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); 3159 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); 3160} 3161 3162/** 3163 * flash_wait_op - wait for a flash operation to complete 3164 * @adapter: the adapter 3165 * @attempts: max number of polls of the status register 3166 * @delay: delay between polls in ms 3167 * 3168 * Wait for a flash operation to complete by polling the status register. 3169 */ 3170static int flash_wait_op(struct adapter *adapter, int attempts, int delay) 3171{ 3172 int ret; 3173 u32 status; 3174 3175 while (1) { 3176 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 3177 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 3178 return ret; 3179 if (!(status & 1)) 3180 return 0; 3181 if (--attempts == 0) 3182 return -EAGAIN; 3183 if (delay) 3184 msleep(delay); 3185 } 3186} 3187 3188/** 3189 * t4_read_flash - read words from serial flash 3190 * @adapter: the adapter 3191 * @addr: the start address for the read 3192 * @nwords: how many 32-bit words to read 3193 * @data: where to store the read data 3194 * @byte_oriented: whether to store data as bytes or as words 3195 * 3196 * Read the specified number of 32-bit words from the serial flash. 3197 * If @byte_oriented is set the read data is stored as a byte array 3198 * (i.e., big-endian), otherwise as 32-bit words in the platform's 3199 * natural endianness. 
3200 */ 3201int t4_read_flash(struct adapter *adapter, unsigned int addr, 3202 unsigned int nwords, u32 *data, int byte_oriented) 3203{ 3204 int ret; 3205 3206 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 3207 return -EINVAL; 3208 3209 addr = swab32(addr) | SF_RD_DATA_FAST; 3210 3211 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 3212 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 3213 return ret; 3214 3215 for ( ; nwords; nwords--, data++) { 3216 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 3217 if (nwords == 1) 3218 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3219 if (ret) 3220 return ret; 3221 if (byte_oriented) 3222 *data = (__force __u32)(cpu_to_be32(*data)); 3223 } 3224 return 0; 3225} 3226 3227/** 3228 * t4_write_flash - write up to a page of data to the serial flash 3229 * @adapter: the adapter 3230 * @addr: the start address to write 3231 * @n: length of data to write in bytes 3232 * @data: the data to write 3233 * @byte_oriented: whether to store data as bytes or as words 3234 * 3235 * Writes up to a page of data (256 bytes) to the serial flash starting 3236 * at the given address. All the data must be written to the same page. 3237 * If @byte_oriented is set the write data is stored as byte stream 3238 * (i.e. matches what on disk), otherwise in big-endian. 
3239 */ 3240int t4_write_flash(struct adapter *adapter, unsigned int addr, 3241 unsigned int n, const u8 *data, int byte_oriented) 3242{ 3243 int ret; 3244 u32 buf[SF_PAGE_SIZE / 4]; 3245 unsigned int i, c, left, val, offset = addr & 0xff; 3246 3247 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) 3248 return -EINVAL; 3249 3250 val = swab32(addr) | SF_PROG_PAGE; 3251 3252 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 3253 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) 3254 goto unlock; 3255 3256 for (left = n; left; left -= c) { 3257 c = min(left, 4U); 3258 for (val = 0, i = 0; i < c; ++i) 3259 val = (val << 8) + *data++; 3260 3261 if (!byte_oriented) 3262 val = cpu_to_be32(val); 3263 3264 ret = sf1_write(adapter, c, c != left, 1, val); 3265 if (ret) 3266 goto unlock; 3267 } 3268 ret = flash_wait_op(adapter, 8, 1); 3269 if (ret) 3270 goto unlock; 3271 3272 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3273 3274 /* Read the page to verify the write succeeded */ 3275 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 3276 byte_oriented); 3277 if (ret) 3278 return ret; 3279 3280 if (memcmp(data - n, (u8 *)buf + offset, n)) { 3281 CH_ERR(adapter, 3282 "failed to correctly write the flash page at %#x\n", 3283 addr); 3284 return -EIO; 3285 } 3286 return 0; 3287 3288unlock: 3289 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3290 return ret; 3291} 3292 3293/** 3294 * t4_get_fw_version - read the firmware version 3295 * @adapter: the adapter 3296 * @vers: where to place the version 3297 * 3298 * Reads the FW version from flash. 3299 */ 3300int t4_get_fw_version(struct adapter *adapter, u32 *vers) 3301{ 3302 return t4_read_flash(adapter, FLASH_FW_START + 3303 offsetof(struct fw_hdr, fw_ver), 1, 3304 vers, 0); 3305} 3306 3307/** 3308 * t4_get_fw_hdr - read the firmware header 3309 * @adapter: the adapter 3310 * @hdr: where to place the version 3311 * 3312 * Reads the FW header from flash into caller provided buffer. 
3313 */ 3314int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr) 3315{ 3316 return t4_read_flash(adapter, FLASH_FW_START, 3317 sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1); 3318} 3319 3320/** 3321 * t4_get_bs_version - read the firmware bootstrap version 3322 * @adapter: the adapter 3323 * @vers: where to place the version 3324 * 3325 * Reads the FW Bootstrap version from flash. 3326 */ 3327int t4_get_bs_version(struct adapter *adapter, u32 *vers) 3328{ 3329 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + 3330 offsetof(struct fw_hdr, fw_ver), 1, 3331 vers, 0); 3332} 3333 3334/** 3335 * t4_get_tp_version - read the TP microcode version 3336 * @adapter: the adapter 3337 * @vers: where to place the version 3338 * 3339 * Reads the TP microcode version from flash. 3340 */ 3341int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3342{ 3343 return t4_read_flash(adapter, FLASH_FW_START + 3344 offsetof(struct fw_hdr, tp_microcode_ver), 3345 1, vers, 0); 3346} 3347 3348/** 3349 * t4_get_exprom_version - return the Expansion ROM version (if any) 3350 * @adapter: the adapter 3351 * @vers: where to place the version 3352 * 3353 * Reads the Expansion ROM header from FLASH and returns the version 3354 * number (if present) through the @vers return value pointer. We return 3355 * this in the Firmware Version Format since it's convenient. Return 3356 * 0 on success, -ENOENT if no Expansion ROM is present. 
3357 */ 3358int t4_get_exprom_version(struct adapter *adap, u32 *vers) 3359{ 3360 struct exprom_header { 3361 unsigned char hdr_arr[16]; /* must start with 0x55aa */ 3362 unsigned char hdr_ver[4]; /* Expansion ROM version */ 3363 } *hdr; 3364 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), 3365 sizeof(u32))]; 3366 int ret; 3367 3368 ret = t4_read_flash(adap, FLASH_EXP_ROM_START, 3369 ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 3370 0); 3371 if (ret) 3372 return ret; 3373 3374 hdr = (struct exprom_header *)exprom_header_buf; 3375 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) 3376 return -ENOENT; 3377 3378 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | 3379 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | 3380 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | 3381 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); 3382 return 0; 3383} 3384 3385/** 3386 * t4_get_scfg_version - return the Serial Configuration version 3387 * @adapter: the adapter 3388 * @vers: where to place the version 3389 * 3390 * Reads the Serial Configuration Version via the Firmware interface 3391 * (thus this can only be called once we're ready to issue Firmware 3392 * commands). The format of the Serial Configuration version is 3393 * adapter specific. Returns 0 on success, an error on failure. 3394 * 3395 * Note that early versions of the Firmware didn't include the ability 3396 * to retrieve the Serial Configuration version, so we zero-out the 3397 * return-value parameter in that case to avoid leaving it with 3398 * garbage in it. 3399 * 3400 * Also note that the Firmware will return its cached copy of the Serial 3401 * Initialization Revision ID, not the actual Revision ID as written in 3402 * the Serial EEPROM. This is only an issue if a new VPD has been written 3403 * and the Firmware/Chip haven't yet gone through a RESET sequence. 
So 3404 * it's best to defer calling this routine till after a FW_RESET_CMD has 3405 * been issued if the Host Driver will be performing a full adapter 3406 * initialization. 3407 */ 3408int t4_get_scfg_version(struct adapter *adapter, u32 *vers) 3409{ 3410 u32 scfgrev_param; 3411 int ret; 3412 3413 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3414 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV)); 3415 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3416 1, &scfgrev_param, vers); 3417 if (ret) 3418 *vers = 0; 3419 return ret; 3420} 3421 3422/** 3423 * t4_get_vpd_version - return the VPD version 3424 * @adapter: the adapter 3425 * @vers: where to place the version 3426 * 3427 * Reads the VPD via the Firmware interface (thus this can only be called 3428 * once we're ready to issue Firmware commands). The format of the 3429 * VPD version is adapter specific. Returns 0 on success, an error on 3430 * failure. 3431 * 3432 * Note that early versions of the Firmware didn't include the ability 3433 * to retrieve the VPD version, so we zero-out the return-value parameter 3434 * in that case to avoid leaving it with garbage in it. 3435 * 3436 * Also note that the Firmware will return its cached copy of the VPD 3437 * Revision ID, not the actual Revision ID as written in the Serial 3438 * EEPROM. This is only an issue if a new VPD has been written and the 3439 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best 3440 * to defer calling this routine till after a FW_RESET_CMD has been issued 3441 * if the Host Driver will be performing a full adapter initialization. 
3442 */ 3443int t4_get_vpd_version(struct adapter *adapter, u32 *vers) 3444{ 3445 u32 vpdrev_param; 3446 int ret; 3447 3448 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3449 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); 3450 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3451 1, &vpdrev_param, vers); 3452 if (ret) 3453 *vers = 0; 3454 return ret; 3455} 3456 3457/** 3458 * t4_get_version_info - extract various chip/firmware version information 3459 * @adapter: the adapter 3460 * 3461 * Reads various chip/firmware version numbers and stores them into the 3462 * adapter Adapter Parameters structure. If any of the efforts fails 3463 * the first failure will be returned, but all of the version numbers 3464 * will be read. 3465 */ 3466int t4_get_version_info(struct adapter *adapter) 3467{ 3468 int ret = 0; 3469 3470 #define FIRST_RET(__getvinfo) \ 3471 do { \ 3472 int __ret = __getvinfo; \ 3473 if (__ret && !ret) \ 3474 ret = __ret; \ 3475 } while (0) 3476 3477 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); 3478 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); 3479 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); 3480 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); 3481 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); 3482 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); 3483 3484 #undef FIRST_RET 3485 3486 return ret; 3487} 3488 3489/** 3490 * t4_flash_erase_sectors - erase a range of flash sectors 3491 * @adapter: the adapter 3492 * @start: the first sector to erase 3493 * @end: the last sector to erase 3494 * 3495 * Erases the sectors in the given inclusive range. 
3496 */ 3497int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) 3498{ 3499 int ret = 0; 3500 3501 if (end >= adapter->params.sf_nsec) 3502 return -EINVAL; 3503 3504 while (start <= end) { 3505 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 3506 (ret = sf1_write(adapter, 4, 0, 1, 3507 SF_ERASE_SECTOR | (start << 8))) != 0 || 3508 (ret = flash_wait_op(adapter, 14, 500)) != 0) { 3509 CH_ERR(adapter, 3510 "erase of flash sector %d failed, error %d\n", 3511 start, ret); 3512 break; 3513 } 3514 start++; 3515 } 3516 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3517 return ret; 3518} 3519 3520/** 3521 * t4_flash_cfg_addr - return the address of the flash configuration file 3522 * @adapter: the adapter 3523 * 3524 * Return the address within the flash where the Firmware Configuration 3525 * File is stored, or an error if the device FLASH is too small to contain 3526 * a Firmware Configuration File. 3527 */ 3528int t4_flash_cfg_addr(struct adapter *adapter) 3529{ 3530 /* 3531 * If the device FLASH isn't large enough to hold a Firmware 3532 * Configuration File, return an error. 3533 */ 3534 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) 3535 return -ENOSPC; 3536 3537 return FLASH_CFG_START; 3538} 3539 3540/* 3541 * Return TRUE if the specified firmware matches the adapter. I.e. T4 3542 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead 3543 * and emit an error message for mismatched firmware to save our caller the 3544 * effort ... 3545 */ 3546static int t4_fw_matches_chip(struct adapter *adap, 3547 const struct fw_hdr *hdr) 3548{ 3549 /* 3550 * The expression below will return FALSE for any unsupported adapter 3551 * which will keep us "honest" in the future ... 
 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
	    "FW image (%d) is not suitable for this adapter (%d)\n",
	    hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	Returns 0 on success, a negative errno on failure.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap images are written to their own dedicated flash region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Validate the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
		    "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
		    "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		    fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit word sum of a valid image is all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
		    "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, patch in the real version field of the header. */
	ret = t4_write_flash(adap,
	    fw_start + offsetof(struct fw_hdr, fw_ver),
	    sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		    ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			    V_FW_PARAMS_CMD_PFN(adap->pf) |
			    V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

/*
 * Read the CIM PIF logic analyzer request/response buffers into pif_req and
 * pif_rsp; optionally return the current write pointers.  LA capture is
 * stopped (F_LADBGEN cleared) while reading and the original debug config is
 * restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the CIM MA logic analyzer request/response buffers into ma_req and
 * ma_rsp.  As above, LA capture is paused for the duration of the read.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the ULP RX logic analyzer into la_buf.  Eight 32-bit streams are
 * interleaved: entry j of stream i lands at la_buf[j * 8 + i].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
static uint32_t fwcaps16_to_caps32(uint16_t caps16)
{
	uint32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(FORCE_PAUSE);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}

/**
 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 *	@caps32: a 32-bit Port Capabilities value
 *
 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
 *	not all 32-bit Port Capabilities can be represented in the 16-bit
 *	Port Capabilities and some fields/values may not make it.
 */
static uint16_t fwcaps32_to_caps16(uint32_t caps32)
{
	uint16_t caps16 = 0;

	#define CAP32_TO_CAP16(__cap) \
		do { \
			if (caps32 & FW_PORT_CAP32_##__cap) \
				caps16 |= FW_PORT_CAP_##__cap; \
		} while (0)

	CAP32_TO_CAP16(SPEED_100M);
	CAP32_TO_CAP16(SPEED_1G);
	CAP32_TO_CAP16(SPEED_10G);
	CAP32_TO_CAP16(SPEED_25G);
	CAP32_TO_CAP16(SPEED_40G);
	CAP32_TO_CAP16(SPEED_100G);
	CAP32_TO_CAP16(FC_RX);
	CAP32_TO_CAP16(FC_TX);
	CAP32_TO_CAP16(802_3_PAUSE);
	CAP32_TO_CAP16(802_3_ASM_DIR);
	CAP32_TO_CAP16(ANEG);
	CAP32_TO_CAP16(FORCE_PAUSE);
	CAP32_TO_CAP16(MDIAUTO);
	CAP32_TO_CAP16(MDISTRAIGHT);
	CAP32_TO_CAP16(FEC_RS);
	CAP32_TO_CAP16(FEC_BASER_RS);

	#undef CAP32_TO_CAP16

	return caps16;
}

/* True if the port is a BASE-T (copper) port type. */
static bool
is_bt(struct port_info *pi)
{

	return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
	    pi->port_type == FW_PORT_TYPE_BT_XFI ||
	    pi->port_type == FW_PORT_TYPE_BT_XAUI);
}

/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
	unsigned int aneg, fc, fec, speed, rcap;

	/* Translate the requested pause settings to FW capability bits. */
	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP32_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP32_FC_TX;
	if (!(lc->requested_fc & PAUSE_AUTONEG))
		fc |= FW_PORT_CAP32_FORCE_PAUSE;

	/* FEC: use the hint when on AUTO, otherwise the explicit request. */
	fec = 0;
	if (lc->requested_fec == FEC_AUTO)
		fec = lc->fec_hint;
	else {
		if (lc->requested_fec & FEC_RS)
			fec |= FW_PORT_CAP32_FEC_RS;
		if (lc->requested_fec & FEC_BASER_RS)
			fec |= FW_PORT_CAP32_FEC_BASER_RS;
	}

	if (lc->requested_aneg == AUTONEG_DISABLE)
		aneg = 0;
	else if (lc->requested_aneg == AUTONEG_ENABLE)
		aneg = FW_PORT_CAP32_ANEG;
	else
		aneg = lc->supported & FW_PORT_CAP32_ANEG;

	if (aneg) {
		/* Advertise every supported speed when autonegotiating. */
		speed = lc->supported &
		    V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	} else if (lc->requested_speed != 0)
		speed = speed_to_fwcap(lc->requested_speed);
	else
		speed = fwcap_top_speed(lc->supported);

	/* Force AN on for BT cards. */
	if (is_bt(adap->port[adap->chan_map[port]]))
		aneg = lc->supported & FW_PORT_CAP32_ANEG;

	rcap = aneg | speed | fc | fec;
	if ((rcap | lc->supported) != lc->supported) {
#ifdef INVARIANTS
		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
		    lc->supported);
#endif
		/* Never ask the firmware for something it can't do. */
		rcap &= lc->supported;
	}
	rcap |= mdi;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	if (adap->params.port_caps32) {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
			FW_LEN16(c));
		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
	} else {
		c.action_to_len16 =
		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_LEN16(c));
		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
	    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
		FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Maps a bitmask in an INT_CAUSE register to a human-readable message. */
struct intr_details {
	u32 mask;
	const char *msg;
};

/* Callback to run when any of the bits in mask are set in INT_CAUSE. */
struct intr_action {
	u32 mask;
	int arg;
	bool (*action)(struct adapter *, int, bool);
};

#define NONFATAL_IF_DISABLED 1
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};

/*
 * One-character severity marker for the interrupt log:
 * '!' fatal, '*' enabled but not fatal, '-' not enabled.
 */
static inline char
intr_alert_char(u32 cause, u32 enable, u32 fatal)
{

	if (cause & fatal)
		return ('!');
	if (cause & enable)
		return ('*');
	return ('-');
}

/* Log an INT_CAUSE register and a per-bit breakdown of its set bits. */
static void
t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
{
	u32 enable, fatal, leftover;
	const struct intr_details *details;
	char alert;

	enable = t4_read_reg(adap, ii->enable_reg);
	if (ii->flags & NONFATAL_IF_DISABLED)
		fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
	else
		fatal = ii->fatal;
	alert = intr_alert_char(cause, enable, fatal);
	CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
	    alert, ii->name, ii->cause_reg, cause, enable, fatal);

	/* Report bits not covered by any details entry. */
	leftover = cause;
	for (details = ii->details; details && details->mask != 0; details++) {
		u32 msgbits = details->mask & cause;

		if (msgbits == 0)
			continue;
		alert = intr_alert_char(msgbits, enable, ii->fatal);
		CH_ALERT(adap, " %c [0x%08x] %s\n", alert, msgbits,
		    details->msg);
		leftover &= ~msgbits;
	}
	if (leftover != 0 && leftover != cause)
		CH_ALERT(adap, " ? [0x%08x]\n", leftover);
}

/*
 * Returns true for fatal error.
 */
static bool
t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
    u32 additional_cause, bool verbose)
{
	u32 cause, fatal;
	bool rc;
	const struct intr_action *action;

	/*
	 * Read and display cause.  Note that the top level PL_INT_CAUSE is a
	 * bit special and we need to completely ignore the bits that are not in
	 * PL_INT_ENABLE.
	 */
	cause = t4_read_reg(adap, ii->cause_reg);
	if (ii->cause_reg == A_PL_INT_CAUSE)
		cause &= t4_read_reg(adap, ii->enable_reg);
	if (verbose || cause != 0)
		t4_show_intr_info(adap, ii, cause);
	fatal = cause & ii->fatal;
	if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
		fatal &= t4_read_reg(adap, ii->enable_reg);
	cause |= additional_cause;
	if (cause == 0)
		return (false);

	rc = fatal != 0;
	for (action = ii->actions; action && action->mask != 0; action++) {
		if (!(action->mask & cause))
			continue;
		rc |= (action->action)(adap, action->arg, verbose);
	}

	/* clear */
	t4_write_reg(adap, ii->cause_reg, cause);
	(void)t4_read_reg(adap, ii->cause_reg);

	return (rc);
}

/*
 * Interrupt handler for the PCIE module.
 */
static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details sysbus_intr_details[] = {
		{ F_RNPP, "RXNP array parity error" },
		{ F_RPCP, "RXPC array parity error" },
		{ F_RCIP, "RXCIF array parity error" },
		{ F_RCCP, "Rx completions control array parity error" },
		{ F_RFTP, "RXFT array parity error" },
		{ 0 }
	};
	static const struct intr_info sysbus_intr_info = {
		.name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
		.fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
		.flags = 0,
		.details = sysbus_intr_details,
		.actions = NULL,
	};
	static const struct intr_details pcie_port_intr_details[] = {
		{ F_TPCP, "TXPC array parity error" },
		{ F_TNPP, "TXNP array parity error" },
		{ F_TFTP, "TXFT array parity error" },
		{ F_TCAP, "TXCA array parity error" },
		{ F_TCIP, "TXCIF array parity error" },
		{ F_RCAP, "RXCA array parity error" },
		{ F_OTDD, "outbound request TLP discarded" },
		{ F_RDPE, "Rx data parity error" },
		{ F_TDUE, "Tx uncorrectable data error" },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info = {
		.name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
		.cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		.enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
		.fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
		    F_OTDD | F_RDPE | F_TDUE,
		.flags = 0,
		.details = pcie_port_intr_details,
		.actions = NULL,
	};
	/* T4-only decode of PCIE_INT_CAUSE. */
	static const struct intr_details pcie_intr_details[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error" },
		{ F_MSIADDRHPERR, "MSI AddrH parity error" },
		{ F_MSIDATAPERR, "MSI data parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
		{ F_MSIXDATAPERR, "MSI-X data parity error" },
		{ F_MSIXDIPERR, "MSI-X DI parity error" },
		{ F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
		{ F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
		{ F_TARTAGPERR, "PCIe target tag FIFO parity error" },
		{ F_CCNTPERR, "PCIe CMD channel count parity error" },
		{ F_CREQPERR, "PCIe CMD channel request parity error" },
		{ F_CRSPPERR, "PCIe CMD channel response parity error" },
		{ F_DCNTPERR, "PCIe DMA channel count parity error" },
		{ F_DREQPERR, "PCIe DMA channel request parity error" },
		{ F_DRSPPERR, "PCIe DMA channel response parity error" },
		{ F_HCNTPERR, "PCIe HMA channel count parity error" },
		{ F_HREQPERR, "PCIe HMA channel request parity error" },
		{ F_HRSPPERR, "PCIe HMA channel response parity error" },
		{ F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
		{ F_FIDPERR, "PCIe FID parity error" },
		{ F_INTXCLRPERR, "PCIe INTx clear parity error" },
		{ F_MATAGPERR, "PCIe MA tag parity error" },
		{ F_PIOTAGPERR, "PCIe PIO tag parity error" },
		{ F_RXCPLPERR, "PCIe Rx completion parity error" },
		{ F_RXWRPERR, "PCIe Rx write parity error" },
		{ F_RPLPERR, "PCIe replay buffer parity error" },
		{ F_PCIESINT, "PCIe core secondary fault" },
		{ F_PCIEPINT, "PCIe core primary fault" },
		{ F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
		{ 0 }
	};
	/* T5/T6 decode of PCIE_INT_CAUSE. */
	static const struct intr_details t5_pcie_intr_details[] = {
		{ F_IPGRPPERR, "Parity errors observed by IP" },
		{ F_NONFATALERR, "PCIe non-fatal error" },
		{ F_READRSPERR, "Outbound read error" },
		{ F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
		{ F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
		{ F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
		{ F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
		{ F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
		{ F_MAGRPPERR, "MA group FIFO parity error" },
		{ F_VFIDPERR, "VFID SRAM parity error" },
		{ F_FIDPERR, "FID SRAM parity error" },
		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
		{ F_HREQWRPERR, "HMA channel write request SRAM parity error" },
		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
		{ F_DREQRDPERR, "DMA channel write request SRAM parity error" },
		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
		{ F_MSTGRPPERR, "Master response read queue SRAM parity error" },
		{ 0 }
	};
	/* Not const: .details is filled in per chip generation below. */
	struct intr_info pcie_intr_info = {
		.name = "PCIE_INT_CAUSE",
		.cause_reg = A_PCIE_INT_CAUSE,
		.enable_reg = A_PCIE_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	if (is_t4(adap)) {
		/* The UTL registers exist on T4 only. */
		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);

		pcie_intr_info.details = pcie_intr_details;
	} else {
		pcie_intr_info.details = t5_pcie_intr_details;
	}
	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);

	return (fatal);
}

/*
 * TP interrupt handler.
 */
static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details tp_intr_details[] = {
		{ 0x3fffffff, "TP parity error" },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
		{ 0 }
	};
	static const struct intr_info tp_intr_info = {
		.name = "TP_INT_CAUSE",
		.cause_reg = A_TP_INT_CAUSE,
		.enable_reg = A_TP_INT_ENABLE,
		.fatal = 0x7fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = tp_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
}

/*
 * SGE interrupt handler.
 */
static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_info sge_int1_info = {
		.name = "SGE_INT_CAUSE1",
		.cause_reg = A_SGE_INT_CAUSE1,
		.enable_reg = A_SGE_INT_ENABLE1,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int2_info = {
		.name = "SGE_INT_CAUSE2",
		.cause_reg = A_SGE_INT_CAUSE2,
		.enable_reg = A_SGE_INT_ENABLE2,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	/* T4/T5 decode of SGE_INT_CAUSE3. */
	static const struct intr_details sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* T6 decode of SGE_INT_CAUSE3. */
	static const struct intr_details t6_sge_int3_details[] = {
		{ F_ERR_FLM_DBP,
			"DBP pointer delivery for invalid context or QID" },
		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
			"Invalid QID or header request by IDMA" },
		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
		{ F_ERR_TIMER_ABOVE_MAX_QID,
			"SGE GTS with timer 0-5 for IQID > 1023" },
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
			"SGE received CPL exceeding IQE size" },
		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
			"SGE IQID > 1023 received CPL for FL" },
		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
		{ F_ERR_ING_CTXT_PRIO,
			"Ingress context manager priority user error" },
		{ F_ERR_EGR_CTXT_PRIO,
			"Egress context manager priority user error" },
		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
		{ F_FATAL_WRE_LEN,
			"SGE WRE packet less than advertized length" },
		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
		{ 0x0000000f, "SGE context access for invalid queue" },
		{ 0 }
	};
	/* Not const: .details is filled in per chip generation below. */
	struct intr_info sge_int3_info = {
		.name = "SGE_INT_CAUSE3",
		.cause_reg = A_SGE_INT_CAUSE3,
		.enable_reg = A_SGE_INT_ENABLE3,
		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int4_info = {
		.name = "SGE_INT_CAUSE4",
		.cause_reg = A_SGE_INT_CAUSE4,
		.enable_reg = A_SGE_INT_ENABLE4,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int5_info = {
		.name = "SGE_INT_CAUSE5",
		.cause_reg = A_SGE_INT_CAUSE5,
		.enable_reg = A_SGE_INT_ENABLE5,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info sge_int6_info = {
		.name = "SGE_INT_CAUSE6",
		.cause_reg = A_SGE_INT_CAUSE6,
		.enable_reg = A_SGE_INT_ENABLE6,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};

	bool fatal;
	u32 v;

	if (chip_id(adap) <= CHELSIO_T5) {
		sge_int3_info.details = sge_int3_details;
	} else {
		sge_int3_info.details = t6_sge_int3_details;
	}

	fatal = false;
	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T5)
		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
	if (chip_id(adap) >= CHELSIO_T6)
		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);

	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
	if (v & F_ERROR_QID_VALID) {
		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
		if (v & F_UNCAPTURED_ERROR)
			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adap, A_SGE_ERROR_STATS,
		    F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
	}

	return (fatal);
}

/*
 * CIM interrupt handler.
4377 */ 4378static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose) 4379{ 4380 static const struct intr_action cim_host_intr_actions[] = { 4381 { F_TIMER0INT, 0, t4_os_dump_cimla }, 4382 { 0 }, 4383 }; 4384 static const struct intr_details cim_host_intr_details[] = { 4385 /* T6+ */ 4386 { F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" }, 4387 4388 /* T5+ */ 4389 { F_MA_CIM_INTFPERR, "MA2CIM interface parity error" }, 4390 { F_PLCIM_MSTRSPDATAPARERR, 4391 "PL2CIM master response data parity error" }, 4392 { F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" }, 4393 { F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" }, 4394 { F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" }, 4395 { F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" }, 4396 { F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" }, 4397 { F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" }, 4398 4399 /* T4+ */ 4400 { F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" }, 4401 { F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" }, 4402 { F_MBHOSTPARERR, "CIM mailbox host read parity error" }, 4403 { F_MBUPPARERR, "CIM mailbox uP parity error" }, 4404 { F_IBQTP0PARERR, "CIM IBQ TP0 parity error" }, 4405 { F_IBQTP1PARERR, "CIM IBQ TP1 parity error" }, 4406 { F_IBQULPPARERR, "CIM IBQ ULP parity error" }, 4407 { F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" }, 4408 { F_IBQSGEHIPARERR | F_IBQPCIEPARERR, /* same bit */ 4409 "CIM IBQ PCIe/SGE_HI parity error" }, 4410 { F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" }, 4411 { F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" }, 4412 { F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" }, 4413 { F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" }, 4414 { F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" }, 4415 { F_OBQSGEPARERR, "CIM OBQ SGE parity error" }, 4416 { F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" }, 4417 { F_TIMER1INT, "CIM TIMER0 interrupt" }, 4418 { F_TIMER0INT, "CIM TIMER0 interrupt" }, 
4419 { F_PREFDROPINT, "CIM control register prefetch drop" }, 4420 { 0} 4421 }; 4422 static const struct intr_info cim_host_intr_info = { 4423 .name = "CIM_HOST_INT_CAUSE", 4424 .cause_reg = A_CIM_HOST_INT_CAUSE, 4425 .enable_reg = A_CIM_HOST_INT_ENABLE, 4426 .fatal = 0x007fffe6, 4427 .flags = NONFATAL_IF_DISABLED, 4428 .details = cim_host_intr_details, 4429 .actions = cim_host_intr_actions, 4430 }; 4431 static const struct intr_details cim_host_upacc_intr_details[] = { 4432 { F_EEPROMWRINT, "CIM EEPROM came out of busy state" }, 4433 { F_TIMEOUTMAINT, "CIM PIF MA timeout" }, 4434 { F_TIMEOUTINT, "CIM PIF timeout" }, 4435 { F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" }, 4436 { F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" }, 4437 { F_BLKWRPLINT, "CIM block write to PL space" }, 4438 { F_BLKRDPLINT, "CIM block read from PL space" }, 4439 { F_SGLWRPLINT, 4440 "CIM single write to PL space with illegal BEs" }, 4441 { F_SGLRDPLINT, 4442 "CIM single read from PL space with illegal BEs" }, 4443 { F_BLKWRCTLINT, "CIM block write to CTL space" }, 4444 { F_BLKRDCTLINT, "CIM block read from CTL space" }, 4445 { F_SGLWRCTLINT, 4446 "CIM single write to CTL space with illegal BEs" }, 4447 { F_SGLRDCTLINT, 4448 "CIM single read from CTL space with illegal BEs" }, 4449 { F_BLKWREEPROMINT, "CIM block write to EEPROM space" }, 4450 { F_BLKRDEEPROMINT, "CIM block read from EEPROM space" }, 4451 { F_SGLWREEPROMINT, 4452 "CIM single write to EEPROM space with illegal BEs" }, 4453 { F_SGLRDEEPROMINT, 4454 "CIM single read from EEPROM space with illegal BEs" }, 4455 { F_BLKWRFLASHINT, "CIM block write to flash space" }, 4456 { F_BLKRDFLASHINT, "CIM block read from flash space" }, 4457 { F_SGLWRFLASHINT, "CIM single write to flash space" }, 4458 { F_SGLRDFLASHINT, 4459 "CIM single read from flash space with illegal BEs" }, 4460 { F_BLKWRBOOTINT, "CIM block write to boot space" }, 4461 { F_BLKRDBOOTINT, "CIM block read from boot space" }, 4462 { F_SGLWRBOOTINT, "CIM single write 
to boot space" }, 4463 { F_SGLRDBOOTINT, 4464 "CIM single read from boot space with illegal BEs" }, 4465 { F_ILLWRBEINT, "CIM illegal write BEs" }, 4466 { F_ILLRDBEINT, "CIM illegal read BEs" }, 4467 { F_ILLRDINT, "CIM illegal read" }, 4468 { F_ILLWRINT, "CIM illegal write" }, 4469 { F_ILLTRANSINT, "CIM illegal transaction" }, 4470 { F_RSVDSPACEINT, "CIM reserved space access" }, 4471 {0} 4472 }; 4473 static const struct intr_info cim_host_upacc_intr_info = { 4474 .name = "CIM_HOST_UPACC_INT_CAUSE", 4475 .cause_reg = A_CIM_HOST_UPACC_INT_CAUSE, 4476 .enable_reg = A_CIM_HOST_UPACC_INT_ENABLE, 4477 .fatal = 0x3fffeeff, 4478 .flags = NONFATAL_IF_DISABLED, 4479 .details = cim_host_upacc_intr_details, 4480 .actions = NULL, 4481 }; 4482 static const struct intr_info cim_pf_host_intr_info = { 4483 .name = "CIM_PF_HOST_INT_CAUSE", 4484 .cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE), 4485 .enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE), 4486 .fatal = 0, 4487 .flags = 0, 4488 .details = NULL, 4489 .actions = NULL, 4490 }; 4491 u32 val, fw_err; 4492 bool fatal; 4493 4494 fw_err = t4_read_reg(adap, A_PCIE_FW); 4495 if (fw_err & F_PCIE_FW_ERR) 4496 t4_report_fw_error(adap); 4497 4498 /* 4499 * When the Firmware detects an internal error which normally wouldn't 4500 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order 4501 * to make sure the Host sees the Firmware Crash. So if we have a 4502 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0 4503 * interrupt. 
4504 */ 4505 val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE); 4506 if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) || 4507 G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) { 4508 t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT); 4509 } 4510 4511 fatal = false; 4512 fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose); 4513 fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose); 4514 fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose); 4515 4516 return (fatal); 4517} 4518 4519/* 4520 * ULP RX interrupt handler. 4521 */ 4522static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose) 4523{ 4524 static const struct intr_details ulprx_intr_details[] = { 4525 /* T5+ */ 4526 { F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" }, 4527 { F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" }, 4528 4529 /* T4+ */ 4530 { F_CAUSE_CTX_1, "ULPRX channel 1 context error" }, 4531 { F_CAUSE_CTX_0, "ULPRX channel 0 context error" }, 4532 { 0x007fffff, "ULPRX parity error" }, 4533 { 0 } 4534 }; 4535 static const struct intr_info ulprx_intr_info = { 4536 .name = "ULP_RX_INT_CAUSE", 4537 .cause_reg = A_ULP_RX_INT_CAUSE, 4538 .enable_reg = A_ULP_RX_INT_ENABLE, 4539 .fatal = 0x07ffffff, 4540 .flags = NONFATAL_IF_DISABLED, 4541 .details = ulprx_intr_details, 4542 .actions = NULL, 4543 }; 4544 static const struct intr_info ulprx_intr2_info = { 4545 .name = "ULP_RX_INT_CAUSE_2", 4546 .cause_reg = A_ULP_RX_INT_CAUSE_2, 4547 .enable_reg = A_ULP_RX_INT_ENABLE_2, 4548 .fatal = 0, 4549 .flags = 0, 4550 .details = NULL, 4551 .actions = NULL, 4552 }; 4553 bool fatal = false; 4554 4555 fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose); 4556 fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose); 4557 4558 return (fatal); 4559} 4560 4561/* 4562 * ULP TX interrupt handler. 
 */
static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulptx_intr_details[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
		{ 0x0fffffff, "ULPTX parity error" },
		{ 0 }
	};
	static const struct intr_info ulptx_intr_info = {
		.name = "ULP_TX_INT_CAUSE",
		.cause_reg = A_ULP_TX_INT_CAUSE,
		.enable_reg = A_ULP_TX_INT_ENABLE,
		.fatal = 0x0fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulptx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; only bits 0xf0 are treated as fatal. */
	static const struct intr_info ulptx_intr2_info = {
		.name = "ULP_TX_INT_CAUSE_2",
		.cause_reg = A_ULP_TX_INT_CAUSE_2,
		.enable_reg = A_ULP_TX_INT_ENABLE_2,
		.fatal = 0xf0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);

	return (fatal);
}

/*
 * Dump the PM_TX debug statistics.  Registered as an intr_action for the
 * PM TX handler; purely informational, so it always returns false (never
 * fatal by itself).
 */
static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
{
	int i;
	u32 data[17];

	/* PM_TX_DBG_STAT0..16 are read through the indirect ctrl/data pair. */
	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
	    ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
	for (i = 0; i < ARRAY_SIZE(data); i++) {
		CH_ALERT(adap, " - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
		    A_PM_TX_DBG_STAT0 + i, data[i]);
	}

	return (false);
}

/*
 * PM TX interrupt handler.
 */
static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Dump the PM_TX debug stats whenever any cause bit fires. */
	static const struct intr_action pmtx_intr_actions[] = {
		{ 0xffffffff, 0, pmtx_dump_dbg_stats },
		{ 0 },
	};
	static const struct intr_details pmtx_intr_details[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
		{ 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
		{ 0x00f00000, "PMTX icspi FIFO Rx framing error" },
		{ 0x000f0000, "PMTX icspi FIFO Tx framing error" },
		{ 0x0000f000, "PMTX oespi FIFO Rx framing error" },
		{ 0x00000f00, "PMTX oespi FIFO Tx framing error" },
		{ 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
		{ 0 }
	};
	/* Every PM_TX cause bit is treated as fatal. */
	static const struct intr_info pmtx_intr_info = {
		.name = "PM_TX_INT_CAUSE",
		.cause_reg = A_PM_TX_INT_CAUSE,
		.enable_reg = A_PM_TX_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pmtx_intr_details,
		.actions = pmtx_intr_actions,
	};

	return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
}

/*
 * PM RX interrupt handler.
 */
static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details pmrx_intr_details[] = {
		/* T6+ */
		{ 0x18000000, "PMRX ospi overflow" },
		{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
		{ F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
		{ F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
		{ F_SDC_ERR, "PMRX SDC error" },

		/* T4+ */
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
		{ 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
		{ 0x0003c000, "PMRX iespi Rx framing error" },
		{ 0x00003c00, "PMRX iespi Tx framing error" },
		{ 0x00000300, "PMRX ocspi Rx framing error" },
		{ 0x000000c0, "PMRX ocspi Tx framing error" },
		{ 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
		{ 0 }
	};
	static const struct intr_info pmrx_intr_info = {
		.name = "PM_RX_INT_CAUSE",
		.cause_reg = A_PM_RX_INT_CAUSE,
		.enable_reg = A_PM_RX_INT_ENABLE,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = pmrx_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
}

/*
 * CPL switch interrupt handler.
 */
static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details cplsw_intr_details[] = {
		/* T5+ */
		{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
		{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },

		/* T4+ */
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
		{ 0 }
	};
	static const struct intr_info cplsw_intr_info = {
		.name = "CPL_INTR_CAUSE",
		.cause_reg = A_CPL_INTR_CAUSE,
		.enable_reg = A_CPL_INTR_ENABLE,
		.fatal = 0xff,
		.flags = NONFATAL_IF_DISABLED,
		.details = cplsw_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
}

/* Fatal LE (Lookup Engine) cause bits, by chip generation. */
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
#define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
#define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
    F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
    F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
    F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
    F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
    F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)

/*
 * LE interrupt handler.
 */
static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Cause decode for T4/T5. */
	static const struct intr_details le_intr_details[] = {
		{ F_REQQPARERR, "LE request queue parity error" },
		{ F_UNKNOWNCMD, "LE unknown command" },
		{ F_ACTRGNFULL, "LE active region full" },
		{ F_PARITYERR, "LE parity error" },
		{ F_LIPMISS, "LE LIP miss" },
		{ F_LIP0, "LE 0 LIP error" },
		{ 0 }
	};
	/* Cause decode for T6+, which uses a different bit layout. */
	static const struct intr_details t6_le_intr_details[] = {
		{ F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
		{ F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
		{ F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
		{ F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
		{ F_TOTCNTERR, "LE total active < TCAM count" },
		{ F_CMDPRSRINTERR, "LE internal error in parser" },
		{ F_CMDTIDERR, "Incorrect tid in LE command" },
		{ F_T6_ACTRGNFULL, "LE active region full" },
		{ F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
		{ F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
		{ F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
		{ F_TCAMACCFAIL, "LE TCAM access failure" },
		{ F_T6_UNKNOWNCMD, "LE unknown command" },
		{ F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
		{ F_T6_LIPMISS, "LE CLIP lookup miss" },
		{ T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
		{ 0 }
	};
	/*
	 * Not static/const: .details and .fatal are filled in below based on
	 * the chip generation.
	 */
	struct intr_info le_intr_info = {
		.name = "LE_DB_INT_CAUSE",
		.cause_reg = A_LE_DB_INT_CAUSE,
		.enable_reg = A_LE_DB_INT_ENABLE,
		.fatal = 0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};

	if (chip_id(adap) <= CHELSIO_T5) {
		le_intr_info.details = le_intr_details;
		le_intr_info.fatal = T5_LE_FATAL_MASK;
	} else {
		le_intr_info.details = t6_le_intr_details;
		le_intr_info.fatal = T6_LE_FATAL_MASK;
	}

	return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
}

/*
 * MPS interrupt handler.  Walks all of the MPS module's cause registers
 * and returns true if any fatal condition was seen.
 */
static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal = 0xffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal = 0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal = F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};

	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
	/* The SRAM1 cause register exists on T5 and later only. */
	if (chip_id(adap) > CHELSIO_T4) {
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
		    verbose);
	}

	/* Clear the top-level MPS cause (a write of 0 on T4 is a no-op). */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE);	/* flush */

	return (fatal);
}

/*
 * EDC/MC interrupt handler.
4937 */ 4938static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose) 4939{ 4940 static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" }; 4941 unsigned int count_reg, v; 4942 static const struct intr_details mem_intr_details[] = { 4943 { F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" }, 4944 { F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" }, 4945 { F_PERR_INT_CAUSE, "FIFO parity error" }, 4946 { 0 } 4947 }; 4948 struct intr_info ii = { 4949 .fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE, 4950 .details = mem_intr_details, 4951 .flags = 0, 4952 .actions = NULL, 4953 }; 4954 bool fatal; 4955 4956 switch (idx) { 4957 case MEM_EDC0: 4958 ii.name = "EDC0_INT_CAUSE"; 4959 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0); 4960 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0); 4961 count_reg = EDC_REG(A_EDC_ECC_STATUS, 0); 4962 break; 4963 case MEM_EDC1: 4964 ii.name = "EDC1_INT_CAUSE"; 4965 ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1); 4966 ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1); 4967 count_reg = EDC_REG(A_EDC_ECC_STATUS, 1); 4968 break; 4969 case MEM_MC0: 4970 ii.name = "MC0_INT_CAUSE"; 4971 if (is_t4(adap)) { 4972 ii.cause_reg = A_MC_INT_CAUSE; 4973 ii.enable_reg = A_MC_INT_ENABLE; 4974 count_reg = A_MC_ECC_STATUS; 4975 } else { 4976 ii.cause_reg = A_MC_P_INT_CAUSE; 4977 ii.enable_reg = A_MC_P_INT_ENABLE; 4978 count_reg = A_MC_P_ECC_STATUS; 4979 } 4980 break; 4981 case MEM_MC1: 4982 ii.name = "MC1_INT_CAUSE"; 4983 ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1); 4984 ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1); 4985 count_reg = MC_REG(A_MC_P_ECC_STATUS, 1); 4986 break; 4987 } 4988 4989 fatal = t4_handle_intr(adap, &ii, 0, verbose); 4990 4991 v = t4_read_reg(adap, count_reg); 4992 if (v != 0) { 4993 if (G_ECC_UECNT(v) != 0) { 4994 CH_ALERT(adap, 4995 "%s: %u uncorrectable ECC data error(s)\n", 4996 name[idx], G_ECC_UECNT(v)); 4997 } 4998 if (G_ECC_CECNT(v) != 0) { 4999 if (idx <= MEM_EDC1) 5000 t4_edc_err_read(adap, idx); 5001 
CH_WARN_RATELIMIT(adap, 5002 "%s: %u correctable ECC data error(s)\n", 5003 name[idx], G_ECC_CECNT(v)); 5004 } 5005 t4_write_reg(adap, count_reg, 0xffffffff); 5006 } 5007 5008 return (fatal); 5009} 5010 5011static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose) 5012{ 5013 u32 v; 5014 5015 v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS); 5016 CH_ALERT(adap, 5017 "MA address wrap-around error by client %u to address %#x\n", 5018 G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4); 5019 t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v); 5020 5021 return (false); 5022} 5023 5024 5025/* 5026 * MA interrupt handler. 5027 */ 5028static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose) 5029{ 5030 static const struct intr_action ma_intr_actions[] = { 5031 { F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status }, 5032 { 0 }, 5033 }; 5034 static const struct intr_info ma_intr_info = { 5035 .name = "MA_INT_CAUSE", 5036 .cause_reg = A_MA_INT_CAUSE, 5037 .enable_reg = A_MA_INT_ENABLE, 5038 .fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE, 5039 .flags = NONFATAL_IF_DISABLED, 5040 .details = NULL, 5041 .actions = ma_intr_actions, 5042 }; 5043 static const struct intr_info ma_perr_status1 = { 5044 .name = "MA_PARITY_ERROR_STATUS1", 5045 .cause_reg = A_MA_PARITY_ERROR_STATUS1, 5046 .enable_reg = A_MA_PARITY_ERROR_ENABLE1, 5047 .fatal = 0xffffffff, 5048 .flags = 0, 5049 .details = NULL, 5050 .actions = NULL, 5051 }; 5052 static const struct intr_info ma_perr_status2 = { 5053 .name = "MA_PARITY_ERROR_STATUS2", 5054 .cause_reg = A_MA_PARITY_ERROR_STATUS2, 5055 .enable_reg = A_MA_PARITY_ERROR_ENABLE2, 5056 .fatal = 0xffffffff, 5057 .flags = 0, 5058 .details = NULL, 5059 .actions = NULL, 5060 }; 5061 bool fatal; 5062 5063 fatal = false; 5064 fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose); 5065 fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose); 5066 if (chip_id(adap) > CHELSIO_T4) 5067 fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose); 5068 5069 
return (fatal); 5070} 5071 5072/* 5073 * SMB interrupt handler. 5074 */ 5075static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose) 5076{ 5077 static const struct intr_details smb_intr_details[] = { 5078 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" }, 5079 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" }, 5080 { F_SLVFIFOPARINT, "SMB slave FIFO parity error" }, 5081 { 0 } 5082 }; 5083 static const struct intr_info smb_intr_info = { 5084 .name = "SMB_INT_CAUSE", 5085 .cause_reg = A_SMB_INT_CAUSE, 5086 .enable_reg = A_SMB_INT_ENABLE, 5087 .fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT, 5088 .flags = 0, 5089 .details = smb_intr_details, 5090 .actions = NULL, 5091 }; 5092 5093 return (t4_handle_intr(adap, &smb_intr_info, 0, verbose)); 5094} 5095 5096/* 5097 * NC-SI interrupt handler. 5098 */ 5099static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose) 5100{ 5101 static const struct intr_details ncsi_intr_details[] = { 5102 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" }, 5103 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" }, 5104 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" }, 5105 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" }, 5106 { 0 } 5107 }; 5108 static const struct intr_info ncsi_intr_info = { 5109 .name = "NCSI_INT_CAUSE", 5110 .cause_reg = A_NCSI_INT_CAUSE, 5111 .enable_reg = A_NCSI_INT_ENABLE, 5112 .fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR | 5113 F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR, 5114 .flags = 0, 5115 .details = ncsi_intr_details, 5116 .actions = NULL, 5117 }; 5118 5119 return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose)); 5120} 5121 5122/* 5123 * MAC interrupt handler. 
 */
static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
{
	static const struct intr_details mac_intr_details[] = {
		{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
		{ 0 }
	};
	char name[32];
	struct intr_info ii;
	bool fatal = false;

	/* T4 and T5+ use different per-port MAC register layouts. */
	if (is_t4(adap)) {
		snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
		ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	} else {
		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
		ii.flags = 0;
		ii.details = mac_intr_details;
		ii.actions = NULL;
	}
	fatal |= t4_handle_intr(adap, &ii, 0, verbose);

	/* T5+ have an additional per-port parity-error cause register. */
	if (chip_id(adap) >= CHELSIO_T5) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	/* T6 adds a 100G variant of the parity-error cause register. */
	if (chip_id(adap) >= CHELSIO_T6) {
		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
		ii.name = &name[0];
		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
		ii.fatal = 0;
		ii.flags = 0;
		ii.details = NULL;
		ii.actions = NULL;
		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
	}

	return (fatal);
}

/*
 * PL internal (PL_PL) interrupt handler.
 */
static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details plpl_intr_details[] = {
		{ F_FATALPERR, "Fatal parity error" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	static const struct intr_info plpl_intr_info = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = NONFATAL_IF_DISABLED,
		.details = plpl_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
}

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adap: the adapter
 * @verbose: increased verbosity, for debug
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adap, bool verbose)
{
	/* Names for the top-level (PL) cause bits. */
	static const struct intr_details pl_intr_details[] = {
		{ F_MC1, "MC1" },
		{ F_UART, "UART" },
		{ F_ULP_TX, "ULP TX" },
		{ F_SGE, "SGE" },
		{ F_HMA, "HMA" },
		{ F_CPL_SWITCH, "CPL Switch" },
		{ F_ULP_RX, "ULP RX" },
		{ F_PM_RX, "PM RX" },
		{ F_PM_TX, "PM TX" },
		{ F_MA, "MA" },
		{ F_TP, "TP" },
		{ F_LE, "LE" },
		{ F_EDC1, "EDC1" },
		{ F_EDC0, "EDC0" },
		{ F_MC, "MC0" },
		{ F_PCIE, "PCIE" },
		{ F_PMU, "PMU" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_SF, "SF" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_MI, "MI" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	static const struct intr_info pl_perr_cause = {
		.name = "PL_PERR_CAUSE",
		.cause_reg = A_PL_PERR_CAUSE,
		.enable_reg = A_PL_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pl_intr_details,
		.actions = NULL,
	};
	/* Dispatch table: top-level cause bit -> module handler (+arg). */
	static const struct intr_action pl_intr_action[] = {
		{ F_MC1, MEM_MC1, mem_intr_handler },
		{ F_ULP_TX, -1, ulptx_intr_handler },
		{ F_SGE, -1, sge_intr_handler },
		{ F_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_ULP_RX, -1, ulprx_intr_handler },
		{ F_PM_RX, -1, pmrx_intr_handler},
		{ F_PM_TX, -1, pmtx_intr_handler},
		{ F_MA, -1, ma_intr_handler },
		{ F_TP, -1, tp_intr_handler },
		{ F_LE, -1, le_intr_handler },
		{ F_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_MC0, MEM_MC0, mem_intr_handler },
		{ F_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	static const struct intr_info pl_intr_info = {
		.name = "PL_INT_CAUSE",
		.cause_reg = A_PL_INT_CAUSE,
		.enable_reg = A_PL_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = pl_intr_details,
		.actions = pl_intr_action,
	};
	bool fatal;
	u32 perr;

	/*
	 * Read (and, if set, clear) the top-level parity-error cause first
	 * and fold it into the causes dispatched below.  In verbose mode,
	 * also OR in all enabled top-level causes so every enabled module
	 * gets reported.
	 */
	perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
	if (verbose || perr != 0) {
		t4_show_intr_info(adap, &pl_perr_cause, perr);
		if (perr != 0)
			t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
		if (verbose)
			perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
	}
	fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
	if (fatal)
		t4_fatal_err(adap, false);

	return (0);
}

/* PF-specific top-level interrupt sources. */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adap)
{
	u32 val = 0;

	/* The SGE error-interrupt set differs between T4/T5 and T6+. */
	if (chip_id(adap) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
	    F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
	    F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
	    F_EGRESS_SIZE_ERR;
	t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Keep the SF and I2CM top-level sources disabled. */
	t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adap: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adap)
{

	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
}

/**
 * t4_intr_clear - clear all interrupts
 * @adap: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.  The caller must be a PCI function managing
 * global interrupts.
 */
void t4_intr_clear(struct adapter *adap)
{
	/* Cause registers common to all chip generations. */
	static const u32 cause_reg[] = {
		A_CIM_HOST_INT_CAUSE,
		A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_CPL_INTR_CAUSE,
		EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
		A_LE_DB_INT_CAUSE,
		A_MA_INT_WRAP_STATUS,
		A_MA_PARITY_ERROR_STATUS1,
		A_MA_INT_CAUSE,
		A_MPS_CLS_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		A_MPS_TRC_INT_CAUSE,
		A_MPS_TX_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		A_NCSI_INT_CAUSE,
		A_PCIE_INT_CAUSE,
		A_PCIE_NONFAT_ERR,
		A_PL_PL_INT_CAUSE,
		A_PM_RX_INT_CAUSE,
		A_PM_TX_INT_CAUSE,
		A_SGE_INT_CAUSE1,
		A_SGE_INT_CAUSE2,
		A_SGE_INT_CAUSE3,
		A_SGE_INT_CAUSE4,
		A_SMB_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE,
		A_ULP_RX_INT_CAUSE_2,
		A_ULP_TX_INT_CAUSE,
		A_ULP_TX_INT_CAUSE_2,

		MYPF_REG(A_PL_PF_INT_CAUSE),
	};
	int i;
	const int nchan = adap->chip_params->nchan;

	for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
		t4_write_reg(adap, cause_reg[i], 0xffffffff);

	/* T4-only cause registers. */
	if (is_t4(adap)) {
		t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	/* Registers added in T5 and later. */
	if (chip_id(adap) >= CHELSIO_T5) {
		t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
		t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
		t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
		t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
		if (is_t5(adap)) {
			/* Second memory controller exists on T5 only. */
			t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
			    0xffffffff);
		}
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, T5_PORT_REG(i,
			    A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
			if (chip_id(adap) > CHELSIO_T5) {
				t4_write_reg(adap, T5_PORT_REG(i,
				    A_MAC_PORT_PERR_INT_CAUSE_100G),
				    0xffffffff);
			}
			t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	if (chip_id(adap) >= CHELSIO_T6) {
		t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
	}

	/* Finally, clear the top-level cause registers. */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
	t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
	(void) t4_read_reg(adap, A_PL_INT_CAUSE);	/* flush */
}

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	/* Fold the 48 bits into two 24-bit halves, XOR, then mix down. */
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;	/* 6-bit hash value */
}

/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
    int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for @rspq reuse */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
			    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
			    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
    unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
		    cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
		    cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;	/* unsupported global RSS mode */
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row.  Writes the row request and polls (up to 5
 * attempts) for F_LKPTBLROWVLD before returning the latched value in @val.
 * NOTE(review): 0xfff00000 in the upper bits appears to be the "read this
 * row" request encoding -- confirm against the TP register documentation.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	/* Each RSS lookup-table row holds two queue entries. */
	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST.  One firmware LDST
 *	command is issued per register.  Returns 0 on success or the first
 *	mailbox error encountered.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* On a read the value field is unused; send 0. */
		c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access (direct address/data register pair).
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* forces backdoor path unless LDST succeeds */
	int cmd;

	/* Map the address register to the matching LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* LDST unavailable or failed: fall back to direct indirect access. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while
awaiting command completion
 *
 *	Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw = 1: read the registers into @buff. */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/*
	 * t4_tp_indirect_rw() takes a non-const buffer, but with rw = 0 it
	 * only reads from it, so casting away const here is safe.
	 */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  __DECONST(u32 *, buff), nregs, start_index, 0,
			  sleep_ok);
}

/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 *	t4_tp_mib_read - Read TP MIB registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the 320-bit key value first ... */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* ... then latch it into table entry @idx (if @idx is in range). */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF address field moved on T6; pick the right encoding. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
5952 */ 5953void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, 5954 u32 vfl, u32 vfh, bool sleep_ok) 5955{ 5956 u32 vrt, mask, data; 5957 5958 if (chip_id(adapter) <= CHELSIO_T5) { 5959 mask = V_VFWRADDR(M_VFWRADDR); 5960 data = V_VFWRADDR(index); 5961 } else { 5962 mask = V_T6_VFWRADDR(M_T6_VFWRADDR); 5963 data = V_T6_VFWRADDR(index); 5964 } 5965 5966 /* 5967 * Load up VFL/VFH with the values to be written ... 5968 */ 5969 t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok); 5970 t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok); 5971 5972 /* 5973 * Write the VFL/VFH into the VF Table at index'th location. 5974 */ 5975 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT); 5976 vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask); 5977 vrt |= data | F_VFRDEN; 5978 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt); 5979} 5980 5981/** 5982 * t4_read_rss_pf_map - read PF RSS Map 5983 * @adapter: the adapter 5984 * @sleep_ok: if true we may sleep while awaiting command completion 5985 * 5986 * Reads the PF RSS Map register and returns its value. 5987 */ 5988u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) 5989{ 5990 u32 pfmap; 5991 5992 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 5993 5994 return pfmap; 5995} 5996 5997/** 5998 * t4_write_rss_pf_map - write PF RSS Map 5999 * @adapter: the adapter 6000 * @pfmap: PF RSS Map value 6001 * 6002 * Writes the specified value to the PF RSS Map register. 6003 */ 6004void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok) 6005{ 6006 t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 6007} 6008 6009/** 6010 * t4_read_rss_pf_mask - read PF RSS Mask 6011 * @adapter: the adapter 6012 * @sleep_ok: if true we may sleep while awaiting command completion 6013 * 6014 * Reads the PF RSS Mask register and returns its value. 
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);

	return pfmask;
}

/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer covering the whole OUT_RST..RXT_SEG_LO MIB range. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Index of counter "x" relative to the start of the block read into val[]. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/*
		 * The v6 counters are read starting at V6OUT_RST; the STAT
		 * macros index relative to the start of whichever block was
		 * read, so the same macros serve both families.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 *
t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* All per-channel counter arrays hold one entry per channel. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* Two consecutive MIB words starting at OFD_ARP_DROP. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
			   bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}

/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* Two consecutive MIB words starting at RQE_DFR_PKT. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* The byte counter is a HI/LO pair, two words per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	/* val[2]/val[3] are the HI/LO halves of the 64-bit octet count. */
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * NOTE(review): index 0xff with the entry number in the
		 * value field appears to be the "read back entry i"
		 * encoding for A_TP_MTU_TABLE -- confirm with the data book.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* Row index 0xffff selects read-back of (mtu, w). */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			/* The additive increment is the low 13 bits. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
							A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO address/data register pair. */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Both tables have one
 *	entry per congestion window (32 entries), with the values growing
 *	with the window index.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet count per congestion window, one per window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Scale alpha by MTU payload, clamp to the HW minimum. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t4_set_pace_tbl - set the pace table
 *	@adap: the adapter
 *	@pace_vals: the pace values in microseconds
 *	@start: index of the first entry in the HW pace table to set
 *	@n: how many entries to set
 *
 *	Sets (a subset of the) HW pace table.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		/* The HW pace field is 11 bits wide. */
		if (vals[i] > 0x7ff)
			return -ERANGE;
		/* A non-zero request must not round down to zero ticks. */
		if (*pace_vals && vals[i] == 0)
			return -ERANGE;
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: target rate in Kbps
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* cclk is in kHz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbits/s -> bytes/s (1000 / 8) */
		/*
		 * Search all clocks-per-tick values for the (cpt, bpt)
		 * pair whose byte rate bpt * (clk / cpt) is closest to the
		 * requested rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* bytes per tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Two schedulers share each rate-limit register; pick the half. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	/* Two schedulers share each timer-separator register. */
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	/* Update only this scheduler's half of the register. */
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	t4_read_reg(adap, A_TP_TM_PIO_DATA);	/* flush */
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 *	which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	/* cclk is in kHz (see its use as cclk * 1000 in t4_set_sched_bps). */
	u64 v = (u64)bytes256 * adap->params.vpd.cclk;

	/* 62.5 * v computed with integer arithmetic. */
	return v * 62 + v / 2;
}

/**
 *	t4_get_chan_txrate - get the current per channel Tx rates
 *	@adap: the adapter
 *	@nic_rate: rates for NIC traffic
 *	@ofld_rate: rates for offloaded traffic
 *
 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
 *	for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2 and 3 exist only on 4-channel adapters. */
	if (adap->chip_params->nchan > 2) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->chip_params->nchan > 2) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 *	t4_set_trace_filter - configure one of the tracing filters
 *	@adap: the adapter
 *	@tp: the desired trace filter parameters
 *	@idx: which filter to configure
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
 *	it indicates that the filter is already written in the register and it
 *	just needs to be enabled or disabled.
6524 */ 6525int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 6526 int idx, int enable) 6527{ 6528 int i, ofst = idx * 4; 6529 u32 data_reg, mask_reg, cfg; 6530 u32 multitrc = F_TRCMULTIFILTER; 6531 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 6532 6533 if (idx < 0 || idx >= NTRACE) 6534 return -EINVAL; 6535 6536 if (tp == NULL || !enable) { 6537 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 6538 enable ? en : 0); 6539 return 0; 6540 } 6541 6542 /* 6543 * TODO - After T4 data book is updated, specify the exact 6544 * section below. 6545 * 6546 * See T4 data book - MPS section for a complete description 6547 * of the below if..else handling of A_MPS_TRC_CFG register 6548 * value. 6549 */ 6550 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 6551 if (cfg & F_TRCMULTIFILTER) { 6552 /* 6553 * If multiple tracers are enabled, then maximum 6554 * capture size is 2.5KB (FIFO size of a single channel) 6555 * minus 2 flits for CPL_TRACE_PKT header. 6556 */ 6557 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 6558 return -EINVAL; 6559 } else { 6560 /* 6561 * If multiple tracers are disabled, to avoid deadlocks 6562 * maximum packet capture size of 9600 bytes is recommended. 6563 * Also in this mode, only trace0 can be enabled and running. 6564 */ 6565 multitrc = 0; 6566 if (tp->snap_len > 9600 || idx) 6567 return -EINVAL; 6568 } 6569 6570 if (tp->port > (is_t4(adap) ? 
11 : 19) || tp->invert > 1 || 6571 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 6572 tp->min_len > M_TFMINPKTSIZE) 6573 return -EINVAL; 6574 6575 /* stop the tracer we'll be changing */ 6576 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 6577 6578 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 6579 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 6580 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 6581 6582 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 6583 t4_write_reg(adap, data_reg, tp->data[i]); 6584 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 6585 } 6586 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 6587 V_TFCAPTUREMAX(tp->snap_len) | 6588 V_TFMINPKTSIZE(tp->min_len)); 6589 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 6590 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 6591 (is_t4(adap) ? 6592 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 6593 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 6594 6595 return 0; 6596} 6597 6598/** 6599 * t4_get_trace_filter - query one of the tracing filters 6600 * @adap: the adapter 6601 * @tp: the current trace filter parameters 6602 * @idx: which trace filter to query 6603 * @enabled: non-zero if the filter is enabled 6604 * 6605 * Returns the current settings of one of the HW tracing filters. 
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* Enable/port/invert fields moved within CTL_A on T5+ */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/*
	 * HW stores the don't-care mask inverted (see t4_set_trace_filter),
	 * so invert it back and report only the bytes that are matched on.
	 */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i (1-based in the CONFIG register) */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+ exposes the cycle count via the debug window */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i (1-based in the CONFIG register) */
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+ exposes the cycle count via the debug window */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n;

	/* Prefer the firmware-provided map (8 bits per port) if present */
	if (adap->params.mps_bg_map)
		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);

	/* Otherwise derive the map from the number of configured ports */
	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/*
 * TP RX e-channels associated with the port.  Same derivation as
 * t4_get_mps_bg_map() minus the firmware-provided shortcut.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by enum fw_port_type; keep in sync with the firmware */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 *	t4_get_port_stats_offset - collect port stats relative to a previous
 *				   snapshot
 *	@adap: The adapter
 *	@idx: The port
 *	@stats: Current stats to fill
 *	@offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/* Subtract the snapshot field by field; assumes port_stats is
	 * an array of u64 counters with no padding. */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the
stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Per-port statistic; register layout differs between T4 and T5+ */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Adapter-wide (common) statistic */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * T5+ can be configured to include pause frames in the frame/byte
	 * counters; back them out so the stats reflect data traffic only.
	 */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the Rx direction */
	if (chip_id(adap) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters exist only for buffer groups this port owns */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;

/* Per-loopback-port statistic; register layout differs on T5+ */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets	= GET_STAT(BYTES);
	p->frames	= GET_STAT(FRAMES);
	p->bcast_frames	= GET_STAT(BCAST);
	p->mcast_frames	= GET_STAT(MCAST);
	p->ucast_frames	= GET_STAT(UCAST);
	p->error_frames	= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters exist only for buffer groups this port owns */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* XGMAC (T4) vs MAC (T5+) register blocks */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* addr[0] is the MSB of the MAC address */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* Only NWOL_PAT (8) HW pattern filters exist */
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 are shared by all filters; DATA0 is written per filter */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC (CRC slots start at EPIO address 32) */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	/* Only route the delete notification when a valid queue was given */
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
		    cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/*
 * Initialize the common header of a firmware command structure:
 * opcode, REQUEST flag, READ/WRITE direction, and length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/**
 *	t4_fwaddrspace_write - write a value into the firmware address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the 32-bit value to write
 *
 *	Issues a FW_LDST command with address space FW_LDST_ADDRSPC_FIRMWARE
 *	to write @val at @addr.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The reply (with the register value) is written back into c */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@valp: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs a human-readable name for a stuck SGE ingress DMA state plus a
 *	few SGE debug registers.  The decode tables are chip-specific.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* T4 IDMA state names, indexed by hardware state number */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* T5 IDMA state names */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* T6 IDMA state names */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* Debug registers dumped alongside the decoded state */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox (unused; kept for interface symmetry with t4_fw_halt)
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox)
{
	int ms;

	/* Release the uP, then wait for firmware to clear its HALT flag */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
			return FW_SUCCESS;
		msleep(100);
		ms += 100;
	}

	return -ETIMEDOUT;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.
 *	Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are written without halting/restarting the uP. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int ret;

	/* Refuse images built for a different chip than this adapter. */
	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	return t4_fw_restart(adap, mbox);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	/* p walks the command's (mnem, val) pair array as flat __be32 slots */
	__be32 *p = &c.param[0].mnem;

	/* The FW_PARAMS command carries at most 7 (mnem, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Fill in the mnemonics; when @rw is set also seed the value slots. */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* Read results back from the val slot of each pair (stride 2). */
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}

int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	/* Read-only convenience wrapper around t4_query_params_rw(). */
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 *	t4_set_params_timeout - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the
 *	PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@timeout: the timeout time
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	/* p walks the command's (mnem, val) pair array as flat __be32 slots */
	__be32 *p = &c.param[0].mnem;

	/* The FW_PARAMS command carries at most 7 (mnem, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Lay out the (mnemonic, value) pairs back to back. */
	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
7749 */ 7750int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 7751 unsigned int vf, unsigned int nparams, const u32 *params, 7752 const u32 *val) 7753{ 7754 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, 7755 FW_CMD_MAX_TIMEOUT); 7756} 7757 7758/** 7759 * t4_cfg_pfvf - configure PF/VF resource limits 7760 * @adap: the adapter 7761 * @mbox: mailbox to use for the FW command 7762 * @pf: the PF being configured 7763 * @vf: the VF being configured 7764 * @txq: the max number of egress queues 7765 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 7766 * @rxqi: the max number of interrupt-capable ingress queues 7767 * @rxq: the max number of interruptless ingress queues 7768 * @tc: the PCI traffic class 7769 * @vi: the max number of virtual interfaces 7770 * @cmask: the channel access rights mask for the PF/VF 7771 * @pmask: the port access rights mask for the PF/VF 7772 * @nexact: the maximum number of exact MPS filters 7773 * @rcaps: read capabilities 7774 * @wxcaps: write/execute capabilities 7775 * 7776 * Configures resource limits and capabilities for a physical or virtual 7777 * function. 
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	/* Pack all resource limits into the FW_PFVF command fields. */
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     uint8_t *vfvld, uint16_t *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	/* single-byte field, no byte swap needed */
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* FW encodes the number of *additional* MACs beyond the first one */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/*
		 * Copy any extra MAC addresses from the reply; the cases
		 * intentionally fall through so a request for N addresses
		 * copies nmac3..nmac0 down to the second address.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/*
	 * Newer firmware returns VFVLD/VIN in the command reply; older
	 * firmware encodes them inside the VIID itself.
	 */
	if (vfvld) {
		*vfvld = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIVLD(ret);
	}
	if (vin) {
		*vin = adap->params.viid_smt_extn_support ?
		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
		    G_FW_VIID_VIN(ret);
	}

	return ret;
}

/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	backwards compatible and convieniance routine to allocate a Virtual
 *	Interface with a Ethernet Port Application Function and Intrustion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				vfvld, vin, FW_VI_FUNC_ETH, 0);
}

/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifiler
 *
 *	Free a previously allocated virtual interface.
7913 */ 7914int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 7915 unsigned int vf, unsigned int viid) 7916{ 7917 struct fw_vi_cmd c; 7918 7919 memset(&c, 0, sizeof(c)); 7920 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | 7921 F_FW_CMD_REQUEST | 7922 F_FW_CMD_EXEC | 7923 V_FW_VI_CMD_PFN(pf) | 7924 V_FW_VI_CMD_VFN(vf)); 7925 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); 7926 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); 7927 7928 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7929} 7930 7931/** 7932 * t4_set_rxmode - set Rx properties of a virtual interface 7933 * @adap: the adapter 7934 * @mbox: mailbox to use for the FW command 7935 * @viid: the VI id 7936 * @mtu: the new MTU or -1 7937 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 7938 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 7939 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 7940 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 7941 * @sleep_ok: if true we may sleep while awaiting command completion 7942 * 7943 * Sets Rx properties of a virtual interface. 
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/*
	 * convert to FW values: -1 ("no change") maps to the all-ones field
	 * mask, which firmware interprets as "leave this setting alone".
	 */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen =
	    cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
			V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
			V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
			V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
			V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.
 *	If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * Process the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * entries per mailbox command.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* command length covers only the exact entries actually used */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			/* index >= max_naddr means the address did not fit */
			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* only the first chunk may also free existing filters */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* partial success (-FW_ENOMEM) still reports the filters we got */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: add MAC to SMT and return its index, or NULL
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* also program the SMT when the caller wants the SMT index back */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* an out-of-range index in the reply means the TCAM is full */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			/*
			 * Newer firmware returns the SMT index in the reply;
			 * older firmware derives it from the VIID's VIN.
			 */
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				if (chip_id(adap) <= CHELSIO_T5)
					*smt_idx = (viid & M_FW_VIID_VIN) << 1;
				else
					*smt_idx = viid & M_FW_VIID_VIN;
			}
		}
	}
	return ret;
}

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	u32 val;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): this builds a FW_VI_MAC_CMD but encodes the VIID with
	 * V_FW_VI_ENABLE_CMD_VIID; presumably the two commands share the same
	 * VIID field layout — confirm against t4fw_interface.h.
	 */
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
	    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
	c.freemacs_to_len16 = cpu_to_be32(val);
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_enable_vi_params - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *	@dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	/* non-sleeping mailbox write variant */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.  Note that setting DCB Enable
 *	only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	/* convenience wrapper with DCB message delivery disabled */
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = cpu_to_be16(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_stop - stop an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Stops an ingress queue and its associated FLs, if any.  This causes
 *	any current or future data/messages destined for these queues to be
 *	tossed.
8243 */ 8244int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 8245 unsigned int vf, unsigned int iqtype, unsigned int iqid, 8246 unsigned int fl0id, unsigned int fl1id) 8247{ 8248 struct fw_iq_cmd c; 8249 8250 memset(&c, 0, sizeof(c)); 8251 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 8252 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 8253 V_FW_IQ_CMD_VFN(vf)); 8254 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 8255 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 8256 c.iqid = cpu_to_be16(iqid); 8257 c.fl0id = cpu_to_be16(fl0id); 8258 c.fl1id = cpu_to_be16(fl1id); 8259 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8260} 8261 8262/** 8263 * t4_iq_free - free an ingress queue and its FLs 8264 * @adap: the adapter 8265 * @mbox: mailbox to use for the FW command 8266 * @pf: the PF owning the queues 8267 * @vf: the VF owning the queues 8268 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 8269 * @iqid: ingress queue id 8270 * @fl0id: FL0 queue id or 0xffff if no attached FL0 8271 * @fl1id: FL1 queue id or 0xffff if no attached FL1 8272 * 8273 * Frees an ingress queue and its associated FLs, if any. 
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* same command shape as t4_iq_stop() but with the FREE flag */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_CTRL_CMD_PFN(pf) |
				  V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
8373 */ 8374const char *t4_link_down_rc_str(unsigned char link_down_rc) 8375{ 8376 static const char *reason[] = { 8377 "Link Down", 8378 "Remote Fault", 8379 "Auto-negotiation Failure", 8380 "Reserved3", 8381 "Insufficient Airflow", 8382 "Unable To Determine Reason", 8383 "No RX Signal Detected", 8384 "Reserved7", 8385 }; 8386 8387 if (link_down_rc >= ARRAY_SIZE(reason)) 8388 return "Bad Reason Code"; 8389 8390 return reason[link_down_rc]; 8391} 8392 8393/* 8394 * Return the highest speed set in the port capabilities, in Mb/s. 8395 */ 8396unsigned int fwcap_to_speed(uint32_t caps) 8397{ 8398 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 8399 do { \ 8400 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 8401 return __speed; \ 8402 } while (0) 8403 8404 TEST_SPEED_RETURN(400G, 400000); 8405 TEST_SPEED_RETURN(200G, 200000); 8406 TEST_SPEED_RETURN(100G, 100000); 8407 TEST_SPEED_RETURN(50G, 50000); 8408 TEST_SPEED_RETURN(40G, 40000); 8409 TEST_SPEED_RETURN(25G, 25000); 8410 TEST_SPEED_RETURN(10G, 10000); 8411 TEST_SPEED_RETURN(1G, 1000); 8412 TEST_SPEED_RETURN(100M, 100); 8413 8414 #undef TEST_SPEED_RETURN 8415 8416 return 0; 8417} 8418 8419/* 8420 * Return the port capabilities bit for the given speed, which is in Mb/s. 8421 */ 8422uint32_t speed_to_fwcap(unsigned int speed) 8423{ 8424 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 8425 do { \ 8426 if (speed == __speed) \ 8427 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 8428 } while (0) 8429 8430 TEST_SPEED_RETURN(400G, 400000); 8431 TEST_SPEED_RETURN(200G, 200000); 8432 TEST_SPEED_RETURN(100G, 100000); 8433 TEST_SPEED_RETURN(50G, 50000); 8434 TEST_SPEED_RETURN(40G, 40000); 8435 TEST_SPEED_RETURN(25G, 25000); 8436 TEST_SPEED_RETURN(10G, 10000); 8437 TEST_SPEED_RETURN(1G, 1000); 8438 TEST_SPEED_RETURN(100M, 100); 8439 8440 #undef TEST_SPEED_RETURN 8441 8442 return 0; 8443} 8444 8445/* 8446 * Return the port capabilities bit for the highest speed in the capabilities. 
8447 */ 8448uint32_t fwcap_top_speed(uint32_t caps) 8449{ 8450 #define TEST_SPEED_RETURN(__caps_speed) \ 8451 do { \ 8452 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 8453 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 8454 } while (0) 8455 8456 TEST_SPEED_RETURN(400G); 8457 TEST_SPEED_RETURN(200G); 8458 TEST_SPEED_RETURN(100G); 8459 TEST_SPEED_RETURN(50G); 8460 TEST_SPEED_RETURN(40G); 8461 TEST_SPEED_RETURN(25G); 8462 TEST_SPEED_RETURN(10G); 8463 TEST_SPEED_RETURN(1G); 8464 TEST_SPEED_RETURN(100M); 8465 8466 #undef TEST_SPEED_RETURN 8467 8468 return 0; 8469} 8470 8471 8472/** 8473 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 8474 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 8475 * 8476 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new 8477 * 32-bit Port Capabilities value. 8478 */ 8479static uint32_t lstatus_to_fwcap(u32 lstatus) 8480{ 8481 uint32_t linkattr = 0; 8482 8483 /* 8484 * Unfortunately the format of the Link Status in the old 8485 * 16-bit Port Information message isn't the same as the 8486 * 16-bit Port Capabilities bitfield used everywhere else ... 
8487 */ 8488 if (lstatus & F_FW_PORT_CMD_RXPAUSE) 8489 linkattr |= FW_PORT_CAP32_FC_RX; 8490 if (lstatus & F_FW_PORT_CMD_TXPAUSE) 8491 linkattr |= FW_PORT_CAP32_FC_TX; 8492 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 8493 linkattr |= FW_PORT_CAP32_SPEED_100M; 8494 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 8495 linkattr |= FW_PORT_CAP32_SPEED_1G; 8496 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 8497 linkattr |= FW_PORT_CAP32_SPEED_10G; 8498 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) 8499 linkattr |= FW_PORT_CAP32_SPEED_25G; 8500 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) 8501 linkattr |= FW_PORT_CAP32_SPEED_40G; 8502 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) 8503 linkattr |= FW_PORT_CAP32_SPEED_100G; 8504 8505 return linkattr; 8506} 8507 8508/* 8509 * Updates all fields owned by the common code in port_info and link_config 8510 * based on information provided by the firmware. Does not touch any 8511 * requested_* field. 8512 */ 8513static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p, 8514 enum fw_port_action action, bool *mod_changed, bool *link_changed) 8515{ 8516 struct link_config old_lc, *lc = &pi->link_cfg; 8517 unsigned char fc, fec; 8518 u32 stat, linkattr; 8519 int old_ptype, old_mtype; 8520 8521 old_ptype = pi->port_type; 8522 old_mtype = pi->mod_type; 8523 old_lc = *lc; 8524 if (action == FW_PORT_ACTION_GET_PORT_INFO) { 8525 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); 8526 8527 pi->port_type = G_FW_PORT_CMD_PTYPE(stat); 8528 pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat); 8529 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ? 
8530 G_FW_PORT_CMD_MDIOADDR(stat) : -1; 8531 8532 lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap)); 8533 lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap)); 8534 lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap)); 8535 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; 8536 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat); 8537 8538 linkattr = lstatus_to_fwcap(stat); 8539 } else if (action == FW_PORT_ACTION_GET_PORT_INFO32) { 8540 stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32); 8541 8542 pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat); 8543 pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat); 8544 pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ? 8545 G_FW_PORT_CMD_MDIOADDR32(stat) : -1; 8546 8547 lc->supported = be32_to_cpu(p->u.info32.pcaps32); 8548 lc->advertising = be32_to_cpu(p->u.info32.acaps32); 8549 lc->lp_advertising = be16_to_cpu(p->u.info32.lpacaps32); 8550 lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0; 8551 lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat); 8552 8553 linkattr = be32_to_cpu(p->u.info32.linkattr32); 8554 } else { 8555 CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action); 8556 return; 8557 } 8558 8559 lc->speed = fwcap_to_speed(linkattr); 8560 8561 fc = 0; 8562 if (linkattr & FW_PORT_CAP32_FC_RX) 8563 fc |= PAUSE_RX; 8564 if (linkattr & FW_PORT_CAP32_FC_TX) 8565 fc |= PAUSE_TX; 8566 lc->fc = fc; 8567 8568 fec = FEC_NONE; 8569 if (linkattr & FW_PORT_CAP32_FEC_RS) 8570 fec |= FEC_RS; 8571 if (linkattr & FW_PORT_CAP32_FEC_BASER_RS) 8572 fec |= FEC_BASER_RS; 8573 lc->fec = fec; 8574 8575 if (mod_changed != NULL) 8576 *mod_changed = false; 8577 if (link_changed != NULL) 8578 *link_changed = false; 8579 if (old_ptype != pi->port_type || old_mtype != pi->mod_type || 8580 old_lc.supported != lc->supported) { 8581 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { 8582 lc->fec_hint = lc->advertising & 8583 V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC); 8584 } 8585 if (mod_changed != NULL) 8586 
*mod_changed = true; 8587 } 8588 if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed || 8589 old_lc.fec != lc->fec || old_lc.fc != lc->fc) { 8590 if (link_changed != NULL) 8591 *link_changed = true; 8592 } 8593} 8594 8595/** 8596 * t4_update_port_info - retrieve and update port information if changed 8597 * @pi: the port_info 8598 * 8599 * We issue a Get Port Information Command to the Firmware and, if 8600 * successful, we check to see if anything is different from what we 8601 * last recorded and update things accordingly. 8602 */ 8603 int t4_update_port_info(struct port_info *pi) 8604 { 8605 struct adapter *sc = pi->adapter; 8606 struct fw_port_cmd cmd; 8607 enum fw_port_action action; 8608 int ret; 8609 8610 memset(&cmd, 0, sizeof(cmd)); 8611 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 8612 F_FW_CMD_REQUEST | F_FW_CMD_READ | 8613 V_FW_PORT_CMD_PORTID(pi->tx_chan)); 8614 action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 : 8615 FW_PORT_ACTION_GET_PORT_INFO; 8616 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | 8617 FW_LEN16(cmd)); 8618 ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd); 8619 if (ret) 8620 return ret; 8621 8622 handle_port_info(pi, &cmd, action, NULL, NULL); 8623 return 0; 8624} 8625 8626/** 8627 * t4_handle_fw_rpl - process a FW reply message 8628 * @adap: the adapter 8629 * @rpl: start of the FW message 8630 * 8631 * Processes a FW message, such as link state change messages. 
8632 */ 8633int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 8634{ 8635 u8 opcode = *(const u8 *)rpl; 8636 const struct fw_port_cmd *p = (const void *)rpl; 8637 enum fw_port_action action = 8638 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); 8639 bool mod_changed, link_changed; 8640 8641 if (opcode == FW_PORT_CMD && 8642 (action == FW_PORT_ACTION_GET_PORT_INFO || 8643 action == FW_PORT_ACTION_GET_PORT_INFO32)) { 8644 /* link/module state change message */ 8645 int i; 8646 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); 8647 struct port_info *pi = NULL; 8648 struct link_config *lc; 8649 8650 for_each_port(adap, i) { 8651 pi = adap2pinfo(adap, i); 8652 if (pi->tx_chan == chan) 8653 break; 8654 } 8655 8656 lc = &pi->link_cfg; 8657 PORT_LOCK(pi); 8658 handle_port_info(pi, p, action, &mod_changed, &link_changed); 8659 PORT_UNLOCK(pi); 8660 if (mod_changed) 8661 t4_os_portmod_changed(pi); 8662 if (link_changed) { 8663 PORT_LOCK(pi); 8664 t4_os_link_changed(pi); 8665 PORT_UNLOCK(pi); 8666 } 8667 } else { 8668 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); 8669 return -EINVAL; 8670 } 8671 return 0; 8672} 8673 8674/** 8675 * get_pci_mode - determine a card's PCI mode 8676 * @adapter: the adapter 8677 * @p: where to store the PCI settings 8678 * 8679 * Determines a card's PCI mode and associated parameters, such as speed 8680 * and width. 
 */
static void get_pci_mode(struct adapter *adapter,
			 struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		/* Decode speed and width from the PCIe Link Status register. */
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/* Describes one explicitly supported (non-standard) Flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw 3-byte Read ID response */
	u32 size_mb;			/* part size in bytes */
};

/*
 * Determine the size and sector layout of the adapter's serial Flash and
 * record them in adapter->params.sf_size / sf_nsec.  Returns 0 on success
 * or a negative error if the Flash can't be probed.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}

/*
 * Program the PCIe Completion Timeout range field of Device Control 2 with
 * the encoded @range value (low 4 bits of the register).
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		/* Replace only the low 4 bits (the timeout range field). */
		val &= 0xfff0;
		val |= range ;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/*
 * Return the chip_params for the given chip id (CHELSIO_T4/T5/T6), or NULL
 * if the id is out of range.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
	};

	/* The table is indexed by chip generation, starting at T4. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		/* The MAC config register moved between T4 and T5+. */
		u32 a_port_cfg = is_t4(adapter) ?
				 PORT_REG(port, A_XGMAC_PORT_CFG) :
				 T5_PORT_REG(port, A_MAC_PORT_CFG);

		/* Drop signal-detect to force the link down. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	/* Stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}

/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.
If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The register stores the entry count in units of 128. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
9168 */ 9169int t4_init_sge_params(struct adapter *adapter) 9170{ 9171 u32 r; 9172 struct sge_params *sp = &adapter->params.sge; 9173 unsigned i, tscale = 1; 9174 9175 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD); 9176 sp->counter_val[0] = G_THRESHOLD_0(r); 9177 sp->counter_val[1] = G_THRESHOLD_1(r); 9178 sp->counter_val[2] = G_THRESHOLD_2(r); 9179 sp->counter_val[3] = G_THRESHOLD_3(r); 9180 9181 if (chip_id(adapter) >= CHELSIO_T6) { 9182 r = t4_read_reg(adapter, A_SGE_ITP_CONTROL); 9183 tscale = G_TSCALE(r); 9184 if (tscale == 0) 9185 tscale = 1; 9186 else 9187 tscale += 2; 9188 } 9189 9190 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1); 9191 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale; 9192 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale; 9193 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3); 9194 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale; 9195 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale; 9196 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5); 9197 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale; 9198 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale; 9199 9200 r = t4_read_reg(adapter, A_SGE_CONM_CTRL); 9201 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 9202 if (is_t4(adapter)) 9203 sp->fl_starve_threshold2 = sp->fl_starve_threshold; 9204 else if (is_t5(adapter)) 9205 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 9206 else 9207 sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1; 9208 9209 /* egress queues: log2 of # of doorbells per BAR2 page */ 9210 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 9211 r >>= S_QUEUESPERPAGEPF0 + 9212 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; 9213 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 9214 9215 /* ingress queues: log2 of # of doorbells per BAR2 page */ 9216 r = t4_read_reg(adapter, 
A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 9217 r >>= S_QUEUESPERPAGEPF0 + 9218 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; 9219 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 9220 9221 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); 9222 r >>= S_HOSTPAGESIZEPF0 + 9223 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf; 9224 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10; 9225 9226 r = t4_read_reg(adapter, A_SGE_CONTROL); 9227 sp->sge_control = r; 9228 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64; 9229 sp->fl_pktshift = G_PKTSHIFT(r); 9230 if (chip_id(adapter) <= CHELSIO_T5) { 9231 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 9232 X_INGPADBOUNDARY_SHIFT); 9233 } else { 9234 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 9235 X_T6_INGPADBOUNDARY_SHIFT); 9236 } 9237 if (is_t4(adapter)) 9238 sp->pack_boundary = sp->pad_boundary; 9239 else { 9240 r = t4_read_reg(adapter, A_SGE_CONTROL2); 9241 if (G_INGPACKBOUNDARY(r) == 0) 9242 sp->pack_boundary = 16; 9243 else 9244 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); 9245 } 9246 for (i = 0; i < SGE_FLBUF_SIZES; i++) 9247 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter, 9248 A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 9249 9250 return 0; 9251} 9252 9253/* 9254 * Read and cache the adapter's compressed filter mode and ingress config. 9255 */ 9256static void read_filter_mode_and_ingress_config(struct adapter *adap, 9257 bool sleep_ok) 9258{ 9259 uint32_t v; 9260 struct tp_params *tpp = &adap->params.tp; 9261 9262 t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP, 9263 sleep_ok); 9264 t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG, 9265 sleep_ok); 9266 9267 /* 9268 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 9269 * shift positions of several elements of the Compressed Filter Tuple 9270 * for this adapter which we need frequently ... 
9271 */ 9272 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE); 9273 tpp->port_shift = t4_filter_field_shift(adap, F_PORT); 9274 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 9275 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN); 9276 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS); 9277 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); 9278 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE); 9279 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH); 9280 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE); 9281 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION); 9282 9283 if (chip_id(adap) > CHELSIO_T4) { 9284 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3)); 9285 adap->params.tp.hash_filter_mask = v; 9286 v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4)); 9287 adap->params.tp.hash_filter_mask |= (u64)v << 32; 9288 } 9289} 9290 9291/** 9292 * t4_init_tp_params - initialize adap->params.tp 9293 * @adap: the adapter 9294 * 9295 * Initialize various fields of the adapter's TP Parameters structure. 9296 */ 9297int t4_init_tp_params(struct adapter *adap, bool sleep_ok) 9298{ 9299 int chan; 9300 u32 v; 9301 struct tp_params *tpp = &adap->params.tp; 9302 9303 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); 9304 tpp->tre = G_TIMERRESOLUTION(v); 9305 tpp->dack_re = G_DELAYEDACKRESOLUTION(v); 9306 9307 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 9308 for (chan = 0; chan < MAX_NCHAN; chan++) 9309 tpp->tx_modq[chan] = chan; 9310 9311 read_filter_mode_and_ingress_config(adap, sleep_ok); 9312 9313 /* 9314 * Cache a mask of the bits that represent the error vector portion of 9315 * rx_pkt.err_vec. T6+ can use a compressed error vector to make room 9316 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE). 
9317 */ 9318 tpp->err_vec_mask = htobe16(0xffff); 9319 if (chip_id(adap) > CHELSIO_T5) { 9320 v = t4_read_reg(adap, A_TP_OUT_CONFIG); 9321 if (v & F_CRXPKTENC) { 9322 tpp->err_vec_mask = 9323 htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC)); 9324 } 9325 } 9326 9327 return 0; 9328} 9329 9330/** 9331 * t4_filter_field_shift - calculate filter field shift 9332 * @adap: the adapter 9333 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) 9334 * 9335 * Return the shift position of a filter field within the Compressed 9336 * Filter Tuple. The filter field is specified via its selection bit 9337 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. 9338 */ 9339int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 9340{ 9341 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 9342 unsigned int sel; 9343 int field_shift; 9344 9345 if ((filter_mode & filter_sel) == 0) 9346 return -1; 9347 9348 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 9349 switch (filter_mode & sel) { 9350 case F_FCOE: 9351 field_shift += W_FT_FCOE; 9352 break; 9353 case F_PORT: 9354 field_shift += W_FT_PORT; 9355 break; 9356 case F_VNIC_ID: 9357 field_shift += W_FT_VNIC_ID; 9358 break; 9359 case F_VLAN: 9360 field_shift += W_FT_VLAN; 9361 break; 9362 case F_TOS: 9363 field_shift += W_FT_TOS; 9364 break; 9365 case F_PROTOCOL: 9366 field_shift += W_FT_PROTOCOL; 9367 break; 9368 case F_ETHERTYPE: 9369 field_shift += W_FT_ETHERTYPE; 9370 break; 9371 case F_MACMATCH: 9372 field_shift += W_FT_MACMATCH; 9373 break; 9374 case F_MPSHITTYPE: 9375 field_shift += W_FT_MPSHITTYPE; 9376 break; 9377 case F_FRAGMENTATION: 9378 field_shift += W_FT_FRAGMENTATION; 9379 break; 9380 } 9381 } 9382 return field_shift; 9383} 9384 9385int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id) 9386{ 9387 u8 addr[6]; 9388 int ret, i, j; 9389 struct port_info *p = adap2pinfo(adap, port_id); 9390 u32 param, val; 9391 struct vi_info *vi = &p->vi[0]; 9392 9393 for (i 
= 0, j = -1; i <= p->port_id; i++) { 9394 do { 9395 j++; 9396 } while ((adap->params.portvec & (1 << j)) == 0); 9397 } 9398 9399 p->tx_chan = j; 9400 p->mps_bg_map = t4_get_mps_bg_map(adap, j); 9401 p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j); 9402 p->lport = j; 9403 9404 if (!(adap->flags & IS_VF) || 9405 adap->params.vfres.r_caps & FW_CMD_CAP_PORT) { 9406 t4_update_port_info(p); 9407 } 9408 9409 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size, 9410 &vi->vfvld, &vi->vin); 9411 if (ret < 0) 9412 return ret; 9413 9414 vi->viid = ret; 9415 t4_os_set_hw_addr(p, addr); 9416 9417 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 9418 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 9419 V_FW_PARAMS_PARAM_YZ(vi->viid); 9420 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val); 9421 if (ret) 9422 vi->rss_base = 0xffff; 9423 else { 9424 /* MPASS((val >> 16) == rss_size); */ 9425 vi->rss_base = val & 0xffff; 9426 } 9427 9428 return 0; 9429} 9430 9431/** 9432 * t4_read_cimq_cfg - read CIM queue configuration 9433 * @adap: the adapter 9434 * @base: holds the queue base addresses in bytes 9435 * @size: holds the queue sizes in bytes 9436 * @thres: holds the queue full thresholds in bytes 9437 * 9438 * Returns the current configuration of the CIM queues, starting with 9439 * the IBQs, then the OBQs. 
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/*
	 * IBQs first: select each queue via CONFIG_REF, then read its
	 * geometry back from CONFIG_CTRL.
	 */
	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	/* Then the OBQs; these have no full-threshold field. */
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist; reject misaligned capacities too. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Arm the debug read for this word, then poll for completion. */
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable debug access when done. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base/size from the queue config. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable debug access when done. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

/* Base offsets of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	/* The host access interface is single-user; bail if it's busy. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Writing the address kicks off the read; wait for HOSTBUSY
		 * to clear, then collect the data word.
		 */
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Data must be staged before the address+HOSTWRITE kick. */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single word into CIM internal space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA config so we can restore its run state on exit. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the HW write pointer so entries come out oldest
	 * first.
	 */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Request a read of entry @idx, then wait for RDEN to clear
		 * before collecting the data word.
		 */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		/* Re-enable the LA; don't let a restart failure mask an
		 * earlier read error.
		 */
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In modes >= 2 an entry may still be half-written; if so, skip it
	 * here and wipe it at the end.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Build the config word used for each read: clear the read pointer
	 * field, keep the configured LA mask.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/*
 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		/* Indirect read: the high half of the write selects entry i. */
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Return the current configuration of a HW Tx scheduler.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each register; odd ones live in the
		 * upper halfword.
		 */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;
		cpt = v & 0xff;
		if (!cpt)
			*kbps = 0;	/* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 *	A @size of 0 simply erases the config file region of the flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
	u32 params[1], val[1];
	int ret;

	/* External-memory init is a T5-only operation; a no-op elsewhere. */
	if (!is_t5(adap))
		return 0;

	val[0] = 0xff; /* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
			FW_CMD_MAX_TIMEOUT);

	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];	// ROM signature. The value 0xaa55
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];	/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 * 512B = 512KB max */
	VENDOR_ID = 0x1425,        /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Byte 7 of the legacy header is the cksum field
			 * (2B signature + 1B size512 + 4B init entry point).
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
10173 */ 10174int t4_load_boot(struct adapter *adap, u8 *boot_data, 10175 unsigned int boot_addr, unsigned int size) 10176{ 10177 pci_exp_rom_header_t *header; 10178 int pcir_offset ; 10179 pcir_data_t *pcir_header; 10180 int ret, addr; 10181 uint16_t device_id; 10182 unsigned int i; 10183 unsigned int boot_sector = (boot_addr * 1024 ); 10184 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 10185 10186 /* 10187 * Make sure the boot image does not encroach on the firmware region 10188 */ 10189 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { 10190 CH_ERR(adap, "boot image encroaching on firmware region\n"); 10191 return -EFBIG; 10192 } 10193 10194 /* 10195 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, 10196 * and Boot configuration data sections. These 3 boot sections span 10197 * sectors 0 to 7 in flash and live right before the FW image location. 10198 */ 10199 i = DIV_ROUND_UP(size ? size : FLASH_FW_START, 10200 sf_sec_size); 10201 ret = t4_flash_erase_sectors(adap, boot_sector >> 16, 10202 (boot_sector >> 16) + i - 1); 10203 10204 /* 10205 * If size == 0 then we're simply erasing the FLASH sectors associated 10206 * with the on-adapter option ROM file 10207 */ 10208 if (ret || (size == 0)) 10209 goto out; 10210 10211 /* Get boot header */ 10212 header = (pci_exp_rom_header_t *)boot_data; 10213 pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset); 10214 /* PCIR Data Structure */ 10215 pcir_header = (pcir_data_t *) &boot_data[pcir_offset]; 10216 10217 /* 10218 * Perform some primitive sanity testing to avoid accidentally 10219 * writing garbage over the boot sectors. We ought to check for 10220 * more but it's not worth it for now ... 
10221 */ 10222 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { 10223 CH_ERR(adap, "boot image too small/large\n"); 10224 return -EFBIG; 10225 } 10226 10227#ifndef CHELSIO_T4_DIAGS 10228 /* 10229 * Check BOOT ROM header signature 10230 */ 10231 if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) { 10232 CH_ERR(adap, "Boot image missing signature\n"); 10233 return -EINVAL; 10234 } 10235 10236 /* 10237 * Check PCI header signature 10238 */ 10239 if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) { 10240 CH_ERR(adap, "PCI header missing signature\n"); 10241 return -EINVAL; 10242 } 10243 10244 /* 10245 * Check Vendor ID matches Chelsio ID 10246 */ 10247 if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) { 10248 CH_ERR(adap, "Vendor ID missing signature\n"); 10249 return -EINVAL; 10250 } 10251#endif 10252 10253 /* 10254 * Retrieve adapter's device ID 10255 */ 10256 t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id); 10257 /* Want to deal with PF 0 so I strip off PF 4 indicator */ 10258 device_id = device_id & 0xf0ff; 10259 10260 /* 10261 * Check PCIE Device ID 10262 */ 10263 if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) { 10264 /* 10265 * Change the device ID in the Boot BIOS image to match 10266 * the Device ID of the current adapter. 10267 */ 10268 modify_device_id(device_id, boot_data); 10269 } 10270 10271 /* 10272 * Skip over the first SF_PAGE_SIZE worth of data and write it after 10273 * we finish copying the rest of the boot image. This will ensure 10274 * that the BIOS boot header will only be written if the boot image 10275 * was written in full. 
10276 */ 10277 addr = boot_sector; 10278 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { 10279 addr += SF_PAGE_SIZE; 10280 boot_data += SF_PAGE_SIZE; 10281 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0); 10282 if (ret) 10283 goto out; 10284 } 10285 10286 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, 10287 (const u8 *)header, 0); 10288 10289out: 10290 if (ret) 10291 CH_ERR(adap, "boot image download failed, error %d\n", ret); 10292 return ret; 10293} 10294 10295/* 10296 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration 10297 * @adapter: the adapter 10298 * 10299 * Return the address within the flash where the OptionROM Configuration 10300 * is stored, or an error if the device FLASH is too small to contain 10301 * a OptionROM Configuration. 10302 */ 10303static int t4_flash_bootcfg_addr(struct adapter *adapter) 10304{ 10305 /* 10306 * If the device FLASH isn't large enough to hold a Firmware 10307 * Configuration File, return an error. 
 */
	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_BOOTCFG_START;
}

/*
 *	t4_load_bootcfg - write the OptionROM config data to flash
 *	@adap: the adapter
 *	@cfg_data: the OptionROM config data to write
 *	@size: config data size; 0 erases the bootcfg region only
 */
int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_set_filter_mode - configure the optional components of filter tuples
 *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets the filter mode by selecting the optional components to enable
 *	in filter tuples.  Returns 0 on success and a negative error if the
 *	requested mode needs more bits than are available for optional
 *	components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
		       bool sleep_ok)
{
	/* Bit widths of each optional tuple field, indexed S_FCOE..S_FRAGMENTATION. */
	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	/* Sum the widths of all selected fields; they must fit in the
	 * available optional-tuple bits.
	 */
	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;
	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
	/* Refresh the cached copy of the mode/ingress config. */
	read_filter_mode_and_ingress_config(adap, sleep_ok);

	return 0;
}

/**
 *	t4_clr_port_stats - clear port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Clear HW statistics for the given port.
10403 */ 10404void t4_clr_port_stats(struct adapter *adap, int idx) 10405{ 10406 unsigned int i; 10407 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; 10408 u32 port_base_addr; 10409 10410 if (is_t4(adap)) 10411 port_base_addr = PORT_BASE(idx); 10412 else 10413 port_base_addr = T5_PORT_BASE(idx); 10414 10415 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 10416 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 10417 t4_write_reg(adap, port_base_addr + i, 0); 10418 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 10419 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 10420 t4_write_reg(adap, port_base_addr + i, 0); 10421 for (i = 0; i < 4; i++) 10422 if (bgmap & (1 << i)) { 10423 t4_write_reg(adap, 10424 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 10425 t4_write_reg(adap, 10426 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 10427 } 10428} 10429 10430/** 10431 * t4_i2c_rd - read I2C data from adapter 10432 * @adap: the adapter 10433 * @port: Port number if per-port device; <0 if not 10434 * @devid: per-port device ID or absolute device ID 10435 * @offset: byte offset into device I2C space 10436 * @len: byte length of I2C space data 10437 * @buf: buffer in which to return I2C data 10438 * 10439 * Reads the I2C data from the indicated device and location. 10440 */ 10441int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 10442 int port, unsigned int devid, 10443 unsigned int offset, unsigned int len, 10444 u8 *buf) 10445{ 10446 u32 ldst_addrspace; 10447 struct fw_ldst_cmd ldst; 10448 int ret; 10449 10450 if (port >= 4 || 10451 devid >= 256 || 10452 offset >= 256 || 10453 len > sizeof ldst.u.i2c.data) 10454 return -EINVAL; 10455 10456 memset(&ldst, 0, sizeof ldst); 10457 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); 10458 ldst.op_to_addrspace = 10459 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 10460 F_FW_CMD_REQUEST | 10461 F_FW_CMD_READ | 10462 ldst_addrspace); 10463 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 10464 ldst.u.i2c.pid = (port < 0 ? 
0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
	/* On success the FW reply (written back into ldst) holds the data. */
	if (!ret)
		memcpy(buf, ldst.u.i2c.data, len);
	return ret;
}

/**
 *	t4_i2c_wr - write I2C data to adapter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: Port number if per-port device; <0 if not
 *	@devid: per-port device ID or absolute device ID
 *	@offset: byte offset into device I2C space
 *	@len: byte length of I2C space data
 *	@buf: buffer containing new I2C data
 *
 *	Write the I2C data to the indicated device and location.
 */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd ldst;

	/* Same argument limits as t4_i2c_rd above. */
	if (port >= 4 ||
	    devid >= 256 ||
	    offset >= 256 ||
	    len > sizeof ldst.u.i2c.data)
		return -EINVAL;

	memset(&ldst, 0, sizeof ldst);
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
	ldst.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE |
			    ldst_addrspace);
	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* 0xff is the Port ID used when the device is not per-port. */
	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	memcpy(ldst.u.i2c.data, buf, len);
	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
}

/**
 *	t4_sge_ctxt_rd - read an SGE context through FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Issues a FW command through the given mailbox to read an SGE context.
 */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
		   enum ctxt_type ctype, u32 *data)
{
	int ret;
	struct fw_ldst_cmd c;

	/*
	 * Map the context type to its FW LDST address space.  Note that
	 * "ret" temporarily holds the address space here before it is reused
	 * for the mailbox return value below.
	 */
	if (ctype == CTXT_EGRESS)
		ret = FW_LDST_ADDRSPC_SGE_EGRC;
	else if (ctype == CTXT_INGRESS)
		ret = FW_LDST_ADDRSPC_SGE_INGC;
	else if (ctype == CTXT_FLM)
		ret = FW_LDST_ADDRSPC_SGE_FLMC;
	else
		ret = FW_LDST_ADDRSPC_SGE_CONMC;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					V_FW_LDST_CMD_ADDRSPACE(ret));
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.physid = cpu_to_be32(cid);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* An SGE context is returned as six 32-bit words. */
		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
	}
	return ret;
}

/**
 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 *	@adap: the adapter
 *	@cid: the context id
 *	@ctype: the context type
 *	@data: where to store the context data
 *
 *	Reads an SGE context directly, bypassing FW.  This is only for
 *	debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	/* Poll the BUSY bit clear: up to 3 attempts, 1us apart. */
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret)
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
	return ret;
}

/*
 * Send a FW_SCHED_CMD CONFIG sub-command for scheduler type @type with the
 * given @minmaxen value.  Returns the mailbox command's result.
 */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
		    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
	cmd.u.config.type = type;
	cmd.u.config.minmaxen = minmaxen;

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/*
 * Send a FW_SCHED_CMD PARAMS sub-command carrying the full set of scheduler
 * parameters.  The multi-byte rate/weight/size fields are converted to
 * big-endian for the firmware; the single-byte fields are used as-is.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int cl,
		    int minrate, int maxrate, int weight, int pktsize,
		    int burstsize, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);
	cmd.u.params.burstsize =
cpu_to_be16(burstsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/*
 * Configure a channel-level rate limiter via FW_SCHED_CMD: @ratemode selects
 * relative (%) or absolute (kbps) interpretation of @maxrate.
 */
int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
			  unsigned int maxrate, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
	cmd.u.params.ch = channel;
	cmd.u.params.rate = ratemode;		/* REL or ABS */
	cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/*
 * Set the weighted-round-robin weight of class @cl within @channel.
 * @weight must be in the range 0..100.
 */
int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
			   int weight, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	if (weight < 0 || weight > 100)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.weight = cpu_to_be16(weight);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/*
 * Configure an absolute (kbps) class-level rate limiter for class @cl on
 * @channel.
 */
int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
			       int mode, unsigned int maxrate, int pktsize, int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write =
cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	/* Class rate limit is always an absolute bit-rate here. */
	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/**
 *	t4_config_watchdog - configure (enable/disable) a watchdog timer
 *	@adapter: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@timeout: watchdog timeout in ms
 *	@action: watchdog timer / action
 *
 *	There are separate watchdog timers for each possible watchdog
 *	action.  Configure one of the watchdog timers by setting a non-zero
 *	timeout.  Disable a watchdog timer by using a timeout of zero.
 */
int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
		       unsigned int pf, unsigned int vf,
		       unsigned int timeout, unsigned int action)
{
	struct fw_watchdog_cmd wdog;
	unsigned int ticks;

	/*
	 * The watchdog command expects a timeout in units of 10ms so we need
	 * to convert it here (via rounding) and force a minimum of one 10ms
	 * "tick" if the timeout is non-zero but the conversion results in 0
	 * ticks.
	 */
	ticks = (timeout + 5)/10;	/* round ms to nearest 10ms tick */
	if (timeout && !ticks)
		ticks = 1;

	memset(&wdog, 0, sizeof wdog);
	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_PARAMS_CMD_PFN(pf) |
				     V_FW_PARAMS_CMD_VFN(vf));
	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
	wdog.timeout = cpu_to_be32(ticks);
	wdog.action = cpu_to_be32(action);

	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
}

/*
 * Read the firmware device-log level via a FW_DEVLOG_CMD read.  On success
 * stores the level in *level and returns 0; otherwise returns the mailbox
 * error.
 */
int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
{
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			 sizeof(devlog_cmd), &devlog_cmd);
	if (ret)
		return ret;

	/* The FW reply (written back into devlog_cmd) carries the level. */
	*level = devlog_cmd.level;
	return 0;
}

/*
 * Set the firmware device-log level via a FW_DEVLOG_CMD write.  Returns the
 * mailbox command's result.
 */
int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
{
	struct fw_devlog_cmd devlog_cmd;

	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST |
					     F_FW_CMD_WRITE);
	devlog_cmd.level = level;
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
			  sizeof(devlog_cmd), &devlog_cmd);
}