t4_hw.c revision 339398
1/*- 2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/common/t4_hw.c 339398 2018-10-17 01:05:52Z np $"); 29 30#include "opt_inet.h" 31 32#include <sys/param.h> 33#include <sys/eventhandler.h> 34 35#include "common.h" 36#include "t4_regs.h" 37#include "t4_regs_values.h" 38#include "firmware/t4fw_interface.h" 39 40#undef msleep 41#define msleep(x) do { \ 42 if (cold) \ 43 DELAY((x) * 1000); \ 44 else \ 45 pause("t4hw", (x) * hz / 1000); \ 46} while (0) 47 48/** 49 * t4_wait_op_done_val - wait until an operation is completed 50 * @adapter: the adapter performing the operation 51 * @reg: the register to check for completion 52 * @mask: a single-bit field within @reg that indicates completion 53 * @polarity: the value of the field when the operation is completed 54 * @attempts: number of check iterations 55 * @delay: delay in usecs between iterations 56 * @valp: where to store the value of the register at completion time 57 * 58 * Wait until an operation is completed by checking a bit in a register 59 * up to @attempts times. If @valp is not NULL the value of the register 60 * at the time it indicated completion is stored there. Returns 0 if the 61 * operation completes and -EAGAIN otherwise. 
62 */ 63static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 64 int polarity, int attempts, int delay, u32 *valp) 65{ 66 while (1) { 67 u32 val = t4_read_reg(adapter, reg); 68 69 if (!!(val & mask) == polarity) { 70 if (valp) 71 *valp = val; 72 return 0; 73 } 74 if (--attempts == 0) 75 return -EAGAIN; 76 if (delay) 77 udelay(delay); 78 } 79} 80 81static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 82 int polarity, int attempts, int delay) 83{ 84 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 85 delay, NULL); 86} 87 88/** 89 * t4_set_reg_field - set a register field to a value 90 * @adapter: the adapter to program 91 * @addr: the register address 92 * @mask: specifies the portion of the register to modify 93 * @val: the new value for the register field 94 * 95 * Sets a register field specified by the supplied mask to the 96 * given value. 97 */ 98void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 99 u32 val) 100{ 101 u32 v = t4_read_reg(adapter, addr) & ~mask; 102 103 t4_write_reg(adapter, addr, v | val); 104 (void) t4_read_reg(adapter, addr); /* flush */ 105} 106 107/** 108 * t4_read_indirect - read indirectly addressed registers 109 * @adap: the adapter 110 * @addr_reg: register holding the indirect address 111 * @data_reg: register holding the value of the indirect register 112 * @vals: where the read register values are stored 113 * @nregs: how many indirect registers to read 114 * @start_idx: index of first indirect register to read 115 * 116 * Reads registers that are accessed indirectly through an address/data 117 * register pair. 
118 */ 119void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 120 unsigned int data_reg, u32 *vals, 121 unsigned int nregs, unsigned int start_idx) 122{ 123 while (nregs--) { 124 t4_write_reg(adap, addr_reg, start_idx); 125 *vals++ = t4_read_reg(adap, data_reg); 126 start_idx++; 127 } 128} 129 130/** 131 * t4_write_indirect - write indirectly addressed registers 132 * @adap: the adapter 133 * @addr_reg: register holding the indirect addresses 134 * @data_reg: register holding the value for the indirect registers 135 * @vals: values to write 136 * @nregs: how many indirect registers to write 137 * @start_idx: address of first indirect register to write 138 * 139 * Writes a sequential block of registers that are accessed indirectly 140 * through an address/data register pair. 141 */ 142void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 143 unsigned int data_reg, const u32 *vals, 144 unsigned int nregs, unsigned int start_idx) 145{ 146 while (nregs--) { 147 t4_write_reg(adap, addr_reg, start_idx++); 148 t4_write_reg(adap, data_reg, *vals++); 149 } 150} 151 152/* 153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 154 * mechanism. This guarantees that we get the real value even if we're 155 * operating within a Virtual Machine and the Hypervisor is trapping our 156 * Configuration Space accesses. 157 * 158 * N.B. This routine should only be used as a last resort: the firmware uses 159 * the backdoor registers on a regular basis and we can end up 160 * conflicting with it's uses! 
161 */ 162u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg) 163{ 164 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 165 u32 val; 166 167 if (chip_id(adap) <= CHELSIO_T5) 168 req |= F_ENABLE; 169 else 170 req |= F_T6_ENABLE; 171 172 if (is_t4(adap)) 173 req |= F_LOCALCFG; 174 175 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 176 val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 177 178 /* 179 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 180 * Configuration Space read. (None of the other fields matter when 181 * F_ENABLE is 0 so a simple register write is easier than a 182 * read-modify-write via t4_set_reg_field().) 183 */ 184 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 185 186 return val; 187} 188 189/* 190 * t4_report_fw_error - report firmware error 191 * @adap: the adapter 192 * 193 * The adapter firmware can indicate error conditions to the host. 194 * If the firmware has indicated an error, print out the reason for 195 * the firmware error. 196 */ 197static void t4_report_fw_error(struct adapter *adap) 198{ 199 static const char *const reason[] = { 200 "Crash", /* PCIE_FW_EVAL_CRASH */ 201 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 202 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 203 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 204 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 205 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 206 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 207 "Reserved", /* reserved */ 208 }; 209 u32 pcie_fw; 210 211 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 212 if (pcie_fw & F_PCIE_FW_ERR) 213 CH_ERR(adap, "Firmware reports adapter error: %s\n", 214 reason[G_PCIE_FW_EVAL(pcie_fw)]); 215} 216 217/* 218 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	/* Each flit is 64 bits; copy them out of the mailbox one at a time. */
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox: log the file/line and the two
 * values the firmware captured at the assertion site.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}

/* Value read from the mailbox control register when we have no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use different mailbox data/control register locations. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: %016llx %016llx "
		       "%016llx %016llx %016llx %016llx %016llx %016llx\n",
		       mbox, (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand the mailbox to the firmware and flush the posted write. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Spurious ownership without a message: release and retry. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* Firmware assertion, not a real reply. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	/* If DUMP_MBOX is set the mbox has already been dumped */
	if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
		p = cmd;
		CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
		       "%016llx %016llx %016llx %016llx\n",
		       (unsigned long long)be64_to_cpu(p[0]),
		       (unsigned long long)be64_to_cpu(p[1]),
		       (unsigned long long)be64_to_cpu(p[2]),
		       (unsigned long long)be64_to_cpu(p[3]),
		       (unsigned long long)be64_to_cpu(p[4]),
		       (unsigned long long)be64_to_cpu(p[5]),
		       (unsigned long long)be64_to_cpu(p[6]),
		       (unsigned long long)be64_to_cpu(p[7]));
	}

	/* A timed-out mailbox command is treated as a fatal adapter error. */
	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

/*
 * Same as t4_wr_mbox_meat_timeout() with the default firmware command timeout
 * and no explicit timeout override.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
				       sleep_ok, FW_CMD_MAX_TIMEOUT);

}

/*
 * Log the EDC ECC error address and BIST status registers for the given EDC
 * (T5+ only; EDC0/EDC1 only).  Always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC; T5+ have per-MC register instances. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Bail if a BIST operation is already in flight. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @parity is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing in the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Bail if a BIST operation is already in flight. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the callers responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = rounddown2(addr, 64);
	end = roundup2(addr + len, 64);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}

/**
 *	t4_get_regs_len - return the size of the chips register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
739 */ 740unsigned int t4_get_regs_len(struct adapter *adapter) 741{ 742 unsigned int chip_version = chip_id(adapter); 743 744 switch (chip_version) { 745 case CHELSIO_T4: 746 if (adapter->flags & IS_VF) 747 return FW_T4VF_REGMAP_SIZE; 748 return T4_REGMAP_SIZE; 749 750 case CHELSIO_T5: 751 case CHELSIO_T6: 752 if (adapter->flags & IS_VF) 753 return FW_T4VF_REGMAP_SIZE; 754 return T5_REGMAP_SIZE; 755 } 756 757 CH_ERR(adapter, 758 "Unsupported chip version %d\n", chip_version); 759 return 0; 760} 761 762/** 763 * t4_get_regs - read chip registers into provided buffer 764 * @adap: the adapter 765 * @buf: register buffer 766 * @buf_size: size (in bytes) of register buffer 767 * 768 * If the provided register buffer isn't large enough for the chip's 769 * full register range, the register dump will be truncated to the 770 * register buffer's size. 771 */ 772void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 773{ 774 static const unsigned int t4_reg_ranges[] = { 775 0x1008, 0x1108, 776 0x1180, 0x1184, 777 0x1190, 0x1194, 778 0x11a0, 0x11a4, 779 0x11b0, 0x11b4, 780 0x11fc, 0x123c, 781 0x1300, 0x173c, 782 0x1800, 0x18fc, 783 0x3000, 0x30d8, 784 0x30e0, 0x30e4, 785 0x30ec, 0x5910, 786 0x5920, 0x5924, 787 0x5960, 0x5960, 788 0x5968, 0x5968, 789 0x5970, 0x5970, 790 0x5978, 0x5978, 791 0x5980, 0x5980, 792 0x5988, 0x5988, 793 0x5990, 0x5990, 794 0x5998, 0x5998, 795 0x59a0, 0x59d4, 796 0x5a00, 0x5ae0, 797 0x5ae8, 0x5ae8, 798 0x5af0, 0x5af0, 799 0x5af8, 0x5af8, 800 0x6000, 0x6098, 801 0x6100, 0x6150, 802 0x6200, 0x6208, 803 0x6240, 0x6248, 804 0x6280, 0x62b0, 805 0x62c0, 0x6338, 806 0x6370, 0x638c, 807 0x6400, 0x643c, 808 0x6500, 0x6524, 809 0x6a00, 0x6a04, 810 0x6a14, 0x6a38, 811 0x6a60, 0x6a70, 812 0x6a78, 0x6a78, 813 0x6b00, 0x6b0c, 814 0x6b1c, 0x6b84, 815 0x6bf0, 0x6bf8, 816 0x6c00, 0x6c0c, 817 0x6c1c, 0x6c84, 818 0x6cf0, 0x6cf8, 819 0x6d00, 0x6d0c, 820 0x6d1c, 0x6d84, 821 0x6df0, 0x6df8, 822 0x6e00, 0x6e0c, 823 0x6e1c, 0x6e84, 824 0x6ef0, 0x6ef8, 825 0x6f00, 
0x6f0c, 826 0x6f1c, 0x6f84, 827 0x6ff0, 0x6ff8, 828 0x7000, 0x700c, 829 0x701c, 0x7084, 830 0x70f0, 0x70f8, 831 0x7100, 0x710c, 832 0x711c, 0x7184, 833 0x71f0, 0x71f8, 834 0x7200, 0x720c, 835 0x721c, 0x7284, 836 0x72f0, 0x72f8, 837 0x7300, 0x730c, 838 0x731c, 0x7384, 839 0x73f0, 0x73f8, 840 0x7400, 0x7450, 841 0x7500, 0x7530, 842 0x7600, 0x760c, 843 0x7614, 0x761c, 844 0x7680, 0x76cc, 845 0x7700, 0x7798, 846 0x77c0, 0x77fc, 847 0x7900, 0x79fc, 848 0x7b00, 0x7b58, 849 0x7b60, 0x7b84, 850 0x7b8c, 0x7c38, 851 0x7d00, 0x7d38, 852 0x7d40, 0x7d80, 853 0x7d8c, 0x7ddc, 854 0x7de4, 0x7e04, 855 0x7e10, 0x7e1c, 856 0x7e24, 0x7e38, 857 0x7e40, 0x7e44, 858 0x7e4c, 0x7e78, 859 0x7e80, 0x7ea4, 860 0x7eac, 0x7edc, 861 0x7ee8, 0x7efc, 862 0x8dc0, 0x8e04, 863 0x8e10, 0x8e1c, 864 0x8e30, 0x8e78, 865 0x8ea0, 0x8eb8, 866 0x8ec0, 0x8f6c, 867 0x8fc0, 0x9008, 868 0x9010, 0x9058, 869 0x9060, 0x9060, 870 0x9068, 0x9074, 871 0x90fc, 0x90fc, 872 0x9400, 0x9408, 873 0x9410, 0x9458, 874 0x9600, 0x9600, 875 0x9608, 0x9638, 876 0x9640, 0x96bc, 877 0x9800, 0x9808, 878 0x9820, 0x983c, 879 0x9850, 0x9864, 880 0x9c00, 0x9c6c, 881 0x9c80, 0x9cec, 882 0x9d00, 0x9d6c, 883 0x9d80, 0x9dec, 884 0x9e00, 0x9e6c, 885 0x9e80, 0x9eec, 886 0x9f00, 0x9f6c, 887 0x9f80, 0x9fec, 888 0xd004, 0xd004, 889 0xd010, 0xd03c, 890 0xdfc0, 0xdfe0, 891 0xe000, 0xea7c, 892 0xf000, 0x11110, 893 0x11118, 0x11190, 894 0x19040, 0x1906c, 895 0x19078, 0x19080, 896 0x1908c, 0x190e4, 897 0x190f0, 0x190f8, 898 0x19100, 0x19110, 899 0x19120, 0x19124, 900 0x19150, 0x19194, 901 0x1919c, 0x191b0, 902 0x191d0, 0x191e8, 903 0x19238, 0x1924c, 904 0x193f8, 0x1943c, 905 0x1944c, 0x19474, 906 0x19490, 0x194e0, 907 0x194f0, 0x194f8, 908 0x19800, 0x19c08, 909 0x19c10, 0x19c90, 910 0x19ca0, 0x19ce4, 911 0x19cf0, 0x19d40, 912 0x19d50, 0x19d94, 913 0x19da0, 0x19de8, 914 0x19df0, 0x19e40, 915 0x19e50, 0x19e90, 916 0x19ea0, 0x19f4c, 917 0x1a000, 0x1a004, 918 0x1a010, 0x1a06c, 919 0x1a0b0, 0x1a0e4, 920 0x1a0ec, 0x1a0f4, 921 0x1a100, 0x1a108, 922 0x1a114, 
0x1a120, 923 0x1a128, 0x1a130, 924 0x1a138, 0x1a138, 925 0x1a190, 0x1a1c4, 926 0x1a1fc, 0x1a1fc, 927 0x1e040, 0x1e04c, 928 0x1e284, 0x1e28c, 929 0x1e2c0, 0x1e2c0, 930 0x1e2e0, 0x1e2e0, 931 0x1e300, 0x1e384, 932 0x1e3c0, 0x1e3c8, 933 0x1e440, 0x1e44c, 934 0x1e684, 0x1e68c, 935 0x1e6c0, 0x1e6c0, 936 0x1e6e0, 0x1e6e0, 937 0x1e700, 0x1e784, 938 0x1e7c0, 0x1e7c8, 939 0x1e840, 0x1e84c, 940 0x1ea84, 0x1ea8c, 941 0x1eac0, 0x1eac0, 942 0x1eae0, 0x1eae0, 943 0x1eb00, 0x1eb84, 944 0x1ebc0, 0x1ebc8, 945 0x1ec40, 0x1ec4c, 946 0x1ee84, 0x1ee8c, 947 0x1eec0, 0x1eec0, 948 0x1eee0, 0x1eee0, 949 0x1ef00, 0x1ef84, 950 0x1efc0, 0x1efc8, 951 0x1f040, 0x1f04c, 952 0x1f284, 0x1f28c, 953 0x1f2c0, 0x1f2c0, 954 0x1f2e0, 0x1f2e0, 955 0x1f300, 0x1f384, 956 0x1f3c0, 0x1f3c8, 957 0x1f440, 0x1f44c, 958 0x1f684, 0x1f68c, 959 0x1f6c0, 0x1f6c0, 960 0x1f6e0, 0x1f6e0, 961 0x1f700, 0x1f784, 962 0x1f7c0, 0x1f7c8, 963 0x1f840, 0x1f84c, 964 0x1fa84, 0x1fa8c, 965 0x1fac0, 0x1fac0, 966 0x1fae0, 0x1fae0, 967 0x1fb00, 0x1fb84, 968 0x1fbc0, 0x1fbc8, 969 0x1fc40, 0x1fc4c, 970 0x1fe84, 0x1fe8c, 971 0x1fec0, 0x1fec0, 972 0x1fee0, 0x1fee0, 973 0x1ff00, 0x1ff84, 974 0x1ffc0, 0x1ffc8, 975 0x20000, 0x2002c, 976 0x20100, 0x2013c, 977 0x20190, 0x201a0, 978 0x201a8, 0x201b8, 979 0x201c4, 0x201c8, 980 0x20200, 0x20318, 981 0x20400, 0x204b4, 982 0x204c0, 0x20528, 983 0x20540, 0x20614, 984 0x21000, 0x21040, 985 0x2104c, 0x21060, 986 0x210c0, 0x210ec, 987 0x21200, 0x21268, 988 0x21270, 0x21284, 989 0x212fc, 0x21388, 990 0x21400, 0x21404, 991 0x21500, 0x21500, 992 0x21510, 0x21518, 993 0x2152c, 0x21530, 994 0x2153c, 0x2153c, 995 0x21550, 0x21554, 996 0x21600, 0x21600, 997 0x21608, 0x2161c, 998 0x21624, 0x21628, 999 0x21630, 0x21634, 1000 0x2163c, 0x2163c, 1001 0x21700, 0x2171c, 1002 0x21780, 0x2178c, 1003 0x21800, 0x21818, 1004 0x21820, 0x21828, 1005 0x21830, 0x21848, 1006 0x21850, 0x21854, 1007 0x21860, 0x21868, 1008 0x21870, 0x21870, 1009 0x21878, 0x21898, 1010 0x218a0, 0x218a8, 1011 0x218b0, 0x218c8, 1012 0x218d0, 
0x218d4, 1013 0x218e0, 0x218e8, 1014 0x218f0, 0x218f0, 1015 0x218f8, 0x21a18, 1016 0x21a20, 0x21a28, 1017 0x21a30, 0x21a48, 1018 0x21a50, 0x21a54, 1019 0x21a60, 0x21a68, 1020 0x21a70, 0x21a70, 1021 0x21a78, 0x21a98, 1022 0x21aa0, 0x21aa8, 1023 0x21ab0, 0x21ac8, 1024 0x21ad0, 0x21ad4, 1025 0x21ae0, 0x21ae8, 1026 0x21af0, 0x21af0, 1027 0x21af8, 0x21c18, 1028 0x21c20, 0x21c20, 1029 0x21c28, 0x21c30, 1030 0x21c38, 0x21c38, 1031 0x21c80, 0x21c98, 1032 0x21ca0, 0x21ca8, 1033 0x21cb0, 0x21cc8, 1034 0x21cd0, 0x21cd4, 1035 0x21ce0, 0x21ce8, 1036 0x21cf0, 0x21cf0, 1037 0x21cf8, 0x21d7c, 1038 0x21e00, 0x21e04, 1039 0x22000, 0x2202c, 1040 0x22100, 0x2213c, 1041 0x22190, 0x221a0, 1042 0x221a8, 0x221b8, 1043 0x221c4, 0x221c8, 1044 0x22200, 0x22318, 1045 0x22400, 0x224b4, 1046 0x224c0, 0x22528, 1047 0x22540, 0x22614, 1048 0x23000, 0x23040, 1049 0x2304c, 0x23060, 1050 0x230c0, 0x230ec, 1051 0x23200, 0x23268, 1052 0x23270, 0x23284, 1053 0x232fc, 0x23388, 1054 0x23400, 0x23404, 1055 0x23500, 0x23500, 1056 0x23510, 0x23518, 1057 0x2352c, 0x23530, 1058 0x2353c, 0x2353c, 1059 0x23550, 0x23554, 1060 0x23600, 0x23600, 1061 0x23608, 0x2361c, 1062 0x23624, 0x23628, 1063 0x23630, 0x23634, 1064 0x2363c, 0x2363c, 1065 0x23700, 0x2371c, 1066 0x23780, 0x2378c, 1067 0x23800, 0x23818, 1068 0x23820, 0x23828, 1069 0x23830, 0x23848, 1070 0x23850, 0x23854, 1071 0x23860, 0x23868, 1072 0x23870, 0x23870, 1073 0x23878, 0x23898, 1074 0x238a0, 0x238a8, 1075 0x238b0, 0x238c8, 1076 0x238d0, 0x238d4, 1077 0x238e0, 0x238e8, 1078 0x238f0, 0x238f0, 1079 0x238f8, 0x23a18, 1080 0x23a20, 0x23a28, 1081 0x23a30, 0x23a48, 1082 0x23a50, 0x23a54, 1083 0x23a60, 0x23a68, 1084 0x23a70, 0x23a70, 1085 0x23a78, 0x23a98, 1086 0x23aa0, 0x23aa8, 1087 0x23ab0, 0x23ac8, 1088 0x23ad0, 0x23ad4, 1089 0x23ae0, 0x23ae8, 1090 0x23af0, 0x23af0, 1091 0x23af8, 0x23c18, 1092 0x23c20, 0x23c20, 1093 0x23c28, 0x23c30, 1094 0x23c38, 0x23c38, 1095 0x23c80, 0x23c98, 1096 0x23ca0, 0x23ca8, 1097 0x23cb0, 0x23cc8, 1098 0x23cd0, 0x23cd4, 1099 
0x23ce0, 0x23ce8, 1100 0x23cf0, 0x23cf0, 1101 0x23cf8, 0x23d7c, 1102 0x23e00, 0x23e04, 1103 0x24000, 0x2402c, 1104 0x24100, 0x2413c, 1105 0x24190, 0x241a0, 1106 0x241a8, 0x241b8, 1107 0x241c4, 0x241c8, 1108 0x24200, 0x24318, 1109 0x24400, 0x244b4, 1110 0x244c0, 0x24528, 1111 0x24540, 0x24614, 1112 0x25000, 0x25040, 1113 0x2504c, 0x25060, 1114 0x250c0, 0x250ec, 1115 0x25200, 0x25268, 1116 0x25270, 0x25284, 1117 0x252fc, 0x25388, 1118 0x25400, 0x25404, 1119 0x25500, 0x25500, 1120 0x25510, 0x25518, 1121 0x2552c, 0x25530, 1122 0x2553c, 0x2553c, 1123 0x25550, 0x25554, 1124 0x25600, 0x25600, 1125 0x25608, 0x2561c, 1126 0x25624, 0x25628, 1127 0x25630, 0x25634, 1128 0x2563c, 0x2563c, 1129 0x25700, 0x2571c, 1130 0x25780, 0x2578c, 1131 0x25800, 0x25818, 1132 0x25820, 0x25828, 1133 0x25830, 0x25848, 1134 0x25850, 0x25854, 1135 0x25860, 0x25868, 1136 0x25870, 0x25870, 1137 0x25878, 0x25898, 1138 0x258a0, 0x258a8, 1139 0x258b0, 0x258c8, 1140 0x258d0, 0x258d4, 1141 0x258e0, 0x258e8, 1142 0x258f0, 0x258f0, 1143 0x258f8, 0x25a18, 1144 0x25a20, 0x25a28, 1145 0x25a30, 0x25a48, 1146 0x25a50, 0x25a54, 1147 0x25a60, 0x25a68, 1148 0x25a70, 0x25a70, 1149 0x25a78, 0x25a98, 1150 0x25aa0, 0x25aa8, 1151 0x25ab0, 0x25ac8, 1152 0x25ad0, 0x25ad4, 1153 0x25ae0, 0x25ae8, 1154 0x25af0, 0x25af0, 1155 0x25af8, 0x25c18, 1156 0x25c20, 0x25c20, 1157 0x25c28, 0x25c30, 1158 0x25c38, 0x25c38, 1159 0x25c80, 0x25c98, 1160 0x25ca0, 0x25ca8, 1161 0x25cb0, 0x25cc8, 1162 0x25cd0, 0x25cd4, 1163 0x25ce0, 0x25ce8, 1164 0x25cf0, 0x25cf0, 1165 0x25cf8, 0x25d7c, 1166 0x25e00, 0x25e04, 1167 0x26000, 0x2602c, 1168 0x26100, 0x2613c, 1169 0x26190, 0x261a0, 1170 0x261a8, 0x261b8, 1171 0x261c4, 0x261c8, 1172 0x26200, 0x26318, 1173 0x26400, 0x264b4, 1174 0x264c0, 0x26528, 1175 0x26540, 0x26614, 1176 0x27000, 0x27040, 1177 0x2704c, 0x27060, 1178 0x270c0, 0x270ec, 1179 0x27200, 0x27268, 1180 0x27270, 0x27284, 1181 0x272fc, 0x27388, 1182 0x27400, 0x27404, 1183 0x27500, 0x27500, 1184 0x27510, 0x27518, 1185 0x2752c, 0x27530, 
1186 0x2753c, 0x2753c, 1187 0x27550, 0x27554, 1188 0x27600, 0x27600, 1189 0x27608, 0x2761c, 1190 0x27624, 0x27628, 1191 0x27630, 0x27634, 1192 0x2763c, 0x2763c, 1193 0x27700, 0x2771c, 1194 0x27780, 0x2778c, 1195 0x27800, 0x27818, 1196 0x27820, 0x27828, 1197 0x27830, 0x27848, 1198 0x27850, 0x27854, 1199 0x27860, 0x27868, 1200 0x27870, 0x27870, 1201 0x27878, 0x27898, 1202 0x278a0, 0x278a8, 1203 0x278b0, 0x278c8, 1204 0x278d0, 0x278d4, 1205 0x278e0, 0x278e8, 1206 0x278f0, 0x278f0, 1207 0x278f8, 0x27a18, 1208 0x27a20, 0x27a28, 1209 0x27a30, 0x27a48, 1210 0x27a50, 0x27a54, 1211 0x27a60, 0x27a68, 1212 0x27a70, 0x27a70, 1213 0x27a78, 0x27a98, 1214 0x27aa0, 0x27aa8, 1215 0x27ab0, 0x27ac8, 1216 0x27ad0, 0x27ad4, 1217 0x27ae0, 0x27ae8, 1218 0x27af0, 0x27af0, 1219 0x27af8, 0x27c18, 1220 0x27c20, 0x27c20, 1221 0x27c28, 0x27c30, 1222 0x27c38, 0x27c38, 1223 0x27c80, 0x27c98, 1224 0x27ca0, 0x27ca8, 1225 0x27cb0, 0x27cc8, 1226 0x27cd0, 0x27cd4, 1227 0x27ce0, 0x27ce8, 1228 0x27cf0, 0x27cf0, 1229 0x27cf8, 0x27d7c, 1230 0x27e00, 0x27e04, 1231 }; 1232 1233 static const unsigned int t4vf_reg_ranges[] = { 1234 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 1235 VF_MPS_REG(A_MPS_VF_CTL), 1236 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 1237 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), 1238 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 1239 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 1240 FW_T4VF_MBDATA_BASE_ADDR, 1241 FW_T4VF_MBDATA_BASE_ADDR + 1242 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 1243 }; 1244 1245 static const unsigned int t5_reg_ranges[] = { 1246 0x1008, 0x10c0, 1247 0x10cc, 0x10f8, 1248 0x1100, 0x1100, 1249 0x110c, 0x1148, 1250 0x1180, 0x1184, 1251 0x1190, 0x1194, 1252 0x11a0, 0x11a4, 1253 0x11b0, 0x11b4, 1254 0x11fc, 0x123c, 1255 0x1280, 0x173c, 1256 0x1800, 0x18fc, 1257 0x3000, 0x3028, 1258 0x3060, 0x30b0, 1259 0x30b8, 0x30d8, 1260 0x30e0, 0x30fc, 1261 0x3140, 0x357c, 1262 0x35a8, 0x35cc, 1263 0x35ec, 0x35ec, 1264 0x3600, 0x5624, 1265 0x56cc, 0x56ec, 1266 
0x56f4, 0x5720, 1267 0x5728, 0x575c, 1268 0x580c, 0x5814, 1269 0x5890, 0x589c, 1270 0x58a4, 0x58ac, 1271 0x58b8, 0x58bc, 1272 0x5940, 0x59c8, 1273 0x59d0, 0x59dc, 1274 0x59fc, 0x5a18, 1275 0x5a60, 0x5a70, 1276 0x5a80, 0x5a9c, 1277 0x5b94, 0x5bfc, 1278 0x6000, 0x6020, 1279 0x6028, 0x6040, 1280 0x6058, 0x609c, 1281 0x60a8, 0x614c, 1282 0x7700, 0x7798, 1283 0x77c0, 0x78fc, 1284 0x7b00, 0x7b58, 1285 0x7b60, 0x7b84, 1286 0x7b8c, 0x7c54, 1287 0x7d00, 0x7d38, 1288 0x7d40, 0x7d80, 1289 0x7d8c, 0x7ddc, 1290 0x7de4, 0x7e04, 1291 0x7e10, 0x7e1c, 1292 0x7e24, 0x7e38, 1293 0x7e40, 0x7e44, 1294 0x7e4c, 0x7e78, 1295 0x7e80, 0x7edc, 1296 0x7ee8, 0x7efc, 1297 0x8dc0, 0x8de0, 1298 0x8df8, 0x8e04, 1299 0x8e10, 0x8e84, 1300 0x8ea0, 0x8f84, 1301 0x8fc0, 0x9058, 1302 0x9060, 0x9060, 1303 0x9068, 0x90f8, 1304 0x9400, 0x9408, 1305 0x9410, 0x9470, 1306 0x9600, 0x9600, 1307 0x9608, 0x9638, 1308 0x9640, 0x96f4, 1309 0x9800, 0x9808, 1310 0x9820, 0x983c, 1311 0x9850, 0x9864, 1312 0x9c00, 0x9c6c, 1313 0x9c80, 0x9cec, 1314 0x9d00, 0x9d6c, 1315 0x9d80, 0x9dec, 1316 0x9e00, 0x9e6c, 1317 0x9e80, 0x9eec, 1318 0x9f00, 0x9f6c, 1319 0x9f80, 0xa020, 1320 0xd004, 0xd004, 1321 0xd010, 0xd03c, 1322 0xdfc0, 0xdfe0, 1323 0xe000, 0x1106c, 1324 0x11074, 0x11088, 1325 0x1109c, 0x1117c, 1326 0x11190, 0x11204, 1327 0x19040, 0x1906c, 1328 0x19078, 0x19080, 1329 0x1908c, 0x190e8, 1330 0x190f0, 0x190f8, 1331 0x19100, 0x19110, 1332 0x19120, 0x19124, 1333 0x19150, 0x19194, 1334 0x1919c, 0x191b0, 1335 0x191d0, 0x191e8, 1336 0x19238, 0x19290, 1337 0x193f8, 0x19428, 1338 0x19430, 0x19444, 1339 0x1944c, 0x1946c, 1340 0x19474, 0x19474, 1341 0x19490, 0x194cc, 1342 0x194f0, 0x194f8, 1343 0x19c00, 0x19c08, 1344 0x19c10, 0x19c60, 1345 0x19c94, 0x19ce4, 1346 0x19cf0, 0x19d40, 1347 0x19d50, 0x19d94, 1348 0x19da0, 0x19de8, 1349 0x19df0, 0x19e10, 1350 0x19e50, 0x19e90, 1351 0x19ea0, 0x19f24, 1352 0x19f34, 0x19f34, 1353 0x19f40, 0x19f50, 1354 0x19f90, 0x19fb4, 1355 0x19fc4, 0x19fe4, 1356 0x1a000, 0x1a004, 1357 0x1a010, 0x1a06c, 
1358 0x1a0b0, 0x1a0e4, 1359 0x1a0ec, 0x1a0f8, 1360 0x1a100, 0x1a108, 1361 0x1a114, 0x1a120, 1362 0x1a128, 0x1a130, 1363 0x1a138, 0x1a138, 1364 0x1a190, 0x1a1c4, 1365 0x1a1fc, 0x1a1fc, 1366 0x1e008, 0x1e00c, 1367 0x1e040, 0x1e044, 1368 0x1e04c, 0x1e04c, 1369 0x1e284, 0x1e290, 1370 0x1e2c0, 0x1e2c0, 1371 0x1e2e0, 0x1e2e0, 1372 0x1e300, 0x1e384, 1373 0x1e3c0, 0x1e3c8, 1374 0x1e408, 0x1e40c, 1375 0x1e440, 0x1e444, 1376 0x1e44c, 0x1e44c, 1377 0x1e684, 0x1e690, 1378 0x1e6c0, 0x1e6c0, 1379 0x1e6e0, 0x1e6e0, 1380 0x1e700, 0x1e784, 1381 0x1e7c0, 0x1e7c8, 1382 0x1e808, 0x1e80c, 1383 0x1e840, 0x1e844, 1384 0x1e84c, 0x1e84c, 1385 0x1ea84, 0x1ea90, 1386 0x1eac0, 0x1eac0, 1387 0x1eae0, 0x1eae0, 1388 0x1eb00, 0x1eb84, 1389 0x1ebc0, 0x1ebc8, 1390 0x1ec08, 0x1ec0c, 1391 0x1ec40, 0x1ec44, 1392 0x1ec4c, 0x1ec4c, 1393 0x1ee84, 0x1ee90, 1394 0x1eec0, 0x1eec0, 1395 0x1eee0, 0x1eee0, 1396 0x1ef00, 0x1ef84, 1397 0x1efc0, 0x1efc8, 1398 0x1f008, 0x1f00c, 1399 0x1f040, 0x1f044, 1400 0x1f04c, 0x1f04c, 1401 0x1f284, 0x1f290, 1402 0x1f2c0, 0x1f2c0, 1403 0x1f2e0, 0x1f2e0, 1404 0x1f300, 0x1f384, 1405 0x1f3c0, 0x1f3c8, 1406 0x1f408, 0x1f40c, 1407 0x1f440, 0x1f444, 1408 0x1f44c, 0x1f44c, 1409 0x1f684, 0x1f690, 1410 0x1f6c0, 0x1f6c0, 1411 0x1f6e0, 0x1f6e0, 1412 0x1f700, 0x1f784, 1413 0x1f7c0, 0x1f7c8, 1414 0x1f808, 0x1f80c, 1415 0x1f840, 0x1f844, 1416 0x1f84c, 0x1f84c, 1417 0x1fa84, 0x1fa90, 1418 0x1fac0, 0x1fac0, 1419 0x1fae0, 0x1fae0, 1420 0x1fb00, 0x1fb84, 1421 0x1fbc0, 0x1fbc8, 1422 0x1fc08, 0x1fc0c, 1423 0x1fc40, 0x1fc44, 1424 0x1fc4c, 0x1fc4c, 1425 0x1fe84, 0x1fe90, 1426 0x1fec0, 0x1fec0, 1427 0x1fee0, 0x1fee0, 1428 0x1ff00, 0x1ff84, 1429 0x1ffc0, 0x1ffc8, 1430 0x30000, 0x30030, 1431 0x30100, 0x30144, 1432 0x30190, 0x301a0, 1433 0x301a8, 0x301b8, 1434 0x301c4, 0x301c8, 1435 0x301d0, 0x301d0, 1436 0x30200, 0x30318, 1437 0x30400, 0x304b4, 1438 0x304c0, 0x3052c, 1439 0x30540, 0x3061c, 1440 0x30800, 0x30828, 1441 0x30834, 0x30834, 1442 0x308c0, 0x30908, 1443 0x30910, 0x309ac, 1444 0x30a00, 
0x30a14, 1445 0x30a1c, 0x30a2c, 1446 0x30a44, 0x30a50, 1447 0x30a74, 0x30a74, 1448 0x30a7c, 0x30afc, 1449 0x30b08, 0x30c24, 1450 0x30d00, 0x30d00, 1451 0x30d08, 0x30d14, 1452 0x30d1c, 0x30d20, 1453 0x30d3c, 0x30d3c, 1454 0x30d48, 0x30d50, 1455 0x31200, 0x3120c, 1456 0x31220, 0x31220, 1457 0x31240, 0x31240, 1458 0x31600, 0x3160c, 1459 0x31a00, 0x31a1c, 1460 0x31e00, 0x31e20, 1461 0x31e38, 0x31e3c, 1462 0x31e80, 0x31e80, 1463 0x31e88, 0x31ea8, 1464 0x31eb0, 0x31eb4, 1465 0x31ec8, 0x31ed4, 1466 0x31fb8, 0x32004, 1467 0x32200, 0x32200, 1468 0x32208, 0x32240, 1469 0x32248, 0x32280, 1470 0x32288, 0x322c0, 1471 0x322c8, 0x322fc, 1472 0x32600, 0x32630, 1473 0x32a00, 0x32abc, 1474 0x32b00, 0x32b10, 1475 0x32b20, 0x32b30, 1476 0x32b40, 0x32b50, 1477 0x32b60, 0x32b70, 1478 0x33000, 0x33028, 1479 0x33030, 0x33048, 1480 0x33060, 0x33068, 1481 0x33070, 0x3309c, 1482 0x330f0, 0x33128, 1483 0x33130, 0x33148, 1484 0x33160, 0x33168, 1485 0x33170, 0x3319c, 1486 0x331f0, 0x33238, 1487 0x33240, 0x33240, 1488 0x33248, 0x33250, 1489 0x3325c, 0x33264, 1490 0x33270, 0x332b8, 1491 0x332c0, 0x332e4, 1492 0x332f8, 0x33338, 1493 0x33340, 0x33340, 1494 0x33348, 0x33350, 1495 0x3335c, 0x33364, 1496 0x33370, 0x333b8, 1497 0x333c0, 0x333e4, 1498 0x333f8, 0x33428, 1499 0x33430, 0x33448, 1500 0x33460, 0x33468, 1501 0x33470, 0x3349c, 1502 0x334f0, 0x33528, 1503 0x33530, 0x33548, 1504 0x33560, 0x33568, 1505 0x33570, 0x3359c, 1506 0x335f0, 0x33638, 1507 0x33640, 0x33640, 1508 0x33648, 0x33650, 1509 0x3365c, 0x33664, 1510 0x33670, 0x336b8, 1511 0x336c0, 0x336e4, 1512 0x336f8, 0x33738, 1513 0x33740, 0x33740, 1514 0x33748, 0x33750, 1515 0x3375c, 0x33764, 1516 0x33770, 0x337b8, 1517 0x337c0, 0x337e4, 1518 0x337f8, 0x337fc, 1519 0x33814, 0x33814, 1520 0x3382c, 0x3382c, 1521 0x33880, 0x3388c, 1522 0x338e8, 0x338ec, 1523 0x33900, 0x33928, 1524 0x33930, 0x33948, 1525 0x33960, 0x33968, 1526 0x33970, 0x3399c, 1527 0x339f0, 0x33a38, 1528 0x33a40, 0x33a40, 1529 0x33a48, 0x33a50, 1530 0x33a5c, 0x33a64, 1531 
0x33a70, 0x33ab8, 1532 0x33ac0, 0x33ae4, 1533 0x33af8, 0x33b10, 1534 0x33b28, 0x33b28, 1535 0x33b3c, 0x33b50, 1536 0x33bf0, 0x33c10, 1537 0x33c28, 0x33c28, 1538 0x33c3c, 0x33c50, 1539 0x33cf0, 0x33cfc, 1540 0x34000, 0x34030, 1541 0x34100, 0x34144, 1542 0x34190, 0x341a0, 1543 0x341a8, 0x341b8, 1544 0x341c4, 0x341c8, 1545 0x341d0, 0x341d0, 1546 0x34200, 0x34318, 1547 0x34400, 0x344b4, 1548 0x344c0, 0x3452c, 1549 0x34540, 0x3461c, 1550 0x34800, 0x34828, 1551 0x34834, 0x34834, 1552 0x348c0, 0x34908, 1553 0x34910, 0x349ac, 1554 0x34a00, 0x34a14, 1555 0x34a1c, 0x34a2c, 1556 0x34a44, 0x34a50, 1557 0x34a74, 0x34a74, 1558 0x34a7c, 0x34afc, 1559 0x34b08, 0x34c24, 1560 0x34d00, 0x34d00, 1561 0x34d08, 0x34d14, 1562 0x34d1c, 0x34d20, 1563 0x34d3c, 0x34d3c, 1564 0x34d48, 0x34d50, 1565 0x35200, 0x3520c, 1566 0x35220, 0x35220, 1567 0x35240, 0x35240, 1568 0x35600, 0x3560c, 1569 0x35a00, 0x35a1c, 1570 0x35e00, 0x35e20, 1571 0x35e38, 0x35e3c, 1572 0x35e80, 0x35e80, 1573 0x35e88, 0x35ea8, 1574 0x35eb0, 0x35eb4, 1575 0x35ec8, 0x35ed4, 1576 0x35fb8, 0x36004, 1577 0x36200, 0x36200, 1578 0x36208, 0x36240, 1579 0x36248, 0x36280, 1580 0x36288, 0x362c0, 1581 0x362c8, 0x362fc, 1582 0x36600, 0x36630, 1583 0x36a00, 0x36abc, 1584 0x36b00, 0x36b10, 1585 0x36b20, 0x36b30, 1586 0x36b40, 0x36b50, 1587 0x36b60, 0x36b70, 1588 0x37000, 0x37028, 1589 0x37030, 0x37048, 1590 0x37060, 0x37068, 1591 0x37070, 0x3709c, 1592 0x370f0, 0x37128, 1593 0x37130, 0x37148, 1594 0x37160, 0x37168, 1595 0x37170, 0x3719c, 1596 0x371f0, 0x37238, 1597 0x37240, 0x37240, 1598 0x37248, 0x37250, 1599 0x3725c, 0x37264, 1600 0x37270, 0x372b8, 1601 0x372c0, 0x372e4, 1602 0x372f8, 0x37338, 1603 0x37340, 0x37340, 1604 0x37348, 0x37350, 1605 0x3735c, 0x37364, 1606 0x37370, 0x373b8, 1607 0x373c0, 0x373e4, 1608 0x373f8, 0x37428, 1609 0x37430, 0x37448, 1610 0x37460, 0x37468, 1611 0x37470, 0x3749c, 1612 0x374f0, 0x37528, 1613 0x37530, 0x37548, 1614 0x37560, 0x37568, 1615 0x37570, 0x3759c, 1616 0x375f0, 0x37638, 1617 0x37640, 0x37640, 
1618 0x37648, 0x37650, 1619 0x3765c, 0x37664, 1620 0x37670, 0x376b8, 1621 0x376c0, 0x376e4, 1622 0x376f8, 0x37738, 1623 0x37740, 0x37740, 1624 0x37748, 0x37750, 1625 0x3775c, 0x37764, 1626 0x37770, 0x377b8, 1627 0x377c0, 0x377e4, 1628 0x377f8, 0x377fc, 1629 0x37814, 0x37814, 1630 0x3782c, 0x3782c, 1631 0x37880, 0x3788c, 1632 0x378e8, 0x378ec, 1633 0x37900, 0x37928, 1634 0x37930, 0x37948, 1635 0x37960, 0x37968, 1636 0x37970, 0x3799c, 1637 0x379f0, 0x37a38, 1638 0x37a40, 0x37a40, 1639 0x37a48, 0x37a50, 1640 0x37a5c, 0x37a64, 1641 0x37a70, 0x37ab8, 1642 0x37ac0, 0x37ae4, 1643 0x37af8, 0x37b10, 1644 0x37b28, 0x37b28, 1645 0x37b3c, 0x37b50, 1646 0x37bf0, 0x37c10, 1647 0x37c28, 0x37c28, 1648 0x37c3c, 0x37c50, 1649 0x37cf0, 0x37cfc, 1650 0x38000, 0x38030, 1651 0x38100, 0x38144, 1652 0x38190, 0x381a0, 1653 0x381a8, 0x381b8, 1654 0x381c4, 0x381c8, 1655 0x381d0, 0x381d0, 1656 0x38200, 0x38318, 1657 0x38400, 0x384b4, 1658 0x384c0, 0x3852c, 1659 0x38540, 0x3861c, 1660 0x38800, 0x38828, 1661 0x38834, 0x38834, 1662 0x388c0, 0x38908, 1663 0x38910, 0x389ac, 1664 0x38a00, 0x38a14, 1665 0x38a1c, 0x38a2c, 1666 0x38a44, 0x38a50, 1667 0x38a74, 0x38a74, 1668 0x38a7c, 0x38afc, 1669 0x38b08, 0x38c24, 1670 0x38d00, 0x38d00, 1671 0x38d08, 0x38d14, 1672 0x38d1c, 0x38d20, 1673 0x38d3c, 0x38d3c, 1674 0x38d48, 0x38d50, 1675 0x39200, 0x3920c, 1676 0x39220, 0x39220, 1677 0x39240, 0x39240, 1678 0x39600, 0x3960c, 1679 0x39a00, 0x39a1c, 1680 0x39e00, 0x39e20, 1681 0x39e38, 0x39e3c, 1682 0x39e80, 0x39e80, 1683 0x39e88, 0x39ea8, 1684 0x39eb0, 0x39eb4, 1685 0x39ec8, 0x39ed4, 1686 0x39fb8, 0x3a004, 1687 0x3a200, 0x3a200, 1688 0x3a208, 0x3a240, 1689 0x3a248, 0x3a280, 1690 0x3a288, 0x3a2c0, 1691 0x3a2c8, 0x3a2fc, 1692 0x3a600, 0x3a630, 1693 0x3aa00, 0x3aabc, 1694 0x3ab00, 0x3ab10, 1695 0x3ab20, 0x3ab30, 1696 0x3ab40, 0x3ab50, 1697 0x3ab60, 0x3ab70, 1698 0x3b000, 0x3b028, 1699 0x3b030, 0x3b048, 1700 0x3b060, 0x3b068, 1701 0x3b070, 0x3b09c, 1702 0x3b0f0, 0x3b128, 1703 0x3b130, 0x3b148, 1704 0x3b160, 
0x3b168, 1705 0x3b170, 0x3b19c, 1706 0x3b1f0, 0x3b238, 1707 0x3b240, 0x3b240, 1708 0x3b248, 0x3b250, 1709 0x3b25c, 0x3b264, 1710 0x3b270, 0x3b2b8, 1711 0x3b2c0, 0x3b2e4, 1712 0x3b2f8, 0x3b338, 1713 0x3b340, 0x3b340, 1714 0x3b348, 0x3b350, 1715 0x3b35c, 0x3b364, 1716 0x3b370, 0x3b3b8, 1717 0x3b3c0, 0x3b3e4, 1718 0x3b3f8, 0x3b428, 1719 0x3b430, 0x3b448, 1720 0x3b460, 0x3b468, 1721 0x3b470, 0x3b49c, 1722 0x3b4f0, 0x3b528, 1723 0x3b530, 0x3b548, 1724 0x3b560, 0x3b568, 1725 0x3b570, 0x3b59c, 1726 0x3b5f0, 0x3b638, 1727 0x3b640, 0x3b640, 1728 0x3b648, 0x3b650, 1729 0x3b65c, 0x3b664, 1730 0x3b670, 0x3b6b8, 1731 0x3b6c0, 0x3b6e4, 1732 0x3b6f8, 0x3b738, 1733 0x3b740, 0x3b740, 1734 0x3b748, 0x3b750, 1735 0x3b75c, 0x3b764, 1736 0x3b770, 0x3b7b8, 1737 0x3b7c0, 0x3b7e4, 1738 0x3b7f8, 0x3b7fc, 1739 0x3b814, 0x3b814, 1740 0x3b82c, 0x3b82c, 1741 0x3b880, 0x3b88c, 1742 0x3b8e8, 0x3b8ec, 1743 0x3b900, 0x3b928, 1744 0x3b930, 0x3b948, 1745 0x3b960, 0x3b968, 1746 0x3b970, 0x3b99c, 1747 0x3b9f0, 0x3ba38, 1748 0x3ba40, 0x3ba40, 1749 0x3ba48, 0x3ba50, 1750 0x3ba5c, 0x3ba64, 1751 0x3ba70, 0x3bab8, 1752 0x3bac0, 0x3bae4, 1753 0x3baf8, 0x3bb10, 1754 0x3bb28, 0x3bb28, 1755 0x3bb3c, 0x3bb50, 1756 0x3bbf0, 0x3bc10, 1757 0x3bc28, 0x3bc28, 1758 0x3bc3c, 0x3bc50, 1759 0x3bcf0, 0x3bcfc, 1760 0x3c000, 0x3c030, 1761 0x3c100, 0x3c144, 1762 0x3c190, 0x3c1a0, 1763 0x3c1a8, 0x3c1b8, 1764 0x3c1c4, 0x3c1c8, 1765 0x3c1d0, 0x3c1d0, 1766 0x3c200, 0x3c318, 1767 0x3c400, 0x3c4b4, 1768 0x3c4c0, 0x3c52c, 1769 0x3c540, 0x3c61c, 1770 0x3c800, 0x3c828, 1771 0x3c834, 0x3c834, 1772 0x3c8c0, 0x3c908, 1773 0x3c910, 0x3c9ac, 1774 0x3ca00, 0x3ca14, 1775 0x3ca1c, 0x3ca2c, 1776 0x3ca44, 0x3ca50, 1777 0x3ca74, 0x3ca74, 1778 0x3ca7c, 0x3cafc, 1779 0x3cb08, 0x3cc24, 1780 0x3cd00, 0x3cd00, 1781 0x3cd08, 0x3cd14, 1782 0x3cd1c, 0x3cd20, 1783 0x3cd3c, 0x3cd3c, 1784 0x3cd48, 0x3cd50, 1785 0x3d200, 0x3d20c, 1786 0x3d220, 0x3d220, 1787 0x3d240, 0x3d240, 1788 0x3d600, 0x3d60c, 1789 0x3da00, 0x3da1c, 1790 0x3de00, 0x3de20, 1791 
0x3de38, 0x3de3c, 1792 0x3de80, 0x3de80, 1793 0x3de88, 0x3dea8, 1794 0x3deb0, 0x3deb4, 1795 0x3dec8, 0x3ded4, 1796 0x3dfb8, 0x3e004, 1797 0x3e200, 0x3e200, 1798 0x3e208, 0x3e240, 1799 0x3e248, 0x3e280, 1800 0x3e288, 0x3e2c0, 1801 0x3e2c8, 0x3e2fc, 1802 0x3e600, 0x3e630, 1803 0x3ea00, 0x3eabc, 1804 0x3eb00, 0x3eb10, 1805 0x3eb20, 0x3eb30, 1806 0x3eb40, 0x3eb50, 1807 0x3eb60, 0x3eb70, 1808 0x3f000, 0x3f028, 1809 0x3f030, 0x3f048, 1810 0x3f060, 0x3f068, 1811 0x3f070, 0x3f09c, 1812 0x3f0f0, 0x3f128, 1813 0x3f130, 0x3f148, 1814 0x3f160, 0x3f168, 1815 0x3f170, 0x3f19c, 1816 0x3f1f0, 0x3f238, 1817 0x3f240, 0x3f240, 1818 0x3f248, 0x3f250, 1819 0x3f25c, 0x3f264, 1820 0x3f270, 0x3f2b8, 1821 0x3f2c0, 0x3f2e4, 1822 0x3f2f8, 0x3f338, 1823 0x3f340, 0x3f340, 1824 0x3f348, 0x3f350, 1825 0x3f35c, 0x3f364, 1826 0x3f370, 0x3f3b8, 1827 0x3f3c0, 0x3f3e4, 1828 0x3f3f8, 0x3f428, 1829 0x3f430, 0x3f448, 1830 0x3f460, 0x3f468, 1831 0x3f470, 0x3f49c, 1832 0x3f4f0, 0x3f528, 1833 0x3f530, 0x3f548, 1834 0x3f560, 0x3f568, 1835 0x3f570, 0x3f59c, 1836 0x3f5f0, 0x3f638, 1837 0x3f640, 0x3f640, 1838 0x3f648, 0x3f650, 1839 0x3f65c, 0x3f664, 1840 0x3f670, 0x3f6b8, 1841 0x3f6c0, 0x3f6e4, 1842 0x3f6f8, 0x3f738, 1843 0x3f740, 0x3f740, 1844 0x3f748, 0x3f750, 1845 0x3f75c, 0x3f764, 1846 0x3f770, 0x3f7b8, 1847 0x3f7c0, 0x3f7e4, 1848 0x3f7f8, 0x3f7fc, 1849 0x3f814, 0x3f814, 1850 0x3f82c, 0x3f82c, 1851 0x3f880, 0x3f88c, 1852 0x3f8e8, 0x3f8ec, 1853 0x3f900, 0x3f928, 1854 0x3f930, 0x3f948, 1855 0x3f960, 0x3f968, 1856 0x3f970, 0x3f99c, 1857 0x3f9f0, 0x3fa38, 1858 0x3fa40, 0x3fa40, 1859 0x3fa48, 0x3fa50, 1860 0x3fa5c, 0x3fa64, 1861 0x3fa70, 0x3fab8, 1862 0x3fac0, 0x3fae4, 1863 0x3faf8, 0x3fb10, 1864 0x3fb28, 0x3fb28, 1865 0x3fb3c, 0x3fb50, 1866 0x3fbf0, 0x3fc10, 1867 0x3fc28, 0x3fc28, 1868 0x3fc3c, 0x3fc50, 1869 0x3fcf0, 0x3fcfc, 1870 0x40000, 0x4000c, 1871 0x40040, 0x40050, 1872 0x40060, 0x40068, 1873 0x4007c, 0x4008c, 1874 0x40094, 0x400b0, 1875 0x400c0, 0x40144, 1876 0x40180, 0x4018c, 1877 0x40200, 0x40254, 
1878 0x40260, 0x40264, 1879 0x40270, 0x40288, 1880 0x40290, 0x40298, 1881 0x402ac, 0x402c8, 1882 0x402d0, 0x402e0, 1883 0x402f0, 0x402f0, 1884 0x40300, 0x4033c, 1885 0x403f8, 0x403fc, 1886 0x41304, 0x413c4, 1887 0x41400, 0x4140c, 1888 0x41414, 0x4141c, 1889 0x41480, 0x414d0, 1890 0x44000, 0x44054, 1891 0x4405c, 0x44078, 1892 0x440c0, 0x44174, 1893 0x44180, 0x441ac, 1894 0x441b4, 0x441b8, 1895 0x441c0, 0x44254, 1896 0x4425c, 0x44278, 1897 0x442c0, 0x44374, 1898 0x44380, 0x443ac, 1899 0x443b4, 0x443b8, 1900 0x443c0, 0x44454, 1901 0x4445c, 0x44478, 1902 0x444c0, 0x44574, 1903 0x44580, 0x445ac, 1904 0x445b4, 0x445b8, 1905 0x445c0, 0x44654, 1906 0x4465c, 0x44678, 1907 0x446c0, 0x44774, 1908 0x44780, 0x447ac, 1909 0x447b4, 0x447b8, 1910 0x447c0, 0x44854, 1911 0x4485c, 0x44878, 1912 0x448c0, 0x44974, 1913 0x44980, 0x449ac, 1914 0x449b4, 0x449b8, 1915 0x449c0, 0x449fc, 1916 0x45000, 0x45004, 1917 0x45010, 0x45030, 1918 0x45040, 0x45060, 1919 0x45068, 0x45068, 1920 0x45080, 0x45084, 1921 0x450a0, 0x450b0, 1922 0x45200, 0x45204, 1923 0x45210, 0x45230, 1924 0x45240, 0x45260, 1925 0x45268, 0x45268, 1926 0x45280, 0x45284, 1927 0x452a0, 0x452b0, 1928 0x460c0, 0x460e4, 1929 0x47000, 0x4703c, 1930 0x47044, 0x4708c, 1931 0x47200, 0x47250, 1932 0x47400, 0x47408, 1933 0x47414, 0x47420, 1934 0x47600, 0x47618, 1935 0x47800, 0x47814, 1936 0x48000, 0x4800c, 1937 0x48040, 0x48050, 1938 0x48060, 0x48068, 1939 0x4807c, 0x4808c, 1940 0x48094, 0x480b0, 1941 0x480c0, 0x48144, 1942 0x48180, 0x4818c, 1943 0x48200, 0x48254, 1944 0x48260, 0x48264, 1945 0x48270, 0x48288, 1946 0x48290, 0x48298, 1947 0x482ac, 0x482c8, 1948 0x482d0, 0x482e0, 1949 0x482f0, 0x482f0, 1950 0x48300, 0x4833c, 1951 0x483f8, 0x483fc, 1952 0x49304, 0x493c4, 1953 0x49400, 0x4940c, 1954 0x49414, 0x4941c, 1955 0x49480, 0x494d0, 1956 0x4c000, 0x4c054, 1957 0x4c05c, 0x4c078, 1958 0x4c0c0, 0x4c174, 1959 0x4c180, 0x4c1ac, 1960 0x4c1b4, 0x4c1b8, 1961 0x4c1c0, 0x4c254, 1962 0x4c25c, 0x4c278, 1963 0x4c2c0, 0x4c374, 1964 0x4c380, 
0x4c3ac, 1965 0x4c3b4, 0x4c3b8, 1966 0x4c3c0, 0x4c454, 1967 0x4c45c, 0x4c478, 1968 0x4c4c0, 0x4c574, 1969 0x4c580, 0x4c5ac, 1970 0x4c5b4, 0x4c5b8, 1971 0x4c5c0, 0x4c654, 1972 0x4c65c, 0x4c678, 1973 0x4c6c0, 0x4c774, 1974 0x4c780, 0x4c7ac, 1975 0x4c7b4, 0x4c7b8, 1976 0x4c7c0, 0x4c854, 1977 0x4c85c, 0x4c878, 1978 0x4c8c0, 0x4c974, 1979 0x4c980, 0x4c9ac, 1980 0x4c9b4, 0x4c9b8, 1981 0x4c9c0, 0x4c9fc, 1982 0x4d000, 0x4d004, 1983 0x4d010, 0x4d030, 1984 0x4d040, 0x4d060, 1985 0x4d068, 0x4d068, 1986 0x4d080, 0x4d084, 1987 0x4d0a0, 0x4d0b0, 1988 0x4d200, 0x4d204, 1989 0x4d210, 0x4d230, 1990 0x4d240, 0x4d260, 1991 0x4d268, 0x4d268, 1992 0x4d280, 0x4d284, 1993 0x4d2a0, 0x4d2b0, 1994 0x4e0c0, 0x4e0e4, 1995 0x4f000, 0x4f03c, 1996 0x4f044, 0x4f08c, 1997 0x4f200, 0x4f250, 1998 0x4f400, 0x4f408, 1999 0x4f414, 0x4f420, 2000 0x4f600, 0x4f618, 2001 0x4f800, 0x4f814, 2002 0x50000, 0x50084, 2003 0x50090, 0x500cc, 2004 0x50400, 0x50400, 2005 0x50800, 0x50884, 2006 0x50890, 0x508cc, 2007 0x50c00, 0x50c00, 2008 0x51000, 0x5101c, 2009 0x51300, 0x51308, 2010 }; 2011 2012 static const unsigned int t5vf_reg_ranges[] = { 2013 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2014 VF_MPS_REG(A_MPS_VF_CTL), 2015 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2016 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2017 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2018 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2019 FW_T4VF_MBDATA_BASE_ADDR, 2020 FW_T4VF_MBDATA_BASE_ADDR + 2021 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2022 }; 2023 2024 static const unsigned int t6_reg_ranges[] = { 2025 0x1008, 0x101c, 2026 0x1024, 0x10a8, 2027 0x10b4, 0x10f8, 2028 0x1100, 0x1114, 2029 0x111c, 0x112c, 2030 0x1138, 0x113c, 2031 0x1144, 0x114c, 2032 0x1180, 0x1184, 2033 0x1190, 0x1194, 2034 0x11a0, 0x11a4, 2035 0x11b0, 0x11b4, 2036 0x11fc, 0x1274, 2037 0x1280, 0x133c, 2038 0x1800, 0x18fc, 2039 0x3000, 0x302c, 2040 0x3060, 0x30b0, 2041 0x30b8, 0x30d8, 2042 0x30e0, 0x30fc, 2043 0x3140, 0x357c, 2044 0x35a8, 
0x35cc, 2045 0x35ec, 0x35ec, 2046 0x3600, 0x5624, 2047 0x56cc, 0x56ec, 2048 0x56f4, 0x5720, 2049 0x5728, 0x575c, 2050 0x580c, 0x5814, 2051 0x5890, 0x589c, 2052 0x58a4, 0x58ac, 2053 0x58b8, 0x58bc, 2054 0x5940, 0x595c, 2055 0x5980, 0x598c, 2056 0x59b0, 0x59c8, 2057 0x59d0, 0x59dc, 2058 0x59fc, 0x5a18, 2059 0x5a60, 0x5a6c, 2060 0x5a80, 0x5a8c, 2061 0x5a94, 0x5a9c, 2062 0x5b94, 0x5bfc, 2063 0x5c10, 0x5e48, 2064 0x5e50, 0x5e94, 2065 0x5ea0, 0x5eb0, 2066 0x5ec0, 0x5ec0, 2067 0x5ec8, 0x5ed0, 2068 0x5ee0, 0x5ee0, 2069 0x5ef0, 0x5ef0, 2070 0x5f00, 0x5f00, 2071 0x6000, 0x6020, 2072 0x6028, 0x6040, 2073 0x6058, 0x609c, 2074 0x60a8, 0x619c, 2075 0x7700, 0x7798, 2076 0x77c0, 0x7880, 2077 0x78cc, 0x78fc, 2078 0x7b00, 0x7b58, 2079 0x7b60, 0x7b84, 2080 0x7b8c, 0x7c54, 2081 0x7d00, 0x7d38, 2082 0x7d40, 0x7d84, 2083 0x7d8c, 0x7ddc, 2084 0x7de4, 0x7e04, 2085 0x7e10, 0x7e1c, 2086 0x7e24, 0x7e38, 2087 0x7e40, 0x7e44, 2088 0x7e4c, 0x7e78, 2089 0x7e80, 0x7edc, 2090 0x7ee8, 0x7efc, 2091 0x8dc0, 0x8de4, 2092 0x8df8, 0x8e04, 2093 0x8e10, 0x8e84, 2094 0x8ea0, 0x8f88, 2095 0x8fb8, 0x9058, 2096 0x9060, 0x9060, 2097 0x9068, 0x90f8, 2098 0x9100, 0x9124, 2099 0x9400, 0x9470, 2100 0x9600, 0x9600, 2101 0x9608, 0x9638, 2102 0x9640, 0x9704, 2103 0x9710, 0x971c, 2104 0x9800, 0x9808, 2105 0x9820, 0x983c, 2106 0x9850, 0x9864, 2107 0x9c00, 0x9c6c, 2108 0x9c80, 0x9cec, 2109 0x9d00, 0x9d6c, 2110 0x9d80, 0x9dec, 2111 0x9e00, 0x9e6c, 2112 0x9e80, 0x9eec, 2113 0x9f00, 0x9f6c, 2114 0x9f80, 0xa020, 2115 0xd004, 0xd03c, 2116 0xd100, 0xd118, 2117 0xd200, 0xd214, 2118 0xd220, 0xd234, 2119 0xd240, 0xd254, 2120 0xd260, 0xd274, 2121 0xd280, 0xd294, 2122 0xd2a0, 0xd2b4, 2123 0xd2c0, 0xd2d4, 2124 0xd2e0, 0xd2f4, 2125 0xd300, 0xd31c, 2126 0xdfc0, 0xdfe0, 2127 0xe000, 0xf008, 2128 0xf010, 0xf018, 2129 0xf020, 0xf028, 2130 0x11000, 0x11014, 2131 0x11048, 0x1106c, 2132 0x11074, 0x11088, 2133 0x11098, 0x11120, 2134 0x1112c, 0x1117c, 2135 0x11190, 0x112e0, 2136 0x11300, 0x1130c, 2137 0x12000, 0x1206c, 2138 0x19040, 0x1906c, 
2139 0x19078, 0x19080, 2140 0x1908c, 0x190e8, 2141 0x190f0, 0x190f8, 2142 0x19100, 0x19110, 2143 0x19120, 0x19124, 2144 0x19150, 0x19194, 2145 0x1919c, 0x191b0, 2146 0x191d0, 0x191e8, 2147 0x19238, 0x19290, 2148 0x192a4, 0x192b0, 2149 0x192bc, 0x192bc, 2150 0x19348, 0x1934c, 2151 0x193f8, 0x19418, 2152 0x19420, 0x19428, 2153 0x19430, 0x19444, 2154 0x1944c, 0x1946c, 2155 0x19474, 0x19474, 2156 0x19490, 0x194cc, 2157 0x194f0, 0x194f8, 2158 0x19c00, 0x19c48, 2159 0x19c50, 0x19c80, 2160 0x19c94, 0x19c98, 2161 0x19ca0, 0x19cbc, 2162 0x19ce4, 0x19ce4, 2163 0x19cf0, 0x19cf8, 2164 0x19d00, 0x19d28, 2165 0x19d50, 0x19d78, 2166 0x19d94, 0x19d98, 2167 0x19da0, 0x19dc8, 2168 0x19df0, 0x19e10, 2169 0x19e50, 0x19e6c, 2170 0x19ea0, 0x19ebc, 2171 0x19ec4, 0x19ef4, 2172 0x19f04, 0x19f2c, 2173 0x19f34, 0x19f34, 2174 0x19f40, 0x19f50, 2175 0x19f90, 0x19fac, 2176 0x19fc4, 0x19fc8, 2177 0x19fd0, 0x19fe4, 2178 0x1a000, 0x1a004, 2179 0x1a010, 0x1a06c, 2180 0x1a0b0, 0x1a0e4, 2181 0x1a0ec, 0x1a0f8, 2182 0x1a100, 0x1a108, 2183 0x1a114, 0x1a120, 2184 0x1a128, 0x1a130, 2185 0x1a138, 0x1a138, 2186 0x1a190, 0x1a1c4, 2187 0x1a1fc, 0x1a1fc, 2188 0x1e008, 0x1e00c, 2189 0x1e040, 0x1e044, 2190 0x1e04c, 0x1e04c, 2191 0x1e284, 0x1e290, 2192 0x1e2c0, 0x1e2c0, 2193 0x1e2e0, 0x1e2e0, 2194 0x1e300, 0x1e384, 2195 0x1e3c0, 0x1e3c8, 2196 0x1e408, 0x1e40c, 2197 0x1e440, 0x1e444, 2198 0x1e44c, 0x1e44c, 2199 0x1e684, 0x1e690, 2200 0x1e6c0, 0x1e6c0, 2201 0x1e6e0, 0x1e6e0, 2202 0x1e700, 0x1e784, 2203 0x1e7c0, 0x1e7c8, 2204 0x1e808, 0x1e80c, 2205 0x1e840, 0x1e844, 2206 0x1e84c, 0x1e84c, 2207 0x1ea84, 0x1ea90, 2208 0x1eac0, 0x1eac0, 2209 0x1eae0, 0x1eae0, 2210 0x1eb00, 0x1eb84, 2211 0x1ebc0, 0x1ebc8, 2212 0x1ec08, 0x1ec0c, 2213 0x1ec40, 0x1ec44, 2214 0x1ec4c, 0x1ec4c, 2215 0x1ee84, 0x1ee90, 2216 0x1eec0, 0x1eec0, 2217 0x1eee0, 0x1eee0, 2218 0x1ef00, 0x1ef84, 2219 0x1efc0, 0x1efc8, 2220 0x1f008, 0x1f00c, 2221 0x1f040, 0x1f044, 2222 0x1f04c, 0x1f04c, 2223 0x1f284, 0x1f290, 2224 0x1f2c0, 0x1f2c0, 2225 0x1f2e0, 
0x1f2e0, 2226 0x1f300, 0x1f384, 2227 0x1f3c0, 0x1f3c8, 2228 0x1f408, 0x1f40c, 2229 0x1f440, 0x1f444, 2230 0x1f44c, 0x1f44c, 2231 0x1f684, 0x1f690, 2232 0x1f6c0, 0x1f6c0, 2233 0x1f6e0, 0x1f6e0, 2234 0x1f700, 0x1f784, 2235 0x1f7c0, 0x1f7c8, 2236 0x1f808, 0x1f80c, 2237 0x1f840, 0x1f844, 2238 0x1f84c, 0x1f84c, 2239 0x1fa84, 0x1fa90, 2240 0x1fac0, 0x1fac0, 2241 0x1fae0, 0x1fae0, 2242 0x1fb00, 0x1fb84, 2243 0x1fbc0, 0x1fbc8, 2244 0x1fc08, 0x1fc0c, 2245 0x1fc40, 0x1fc44, 2246 0x1fc4c, 0x1fc4c, 2247 0x1fe84, 0x1fe90, 2248 0x1fec0, 0x1fec0, 2249 0x1fee0, 0x1fee0, 2250 0x1ff00, 0x1ff84, 2251 0x1ffc0, 0x1ffc8, 2252 0x30000, 0x30030, 2253 0x30100, 0x30168, 2254 0x30190, 0x301a0, 2255 0x301a8, 0x301b8, 2256 0x301c4, 0x301c8, 2257 0x301d0, 0x301d0, 2258 0x30200, 0x30320, 2259 0x30400, 0x304b4, 2260 0x304c0, 0x3052c, 2261 0x30540, 0x3061c, 2262 0x30800, 0x308a0, 2263 0x308c0, 0x30908, 2264 0x30910, 0x309b8, 2265 0x30a00, 0x30a04, 2266 0x30a0c, 0x30a14, 2267 0x30a1c, 0x30a2c, 2268 0x30a44, 0x30a50, 2269 0x30a74, 0x30a74, 2270 0x30a7c, 0x30afc, 2271 0x30b08, 0x30c24, 2272 0x30d00, 0x30d14, 2273 0x30d1c, 0x30d3c, 2274 0x30d44, 0x30d4c, 2275 0x30d54, 0x30d74, 2276 0x30d7c, 0x30d7c, 2277 0x30de0, 0x30de0, 2278 0x30e00, 0x30ed4, 2279 0x30f00, 0x30fa4, 2280 0x30fc0, 0x30fc4, 2281 0x31000, 0x31004, 2282 0x31080, 0x310fc, 2283 0x31208, 0x31220, 2284 0x3123c, 0x31254, 2285 0x31300, 0x31300, 2286 0x31308, 0x3131c, 2287 0x31338, 0x3133c, 2288 0x31380, 0x31380, 2289 0x31388, 0x313a8, 2290 0x313b4, 0x313b4, 2291 0x31400, 0x31420, 2292 0x31438, 0x3143c, 2293 0x31480, 0x31480, 2294 0x314a8, 0x314a8, 2295 0x314b0, 0x314b4, 2296 0x314c8, 0x314d4, 2297 0x31a40, 0x31a4c, 2298 0x31af0, 0x31b20, 2299 0x31b38, 0x31b3c, 2300 0x31b80, 0x31b80, 2301 0x31ba8, 0x31ba8, 2302 0x31bb0, 0x31bb4, 2303 0x31bc8, 0x31bd4, 2304 0x32140, 0x3218c, 2305 0x321f0, 0x321f4, 2306 0x32200, 0x32200, 2307 0x32218, 0x32218, 2308 0x32400, 0x32400, 2309 0x32408, 0x3241c, 2310 0x32618, 0x32620, 2311 0x32664, 0x32664, 2312 
0x326a8, 0x326a8, 2313 0x326ec, 0x326ec, 2314 0x32a00, 0x32abc, 2315 0x32b00, 0x32b18, 2316 0x32b20, 0x32b38, 2317 0x32b40, 0x32b58, 2318 0x32b60, 0x32b78, 2319 0x32c00, 0x32c00, 2320 0x32c08, 0x32c3c, 2321 0x33000, 0x3302c, 2322 0x33034, 0x33050, 2323 0x33058, 0x33058, 2324 0x33060, 0x3308c, 2325 0x3309c, 0x330ac, 2326 0x330c0, 0x330c0, 2327 0x330c8, 0x330d0, 2328 0x330d8, 0x330e0, 2329 0x330ec, 0x3312c, 2330 0x33134, 0x33150, 2331 0x33158, 0x33158, 2332 0x33160, 0x3318c, 2333 0x3319c, 0x331ac, 2334 0x331c0, 0x331c0, 2335 0x331c8, 0x331d0, 2336 0x331d8, 0x331e0, 2337 0x331ec, 0x33290, 2338 0x33298, 0x332c4, 2339 0x332e4, 0x33390, 2340 0x33398, 0x333c4, 2341 0x333e4, 0x3342c, 2342 0x33434, 0x33450, 2343 0x33458, 0x33458, 2344 0x33460, 0x3348c, 2345 0x3349c, 0x334ac, 2346 0x334c0, 0x334c0, 2347 0x334c8, 0x334d0, 2348 0x334d8, 0x334e0, 2349 0x334ec, 0x3352c, 2350 0x33534, 0x33550, 2351 0x33558, 0x33558, 2352 0x33560, 0x3358c, 2353 0x3359c, 0x335ac, 2354 0x335c0, 0x335c0, 2355 0x335c8, 0x335d0, 2356 0x335d8, 0x335e0, 2357 0x335ec, 0x33690, 2358 0x33698, 0x336c4, 2359 0x336e4, 0x33790, 2360 0x33798, 0x337c4, 2361 0x337e4, 0x337fc, 2362 0x33814, 0x33814, 2363 0x33854, 0x33868, 2364 0x33880, 0x3388c, 2365 0x338c0, 0x338d0, 2366 0x338e8, 0x338ec, 2367 0x33900, 0x3392c, 2368 0x33934, 0x33950, 2369 0x33958, 0x33958, 2370 0x33960, 0x3398c, 2371 0x3399c, 0x339ac, 2372 0x339c0, 0x339c0, 2373 0x339c8, 0x339d0, 2374 0x339d8, 0x339e0, 2375 0x339ec, 0x33a90, 2376 0x33a98, 0x33ac4, 2377 0x33ae4, 0x33b10, 2378 0x33b24, 0x33b28, 2379 0x33b38, 0x33b50, 2380 0x33bf0, 0x33c10, 2381 0x33c24, 0x33c28, 2382 0x33c38, 0x33c50, 2383 0x33cf0, 0x33cfc, 2384 0x34000, 0x34030, 2385 0x34100, 0x34168, 2386 0x34190, 0x341a0, 2387 0x341a8, 0x341b8, 2388 0x341c4, 0x341c8, 2389 0x341d0, 0x341d0, 2390 0x34200, 0x34320, 2391 0x34400, 0x344b4, 2392 0x344c0, 0x3452c, 2393 0x34540, 0x3461c, 2394 0x34800, 0x348a0, 2395 0x348c0, 0x34908, 2396 0x34910, 0x349b8, 2397 0x34a00, 0x34a04, 2398 0x34a0c, 0x34a14, 
2399 0x34a1c, 0x34a2c, 2400 0x34a44, 0x34a50, 2401 0x34a74, 0x34a74, 2402 0x34a7c, 0x34afc, 2403 0x34b08, 0x34c24, 2404 0x34d00, 0x34d14, 2405 0x34d1c, 0x34d3c, 2406 0x34d44, 0x34d4c, 2407 0x34d54, 0x34d74, 2408 0x34d7c, 0x34d7c, 2409 0x34de0, 0x34de0, 2410 0x34e00, 0x34ed4, 2411 0x34f00, 0x34fa4, 2412 0x34fc0, 0x34fc4, 2413 0x35000, 0x35004, 2414 0x35080, 0x350fc, 2415 0x35208, 0x35220, 2416 0x3523c, 0x35254, 2417 0x35300, 0x35300, 2418 0x35308, 0x3531c, 2419 0x35338, 0x3533c, 2420 0x35380, 0x35380, 2421 0x35388, 0x353a8, 2422 0x353b4, 0x353b4, 2423 0x35400, 0x35420, 2424 0x35438, 0x3543c, 2425 0x35480, 0x35480, 2426 0x354a8, 0x354a8, 2427 0x354b0, 0x354b4, 2428 0x354c8, 0x354d4, 2429 0x35a40, 0x35a4c, 2430 0x35af0, 0x35b20, 2431 0x35b38, 0x35b3c, 2432 0x35b80, 0x35b80, 2433 0x35ba8, 0x35ba8, 2434 0x35bb0, 0x35bb4, 2435 0x35bc8, 0x35bd4, 2436 0x36140, 0x3618c, 2437 0x361f0, 0x361f4, 2438 0x36200, 0x36200, 2439 0x36218, 0x36218, 2440 0x36400, 0x36400, 2441 0x36408, 0x3641c, 2442 0x36618, 0x36620, 2443 0x36664, 0x36664, 2444 0x366a8, 0x366a8, 2445 0x366ec, 0x366ec, 2446 0x36a00, 0x36abc, 2447 0x36b00, 0x36b18, 2448 0x36b20, 0x36b38, 2449 0x36b40, 0x36b58, 2450 0x36b60, 0x36b78, 2451 0x36c00, 0x36c00, 2452 0x36c08, 0x36c3c, 2453 0x37000, 0x3702c, 2454 0x37034, 0x37050, 2455 0x37058, 0x37058, 2456 0x37060, 0x3708c, 2457 0x3709c, 0x370ac, 2458 0x370c0, 0x370c0, 2459 0x370c8, 0x370d0, 2460 0x370d8, 0x370e0, 2461 0x370ec, 0x3712c, 2462 0x37134, 0x37150, 2463 0x37158, 0x37158, 2464 0x37160, 0x3718c, 2465 0x3719c, 0x371ac, 2466 0x371c0, 0x371c0, 2467 0x371c8, 0x371d0, 2468 0x371d8, 0x371e0, 2469 0x371ec, 0x37290, 2470 0x37298, 0x372c4, 2471 0x372e4, 0x37390, 2472 0x37398, 0x373c4, 2473 0x373e4, 0x3742c, 2474 0x37434, 0x37450, 2475 0x37458, 0x37458, 2476 0x37460, 0x3748c, 2477 0x3749c, 0x374ac, 2478 0x374c0, 0x374c0, 2479 0x374c8, 0x374d0, 2480 0x374d8, 0x374e0, 2481 0x374ec, 0x3752c, 2482 0x37534, 0x37550, 2483 0x37558, 0x37558, 2484 0x37560, 0x3758c, 2485 0x3759c, 
0x375ac, 2486 0x375c0, 0x375c0, 2487 0x375c8, 0x375d0, 2488 0x375d8, 0x375e0, 2489 0x375ec, 0x37690, 2490 0x37698, 0x376c4, 2491 0x376e4, 0x37790, 2492 0x37798, 0x377c4, 2493 0x377e4, 0x377fc, 2494 0x37814, 0x37814, 2495 0x37854, 0x37868, 2496 0x37880, 0x3788c, 2497 0x378c0, 0x378d0, 2498 0x378e8, 0x378ec, 2499 0x37900, 0x3792c, 2500 0x37934, 0x37950, 2501 0x37958, 0x37958, 2502 0x37960, 0x3798c, 2503 0x3799c, 0x379ac, 2504 0x379c0, 0x379c0, 2505 0x379c8, 0x379d0, 2506 0x379d8, 0x379e0, 2507 0x379ec, 0x37a90, 2508 0x37a98, 0x37ac4, 2509 0x37ae4, 0x37b10, 2510 0x37b24, 0x37b28, 2511 0x37b38, 0x37b50, 2512 0x37bf0, 0x37c10, 2513 0x37c24, 0x37c28, 2514 0x37c38, 0x37c50, 2515 0x37cf0, 0x37cfc, 2516 0x40040, 0x40040, 2517 0x40080, 0x40084, 2518 0x40100, 0x40100, 2519 0x40140, 0x401bc, 2520 0x40200, 0x40214, 2521 0x40228, 0x40228, 2522 0x40240, 0x40258, 2523 0x40280, 0x40280, 2524 0x40304, 0x40304, 2525 0x40330, 0x4033c, 2526 0x41304, 0x413c8, 2527 0x413d0, 0x413dc, 2528 0x413f0, 0x413f0, 2529 0x41400, 0x4140c, 2530 0x41414, 0x4141c, 2531 0x41480, 0x414d0, 2532 0x44000, 0x4407c, 2533 0x440c0, 0x441ac, 2534 0x441b4, 0x4427c, 2535 0x442c0, 0x443ac, 2536 0x443b4, 0x4447c, 2537 0x444c0, 0x445ac, 2538 0x445b4, 0x4467c, 2539 0x446c0, 0x447ac, 2540 0x447b4, 0x4487c, 2541 0x448c0, 0x449ac, 2542 0x449b4, 0x44a7c, 2543 0x44ac0, 0x44bac, 2544 0x44bb4, 0x44c7c, 2545 0x44cc0, 0x44dac, 2546 0x44db4, 0x44e7c, 2547 0x44ec0, 0x44fac, 2548 0x44fb4, 0x4507c, 2549 0x450c0, 0x451ac, 2550 0x451b4, 0x451fc, 2551 0x45800, 0x45804, 2552 0x45810, 0x45830, 2553 0x45840, 0x45860, 2554 0x45868, 0x45868, 2555 0x45880, 0x45884, 2556 0x458a0, 0x458b0, 2557 0x45a00, 0x45a04, 2558 0x45a10, 0x45a30, 2559 0x45a40, 0x45a60, 2560 0x45a68, 0x45a68, 2561 0x45a80, 0x45a84, 2562 0x45aa0, 0x45ab0, 2563 0x460c0, 0x460e4, 2564 0x47000, 0x4703c, 2565 0x47044, 0x4708c, 2566 0x47200, 0x47250, 2567 0x47400, 0x47408, 2568 0x47414, 0x47420, 2569 0x47600, 0x47618, 2570 0x47800, 0x47814, 2571 0x47820, 0x4782c, 2572 
0x50000, 0x50084, 2573 0x50090, 0x500cc, 2574 0x50300, 0x50384, 2575 0x50400, 0x50400, 2576 0x50800, 0x50884, 2577 0x50890, 0x508cc, 2578 0x50b00, 0x50b84, 2579 0x50c00, 0x50c00, 2580 0x51000, 0x51020, 2581 0x51028, 0x510b0, 2582 0x51300, 0x51324, 2583 }; 2584 2585 static const unsigned int t6vf_reg_ranges[] = { 2586 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2587 VF_MPS_REG(A_MPS_VF_CTL), 2588 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2589 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2590 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2591 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2592 FW_T6VF_MBDATA_BASE_ADDR, 2593 FW_T6VF_MBDATA_BASE_ADDR + 2594 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2595 }; 2596 2597 u32 *buf_end = (u32 *)(buf + buf_size); 2598 const unsigned int *reg_ranges; 2599 int reg_ranges_size, range; 2600 unsigned int chip_version = chip_id(adap); 2601 2602 /* 2603 * Select the right set of register ranges to dump depending on the 2604 * adapter chip type. 
2605 */ 2606 switch (chip_version) { 2607 case CHELSIO_T4: 2608 if (adap->flags & IS_VF) { 2609 reg_ranges = t4vf_reg_ranges; 2610 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); 2611 } else { 2612 reg_ranges = t4_reg_ranges; 2613 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2614 } 2615 break; 2616 2617 case CHELSIO_T5: 2618 if (adap->flags & IS_VF) { 2619 reg_ranges = t5vf_reg_ranges; 2620 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); 2621 } else { 2622 reg_ranges = t5_reg_ranges; 2623 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2624 } 2625 break; 2626 2627 case CHELSIO_T6: 2628 if (adap->flags & IS_VF) { 2629 reg_ranges = t6vf_reg_ranges; 2630 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 2631 } else { 2632 reg_ranges = t6_reg_ranges; 2633 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2634 } 2635 break; 2636 2637 default: 2638 CH_ERR(adap, 2639 "Unsupported chip version %d\n", chip_version); 2640 return; 2641 } 2642 2643 /* 2644 * Clear the register buffer and insert the appropriate register 2645 * values selected by the above register ranges. 2646 */ 2647 memset(buf, 0, buf_size); 2648 for (range = 0; range < reg_ranges_size; range += 2) { 2649 unsigned int reg = reg_ranges[range]; 2650 unsigned int last_reg = reg_ranges[range + 1]; 2651 u32 *bufp = (u32 *)(buf + reg); 2652 2653 /* 2654 * Iterate across the register range filling in the register 2655 * buffer but don't write past the end of the register buffer. 2656 */ 2657 while (reg <= last_reg && bufp < buf_end) { 2658 *bufp++ = t4_read_reg(adap, reg); 2659 reg += sizeof(u32); 2660 } 2661 } 2662} 2663 2664/* 2665 * Partial EEPROM Vital Product Data structure. The VPD starts with one ID 2666 * header followed by one or more VPD-R sections, each with its own header. 
 */
struct t4_vpd_hdr {
	u8 id_tag;		/* expected to be CHELSIO_VPD_UNIQUE_ID (0x82) */
	u8 id_len[2];		/* length of id_data */
	u8 id_data[ID_LEN];	/* adapter ID string */
};

struct t4_vpdr_hdr {
	u8 vpdr_tag;		/* VPD-R section tag */
	u8 vpdr_len[2];		/* section length, little-endian */
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_SIZE		0x800
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID	0x82

/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight.  This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int max_poll;

	/*
	 * If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)
		return 0;

	/*
	 * Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.
	 */
	max_poll = EEPROM_MAX_POLL;
	do {
		u16 val;

		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/*
		 * If the operation is complete, mark the VPD as no longer
		 * busy and return success.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
			return 0;
		}
	} while (--max_poll);

	/*
	 * Failure!  Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds.  It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
	return -ETIMEDOUT;
}

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@vpd: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *	@region: VPD region to search (starting from 0)
 *
 *	Returns the offset within @vpd of the value of the information field
 *	keyword (i.e. the byte just past its 3-byte header), or -ENOENT if
 *	the keyword is not found.
 */
static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
{
	int i, tag;
	unsigned int offset, len;
	const struct t4_vpdr_hdr *vpdr;

	offset = sizeof(struct t4_vpd_hdr);
	vpdr = (const void *)(vpd + offset);
	tag = vpdr->vpdr_tag;
	len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	/* Walk forward to the requested VPD-R section; tags must ascend. */
	while (region--) {
		offset += sizeof(struct t4_vpdr_hdr) + len;
		vpdr = (const void *)(vpd + offset);
		if (++tag != vpdr->vpdr_tag)
			return -ENOENT;
		len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
	}
	offset += sizeof(struct t4_vpdr_hdr);

	if (offset + len > VPD_LEN) {
		return -ENOENT;
	}

	/* Scan the section's keyword entries: 2-byte name, 1-byte length. */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(vpd + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + vpd[i + 2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@device_id: the PCI device id of the adapter
 *	@buf: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
			  uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;
	u8 csum;
	const u8 *vpd = (const u8 *)buf;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of a
	 * VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/* "RV" is the checksum byte; all bytes up to and including it must
	 * sum to zero (mod 256) for the VPD to be considered valid. */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/* Each keyword's stored length is the byte just before its value. */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/* "VF" lives in the second VPD-R region; optional on some cards. */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program 256B page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase 64KB sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.
 *	The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue READ STATUS and fetch the one status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Bit 0 is the write-in-progress flag. */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
3150 */ 3151int t4_read_flash(struct adapter *adapter, unsigned int addr, 3152 unsigned int nwords, u32 *data, int byte_oriented) 3153{ 3154 int ret; 3155 3156 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 3157 return -EINVAL; 3158 3159 addr = swab32(addr) | SF_RD_DATA_FAST; 3160 3161 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 3162 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 3163 return ret; 3164 3165 for ( ; nwords; nwords--, data++) { 3166 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 3167 if (nwords == 1) 3168 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 3169 if (ret) 3170 return ret; 3171 if (byte_oriented) 3172 *data = (__force __u32)(cpu_to_be32(*data)); 3173 } 3174 return 0; 3175} 3176 3177/** 3178 * t4_write_flash - write up to a page of data to the serial flash 3179 * @adapter: the adapter 3180 * @addr: the start address to write 3181 * @n: length of data to write in bytes 3182 * @data: the data to write 3183 * @byte_oriented: whether to store data as bytes or as words 3184 * 3185 * Writes up to a page of data (256 bytes) to the serial flash starting 3186 * at the given address. All the data must be written to the same page. 3187 * If @byte_oriented is set the write data is stored as byte stream 3188 * (i.e. matches what on disk), otherwise in big-endian. 
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload into the flash 1-4 bytes at a time, packing
	 * each group big-endian unless @byte_oriented already is. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; rewind to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_bs_version - read the firmware bootstrap version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW Bootstrap version from flash.
3263 */ 3264int t4_get_bs_version(struct adapter *adapter, u32 *vers) 3265{ 3266 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + 3267 offsetof(struct fw_hdr, fw_ver), 1, 3268 vers, 0); 3269} 3270 3271/** 3272 * t4_get_tp_version - read the TP microcode version 3273 * @adapter: the adapter 3274 * @vers: where to place the version 3275 * 3276 * Reads the TP microcode version from flash. 3277 */ 3278int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3279{ 3280 return t4_read_flash(adapter, FLASH_FW_START + 3281 offsetof(struct fw_hdr, tp_microcode_ver), 3282 1, vers, 0); 3283} 3284 3285/** 3286 * t4_get_exprom_version - return the Expansion ROM version (if any) 3287 * @adapter: the adapter 3288 * @vers: where to place the version 3289 * 3290 * Reads the Expansion ROM header from FLASH and returns the version 3291 * number (if present) through the @vers return value pointer. We return 3292 * this in the Firmware Version Format since it's convenient. Return 3293 * 0 on success, -ENOENT if no Expansion ROM is present. 
3294 */ 3295int t4_get_exprom_version(struct adapter *adap, u32 *vers) 3296{ 3297 struct exprom_header { 3298 unsigned char hdr_arr[16]; /* must start with 0x55aa */ 3299 unsigned char hdr_ver[4]; /* Expansion ROM version */ 3300 } *hdr; 3301 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), 3302 sizeof(u32))]; 3303 int ret; 3304 3305 ret = t4_read_flash(adap, FLASH_EXP_ROM_START, 3306 ARRAY_SIZE(exprom_header_buf), exprom_header_buf, 3307 0); 3308 if (ret) 3309 return ret; 3310 3311 hdr = (struct exprom_header *)exprom_header_buf; 3312 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) 3313 return -ENOENT; 3314 3315 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) | 3316 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) | 3317 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) | 3318 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3])); 3319 return 0; 3320} 3321 3322/** 3323 * t4_get_scfg_version - return the Serial Configuration version 3324 * @adapter: the adapter 3325 * @vers: where to place the version 3326 * 3327 * Reads the Serial Configuration Version via the Firmware interface 3328 * (thus this can only be called once we're ready to issue Firmware 3329 * commands). The format of the Serial Configuration version is 3330 * adapter specific. Returns 0 on success, an error on failure. 3331 * 3332 * Note that early versions of the Firmware didn't include the ability 3333 * to retrieve the Serial Configuration version, so we zero-out the 3334 * return-value parameter in that case to avoid leaving it with 3335 * garbage in it. 3336 * 3337 * Also note that the Firmware will return its cached copy of the Serial 3338 * Initialization Revision ID, not the actual Revision ID as written in 3339 * the Serial EEPROM. This is only an issue if a new VPD has been written 3340 * and the Firmware/Chip haven't yet gone through a RESET sequence. 
So 3341 * it's best to defer calling this routine till after a FW_RESET_CMD has 3342 * been issued if the Host Driver will be performing a full adapter 3343 * initialization. 3344 */ 3345int t4_get_scfg_version(struct adapter *adapter, u32 *vers) 3346{ 3347 u32 scfgrev_param; 3348 int ret; 3349 3350 scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3351 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV)); 3352 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3353 1, &scfgrev_param, vers); 3354 if (ret) 3355 *vers = 0; 3356 return ret; 3357} 3358 3359/** 3360 * t4_get_vpd_version - return the VPD version 3361 * @adapter: the adapter 3362 * @vers: where to place the version 3363 * 3364 * Reads the VPD via the Firmware interface (thus this can only be called 3365 * once we're ready to issue Firmware commands). The format of the 3366 * VPD version is adapter specific. Returns 0 on success, an error on 3367 * failure. 3368 * 3369 * Note that early versions of the Firmware didn't include the ability 3370 * to retrieve the VPD version, so we zero-out the return-value parameter 3371 * in that case to avoid leaving it with garbage in it. 3372 * 3373 * Also note that the Firmware will return its cached copy of the VPD 3374 * Revision ID, not the actual Revision ID as written in the Serial 3375 * EEPROM. This is only an issue if a new VPD has been written and the 3376 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best 3377 * to defer calling this routine till after a FW_RESET_CMD has been issued 3378 * if the Host Driver will be performing a full adapter initialization. 
3379 */ 3380int t4_get_vpd_version(struct adapter *adapter, u32 *vers) 3381{ 3382 u32 vpdrev_param; 3383 int ret; 3384 3385 vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3386 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV)); 3387 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 3388 1, &vpdrev_param, vers); 3389 if (ret) 3390 *vers = 0; 3391 return ret; 3392} 3393 3394/** 3395 * t4_get_version_info - extract various chip/firmware version information 3396 * @adapter: the adapter 3397 * 3398 * Reads various chip/firmware version numbers and stores them into the 3399 * adapter Adapter Parameters structure. If any of the efforts fails 3400 * the first failure will be returned, but all of the version numbers 3401 * will be read. 3402 */ 3403int t4_get_version_info(struct adapter *adapter) 3404{ 3405 int ret = 0; 3406 3407 #define FIRST_RET(__getvinfo) \ 3408 do { \ 3409 int __ret = __getvinfo; \ 3410 if (__ret && !ret) \ 3411 ret = __ret; \ 3412 } while (0) 3413 3414 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); 3415 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); 3416 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); 3417 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); 3418 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); 3419 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); 3420 3421 #undef FIRST_RET 3422 3423 return ret; 3424} 3425 3426/** 3427 * t4_flash_erase_sectors - erase a range of flash sectors 3428 * @adapter: the adapter 3429 * @start: the first sector to erase 3430 * @end: the last sector to erase 3431 * 3432 * Erases the sectors in the given inclusive range. 
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* WRITE ENABLE, then ERASE SECTOR with the sector number in
		 * the command's address field, then poll for completion. */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/*
 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap images go to their own dedicated flash region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally stamp the real firmware version into the header. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			V_FW_PARAMS_CMD_PFN(adap->pf) |
			V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3620} 3621 3622void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, 3623 unsigned int *pif_req_wrptr, 3624 unsigned int *pif_rsp_wrptr) 3625{ 3626 int i, j; 3627 u32 cfg, val, req, rsp; 3628 3629 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 3630 if (cfg & F_LADBGEN) 3631 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 3632 3633 val = t4_read_reg(adap, A_CIM_DEBUGSTS); 3634 req = G_POLADBGWRPTR(val); 3635 rsp = G_PILADBGWRPTR(val); 3636 if (pif_req_wrptr) 3637 *pif_req_wrptr = req; 3638 if (pif_rsp_wrptr) 3639 *pif_rsp_wrptr = rsp; 3640 3641 for (i = 0; i < CIM_PIFLA_SIZE; i++) { 3642 for (j = 0; j < 6; j++) { 3643 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) | 3644 V_PILADBGRDPTR(rsp)); 3645 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA); 3646 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA); 3647 req++; 3648 rsp++; 3649 } 3650 req = (req + 2) & M_POLADBGRDPTR; 3651 rsp = (rsp + 2) & M_PILADBGRDPTR; 3652 } 3653 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 3654} 3655 3656void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) 3657{ 3658 u32 cfg; 3659 int i, j, idx; 3660 3661 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 3662 if (cfg & F_LADBGEN) 3663 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 3664 3665 for (i = 0; i < CIM_MALA_SIZE; i++) { 3666 for (j = 0; j < 5; j++) { 3667 idx = 8 * i + j; 3668 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) | 3669 V_PILADBGRDPTR(idx)); 3670 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA); 3671 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA); 3672 } 3673 } 3674 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 3675} 3676 3677void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) 3678{ 3679 unsigned int i, j; 3680 3681 for (i = 0; i < 8; i++) { 3682 u32 *p = la_buf + i; 3683 3684 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); 3685 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR); 3686 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j); 3687 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) 
3688 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA); 3689 } 3690} 3691 3692/** 3693 * t4_link_l1cfg - apply link configuration to MAC/PHY 3694 * @phy: the PHY to setup 3695 * @mac: the MAC to setup 3696 * @lc: the requested link configuration 3697 * 3698 * Set up a port's MAC and PHY according to a desired link configuration. 3699 * - If the PHY can auto-negotiate first decide what to advertise, then 3700 * enable/disable auto-negotiation as desired, and reset. 3701 * - If the PHY does not auto-negotiate just reset it. 3702 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, 3703 * otherwise do it later based on the outcome of auto-negotiation. 3704 */ 3705int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, 3706 struct link_config *lc) 3707{ 3708 struct fw_port_cmd c; 3709 unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); 3710 unsigned int aneg, fc, fec, speed, rcap; 3711 3712 fc = 0; 3713 if (lc->requested_fc & PAUSE_RX) 3714 fc |= FW_PORT_CAP_FC_RX; 3715 if (lc->requested_fc & PAUSE_TX) 3716 fc |= FW_PORT_CAP_FC_TX; 3717 3718 fec = 0; 3719 if (lc->requested_fec & FEC_RS) 3720 fec = FW_PORT_CAP_FEC_RS; 3721 else if (lc->requested_fec & FEC_BASER_RS) 3722 fec = FW_PORT_CAP_FEC_BASER_RS; 3723 3724 if (!(lc->supported & FW_PORT_CAP_ANEG) || 3725 lc->requested_aneg == AUTONEG_DISABLE) { 3726 aneg = 0; 3727 switch (lc->requested_speed) { 3728 case 100000: 3729 speed = FW_PORT_CAP_SPEED_100G; 3730 break; 3731 case 40000: 3732 speed = FW_PORT_CAP_SPEED_40G; 3733 break; 3734 case 25000: 3735 speed = FW_PORT_CAP_SPEED_25G; 3736 break; 3737 case 10000: 3738 speed = FW_PORT_CAP_SPEED_10G; 3739 break; 3740 case 1000: 3741 speed = FW_PORT_CAP_SPEED_1G; 3742 break; 3743 case 100: 3744 speed = FW_PORT_CAP_SPEED_100M; 3745 break; 3746 default: 3747 return -EINVAL; 3748 break; 3749 } 3750 } else { 3751 aneg = FW_PORT_CAP_ANEG; 3752 speed = lc->supported & 3753 V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED); 3754 } 3755 3756 rcap = 
aneg | speed | fc | fec; 3757 if ((rcap | lc->supported) != lc->supported) { 3758#ifdef INVARIANTS 3759 CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap, 3760 lc->supported); 3761#endif 3762 rcap &= lc->supported; 3763 } 3764 rcap |= mdi; 3765 3766 memset(&c, 0, sizeof(c)); 3767 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 3768 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 3769 V_FW_PORT_CMD_PORTID(port)); 3770 c.action_to_len16 = 3771 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 3772 FW_LEN16(c)); 3773 c.u.l1cfg.rcap = cpu_to_be32(rcap); 3774 3775 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 3776} 3777 3778/** 3779 * t4_restart_aneg - restart autonegotiation 3780 * @adap: the adapter 3781 * @mbox: mbox to use for the FW command 3782 * @port: the port id 3783 * 3784 * Restarts autonegotiation for the selected port. 3785 */ 3786int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) 3787{ 3788 struct fw_port_cmd c; 3789 3790 memset(&c, 0, sizeof(c)); 3791 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 3792 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 3793 V_FW_PORT_CMD_PORTID(port)); 3794 c.action_to_len16 = 3795 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | 3796 FW_LEN16(c)); 3797 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); 3798 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3799} 3800 3801typedef void (*int_handler_t)(struct adapter *adap); 3802 3803struct intr_info { 3804 unsigned int mask; /* bits to check in interrupt status */ 3805 const char *msg; /* message to print or NULL */ 3806 short stat_idx; /* stat counter to increment or -1 */ 3807 unsigned short fatal; /* whether the condition reported is fatal */ 3808 int_handler_t int_handler; /* platform-specific int handler */ 3809}; 3810 3811/** 3812 * t4_handle_intr_status - table driven interrupt handler 3813 * @adapter: the adapter that generated the interrupt 3814 * @reg: the interrupt status register to process 3815 * @acts: table of 
interrupt actions 3816 * 3817 * A table driven interrupt handler that applies a set of masks to an 3818 * interrupt status word and performs the corresponding actions if the 3819 * interrupts described by the mask have occurred. The actions include 3820 * optionally emitting a warning or alert message. The table is terminated 3821 * by an entry specifying mask 0. Returns the number of fatal interrupt 3822 * conditions. 3823 */ 3824static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, 3825 const struct intr_info *acts) 3826{ 3827 int fatal = 0; 3828 unsigned int mask = 0; 3829 unsigned int status = t4_read_reg(adapter, reg); 3830 3831 for ( ; acts->mask; ++acts) { 3832 if (!(status & acts->mask)) 3833 continue; 3834 if (acts->fatal) { 3835 fatal++; 3836 CH_ALERT(adapter, "%s (0x%x)\n", acts->msg, 3837 status & acts->mask); 3838 } else if (acts->msg) 3839 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg, 3840 status & acts->mask); 3841 if (acts->int_handler) 3842 acts->int_handler(adapter); 3843 mask |= acts->mask; 3844 } 3845 status &= mask; 3846 if (status) /* clear processed interrupts */ 3847 t4_write_reg(adapter, reg, status); 3848 return fatal; 3849} 3850 3851/* 3852 * Interrupt handler for the PCIE module. 
3853 */ 3854static void pcie_intr_handler(struct adapter *adapter) 3855{ 3856 static const struct intr_info sysbus_intr_info[] = { 3857 { F_RNPP, "RXNP array parity error", -1, 1 }, 3858 { F_RPCP, "RXPC array parity error", -1, 1 }, 3859 { F_RCIP, "RXCIF array parity error", -1, 1 }, 3860 { F_RCCP, "Rx completions control array parity error", -1, 1 }, 3861 { F_RFTP, "RXFT array parity error", -1, 1 }, 3862 { 0 } 3863 }; 3864 static const struct intr_info pcie_port_intr_info[] = { 3865 { F_TPCP, "TXPC array parity error", -1, 1 }, 3866 { F_TNPP, "TXNP array parity error", -1, 1 }, 3867 { F_TFTP, "TXFT array parity error", -1, 1 }, 3868 { F_TCAP, "TXCA array parity error", -1, 1 }, 3869 { F_TCIP, "TXCIF array parity error", -1, 1 }, 3870 { F_RCAP, "RXCA array parity error", -1, 1 }, 3871 { F_OTDD, "outbound request TLP discarded", -1, 1 }, 3872 { F_RDPE, "Rx data parity error", -1, 1 }, 3873 { F_TDUE, "Tx uncorrectable data error", -1, 1 }, 3874 { 0 } 3875 }; 3876 static const struct intr_info pcie_intr_info[] = { 3877 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 3878 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 3879 { F_MSIDATAPERR, "MSI data parity error", -1, 1 }, 3880 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 3881 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 3882 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 3883 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 3884 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 3885 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 3886 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 3887 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 3888 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 3889 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 3890 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 3891 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 3892 { F_DRSPPERR, 
"PCI DMA channel response parity error", -1, 1 }, 3893 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 3894 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 3895 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 3896 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 3897 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 3898 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 3899 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 }, 3900 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 3901 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 3902 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 }, 3903 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 }, 3904 { F_PCIESINT, "PCI core secondary fault", -1, 1 }, 3905 { F_PCIEPINT, "PCI core primary fault", -1, 1 }, 3906 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1, 3907 0 }, 3908 { 0 } 3909 }; 3910 3911 static const struct intr_info t5_pcie_intr_info[] = { 3912 { F_MSTGRPPERR, "Master Response Read Queue parity error", 3913 -1, 1 }, 3914 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 3915 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 3916 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 3917 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 3918 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 3919 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 3920 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 3921 -1, 1 }, 3922 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", 3923 -1, 1 }, 3924 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 3925 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 3926 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 3927 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 3928 { F_DREQWRPERR, "PCI DMA channel write request parity error", 3929 -1, 1 }, 3930 { F_DREQPERR, "PCI DMA 
channel request parity error", -1, 1 }, 3931 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 3932 { F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 3933 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 3934 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 3935 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 3936 { F_FIDPERR, "PCI FID parity error", -1, 1 }, 3937 { F_VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 3938 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 3939 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 3940 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 3941 -1, 1 }, 3942 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error", 3943 -1, 1 }, 3944 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 3945 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 3946 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 3947 { F_READRSPERR, "Outbound read error", -1, 3948 0 }, 3949 { 0 } 3950 }; 3951 3952 int fat; 3953 3954 if (is_t4(adapter)) 3955 fat = t4_handle_intr_status(adapter, 3956 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 3957 sysbus_intr_info) + 3958 t4_handle_intr_status(adapter, 3959 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 3960 pcie_port_intr_info) + 3961 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 3962 pcie_intr_info); 3963 else 3964 fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, 3965 t5_pcie_intr_info); 3966 if (fat) 3967 t4_fatal_err(adapter); 3968} 3969 3970/* 3971 * TP interrupt handler. 3972 */ 3973static void tp_intr_handler(struct adapter *adapter) 3974{ 3975 static const struct intr_info tp_intr_info[] = { 3976 { 0x3fffffff, "TP parity error", -1, 1 }, 3977 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 3978 { 0 } 3979 }; 3980 3981 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info)) 3982 t4_fatal_err(adapter); 3983} 3984 3985/* 3986 * SGE interrupt handler. 
3987 */ 3988static void sge_intr_handler(struct adapter *adapter) 3989{ 3990 u64 v; 3991 u32 err; 3992 3993 static const struct intr_info sge_intr_info[] = { 3994 { F_ERR_CPL_EXCEED_IQE_SIZE, 3995 "SGE received CPL exceeding IQE size", -1, 1 }, 3996 { F_ERR_INVALID_CIDX_INC, 3997 "SGE GTS CIDX increment too large", -1, 0 }, 3998 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 3999 { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, 4000 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0, 4001 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 4002 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 4003 0 }, 4004 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 4005 0 }, 4006 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 4007 0 }, 4008 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 4009 0 }, 4010 { F_ERR_ING_CTXT_PRIO, 4011 "SGE too many priority ingress contexts", -1, 0 }, 4012 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 4013 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 4014 { F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | 4015 F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3, 4016 "SGE PCIe error for a DBP thread", -1, 0 }, 4017 { 0 } 4018 }; 4019 4020 static const struct intr_info t4t5_sge_intr_info[] = { 4021 { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, 4022 { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, 4023 { F_ERR_EGR_CTXT_PRIO, 4024 "SGE too many priority egress contexts", -1, 0 }, 4025 { 0 } 4026 }; 4027 4028 /* 4029 * For now, treat below interrupts as fatal so that we disable SGE and 4030 * get better debug */ 4031 static const struct intr_info t6_sge_intr_info[] = { 4032 { F_FATAL_WRE_LEN, 4033 "SGE Actual WRE packet is less than advertized length", 4034 -1, 1 }, 4035 { 0 } 4036 }; 4037 4038 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) | 4039 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32); 4040 if (v) { 4041 CH_ALERT(adapter, "SGE parity error (%#llx)\n", 4042 
(unsigned long long)v); 4043 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v); 4044 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32); 4045 } 4046 4047 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info); 4048 if (chip_id(adapter) <= CHELSIO_T5) 4049 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, 4050 t4t5_sge_intr_info); 4051 else 4052 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, 4053 t6_sge_intr_info); 4054 4055 err = t4_read_reg(adapter, A_SGE_ERROR_STATS); 4056 if (err & F_ERROR_QID_VALID) { 4057 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err)); 4058 if (err & F_UNCAPTURED_ERROR) 4059 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n"); 4060 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID | 4061 F_UNCAPTURED_ERROR); 4062 } 4063 4064 if (v != 0) 4065 t4_fatal_err(adapter); 4066} 4067 4068#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\ 4069 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR) 4070#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\ 4071 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR) 4072 4073/* 4074 * CIM interrupt handler. 
4075 */ 4076static void cim_intr_handler(struct adapter *adapter) 4077{ 4078 static const struct intr_info cim_intr_info[] = { 4079 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 4080 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 4081 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 4082 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 4083 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 4084 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 4085 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 4086 { F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 }, 4087 { 0 } 4088 }; 4089 static const struct intr_info cim_upintr_info[] = { 4090 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 4091 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 4092 { F_ILLWRINT, "CIM illegal write", -1, 1 }, 4093 { F_ILLRDINT, "CIM illegal read", -1, 1 }, 4094 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 4095 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 4096 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 4097 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 4098 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 4099 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 4100 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 4101 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 4102 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 4103 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 4104 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 4105 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 4106 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 4107 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 4108 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 4109 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 4110 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 4111 { 
F_SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 4112 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 4113 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 4114 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 4115 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 4116 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 4117 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 4118 { 0 } 4119 }; 4120 u32 val, fw_err; 4121 int fat; 4122 4123 fw_err = t4_read_reg(adapter, A_PCIE_FW); 4124 if (fw_err & F_PCIE_FW_ERR) 4125 t4_report_fw_error(adapter); 4126 4127 /* When the Firmware detects an internal error which normally wouldn't 4128 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order 4129 * to make sure the Host sees the Firmware Crash. So if we have a 4130 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0 4131 * interrupt. 4132 */ 4133 val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE); 4134 if (val & F_TIMER0INT) 4135 if (!(fw_err & F_PCIE_FW_ERR) || 4136 (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) 4137 t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE, 4138 F_TIMER0INT); 4139 4140 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 4141 cim_intr_info) + 4142 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE, 4143 cim_upintr_info); 4144 if (fat) 4145 t4_fatal_err(adapter); 4146} 4147 4148/* 4149 * ULP RX interrupt handler. 4150 */ 4151static void ulprx_intr_handler(struct adapter *adapter) 4152{ 4153 static const struct intr_info ulprx_intr_info[] = { 4154 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 }, 4155 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 }, 4156 { 0x7fffff, "ULPRX parity error", -1, 1 }, 4157 { 0 } 4158 }; 4159 4160 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)) 4161 t4_fatal_err(adapter); 4162} 4163 4164/* 4165 * ULP TX interrupt handler. 
4166 */ 4167static void ulptx_intr_handler(struct adapter *adapter) 4168{ 4169 static const struct intr_info ulptx_intr_info[] = { 4170 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 4171 0 }, 4172 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 4173 0 }, 4174 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 4175 0 }, 4176 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 4177 0 }, 4178 { 0xfffffff, "ULPTX parity error", -1, 1 }, 4179 { 0 } 4180 }; 4181 4182 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)) 4183 t4_fatal_err(adapter); 4184} 4185 4186/* 4187 * PM TX interrupt handler. 4188 */ 4189static void pmtx_intr_handler(struct adapter *adapter) 4190{ 4191 static const struct intr_info pmtx_intr_info[] = { 4192 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 4193 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 4194 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 4195 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 4196 { 0xffffff0, "PMTX framing error", -1, 1 }, 4197 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 4198 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 4199 1 }, 4200 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 4201 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 4202 { 0 } 4203 }; 4204 4205 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info)) 4206 t4_fatal_err(adapter); 4207} 4208 4209/* 4210 * PM RX interrupt handler. 
4211 */ 4212static void pmrx_intr_handler(struct adapter *adapter) 4213{ 4214 static const struct intr_info pmrx_intr_info[] = { 4215 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 4216 { 0x3ffff0, "PMRX framing error", -1, 1 }, 4217 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 4218 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 4219 1 }, 4220 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 4221 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 4222 { 0 } 4223 }; 4224 4225 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info)) 4226 t4_fatal_err(adapter); 4227} 4228 4229/* 4230 * CPL switch interrupt handler. 4231 */ 4232static void cplsw_intr_handler(struct adapter *adapter) 4233{ 4234 static const struct intr_info cplsw_intr_info[] = { 4235 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 4236 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 4237 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 4238 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 4239 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 4240 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 4241 { 0 } 4242 }; 4243 4244 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info)) 4245 t4_fatal_err(adapter); 4246} 4247 4248/* 4249 * LE interrupt handler. 
4250 */ 4251static void le_intr_handler(struct adapter *adap) 4252{ 4253 unsigned int chip_ver = chip_id(adap); 4254 static const struct intr_info le_intr_info[] = { 4255 { F_LIPMISS, "LE LIP miss", -1, 0 }, 4256 { F_LIP0, "LE 0 LIP error", -1, 0 }, 4257 { F_PARITYERR, "LE parity error", -1, 1 }, 4258 { F_UNKNOWNCMD, "LE unknown command", -1, 1 }, 4259 { F_REQQPARERR, "LE request queue parity error", -1, 1 }, 4260 { 0 } 4261 }; 4262 4263 static const struct intr_info t6_le_intr_info[] = { 4264 { F_T6_LIPMISS, "LE LIP miss", -1, 0 }, 4265 { F_T6_LIP0, "LE 0 LIP error", -1, 0 }, 4266 { F_TCAMINTPERR, "LE parity error", -1, 1 }, 4267 { F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 }, 4268 { F_SSRAMINTPERR, "LE request queue parity error", -1, 1 }, 4269 { 0 } 4270 }; 4271 4272 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, 4273 (chip_ver <= CHELSIO_T5) ? 4274 le_intr_info : t6_le_intr_info)) 4275 t4_fatal_err(adap); 4276} 4277 4278/* 4279 * MPS interrupt handler. 4280 */ 4281static void mps_intr_handler(struct adapter *adapter) 4282{ 4283 static const struct intr_info mps_rx_intr_info[] = { 4284 { 0xffffff, "MPS Rx parity error", -1, 1 }, 4285 { 0 } 4286 }; 4287 static const struct intr_info mps_tx_intr_info[] = { 4288 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 }, 4289 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 4290 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error", 4291 -1, 1 }, 4292 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error", 4293 -1, 1 }, 4294 { F_BUBBLE, "MPS Tx underflow", -1, 1 }, 4295 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 4296 { F_FRMERR, "MPS Tx framing error", -1, 1 }, 4297 { 0 } 4298 }; 4299 static const struct intr_info mps_trc_intr_info[] = { 4300 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 }, 4301 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1, 4302 1 }, 4303 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 }, 4304 { 0 } 4305 }; 4306 static const 
struct intr_info mps_stat_sram_intr_info[] = { 4307 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 4308 { 0 } 4309 }; 4310 static const struct intr_info mps_stat_tx_intr_info[] = { 4311 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 4312 { 0 } 4313 }; 4314 static const struct intr_info mps_stat_rx_intr_info[] = { 4315 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 4316 { 0 } 4317 }; 4318 static const struct intr_info mps_cls_intr_info[] = { 4319 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 4320 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 4321 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 4322 { 0 } 4323 }; 4324 4325 int fat; 4326 4327 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE, 4328 mps_rx_intr_info) + 4329 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE, 4330 mps_tx_intr_info) + 4331 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE, 4332 mps_trc_intr_info) + 4333 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM, 4334 mps_stat_sram_intr_info) + 4335 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 4336 mps_stat_tx_intr_info) + 4337 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 4338 mps_stat_rx_intr_info) + 4339 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE, 4340 mps_cls_intr_info); 4341 4342 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0); 4343 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */ 4344 if (fat) 4345 t4_fatal_err(adapter); 4346} 4347 4348#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \ 4349 F_ECC_UE_INT_CAUSE) 4350 4351/* 4352 * EDC/MC interrupt handler. 
4353 */ 4354static void mem_intr_handler(struct adapter *adapter, int idx) 4355{ 4356 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" }; 4357 4358 unsigned int addr, cnt_addr, v; 4359 4360 if (idx <= MEM_EDC1) { 4361 addr = EDC_REG(A_EDC_INT_CAUSE, idx); 4362 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx); 4363 } else if (idx == MEM_MC) { 4364 if (is_t4(adapter)) { 4365 addr = A_MC_INT_CAUSE; 4366 cnt_addr = A_MC_ECC_STATUS; 4367 } else { 4368 addr = A_MC_P_INT_CAUSE; 4369 cnt_addr = A_MC_P_ECC_STATUS; 4370 } 4371 } else { 4372 addr = MC_REG(A_MC_P_INT_CAUSE, 1); 4373 cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1); 4374 } 4375 4376 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 4377 if (v & F_PERR_INT_CAUSE) 4378 CH_ALERT(adapter, "%s FIFO parity error\n", 4379 name[idx]); 4380 if (v & F_ECC_CE_INT_CAUSE) { 4381 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr)); 4382 4383 if (idx <= MEM_EDC1) 4384 t4_edc_err_read(adapter, idx); 4385 4386 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT)); 4387 CH_WARN_RATELIMIT(adapter, 4388 "%u %s correctable ECC data error%s\n", 4389 cnt, name[idx], cnt > 1 ? "s" : ""); 4390 } 4391 if (v & F_ECC_UE_INT_CAUSE) 4392 CH_ALERT(adapter, 4393 "%s uncorrectable ECC data error\n", name[idx]); 4394 4395 t4_write_reg(adapter, addr, v); 4396 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE)) 4397 t4_fatal_err(adapter); 4398} 4399 4400/* 4401 * MA interrupt handler. 
4402 */ 4403static void ma_intr_handler(struct adapter *adapter) 4404{ 4405 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE); 4406 4407 if (status & F_MEM_PERR_INT_CAUSE) { 4408 CH_ALERT(adapter, 4409 "MA parity error, parity status %#x\n", 4410 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1)); 4411 if (is_t5(adapter)) 4412 CH_ALERT(adapter, 4413 "MA parity error, parity status %#x\n", 4414 t4_read_reg(adapter, 4415 A_MA_PARITY_ERROR_STATUS2)); 4416 } 4417 if (status & F_MEM_WRAP_INT_CAUSE) { 4418 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS); 4419 CH_ALERT(adapter, "MA address wrap-around error by " 4420 "client %u to address %#x\n", 4421 G_MEM_WRAP_CLIENT_NUM(v), 4422 G_MEM_WRAP_ADDRESS(v) << 4); 4423 } 4424 t4_write_reg(adapter, A_MA_INT_CAUSE, status); 4425 t4_fatal_err(adapter); 4426} 4427 4428/* 4429 * SMB interrupt handler. 4430 */ 4431static void smb_intr_handler(struct adapter *adap) 4432{ 4433 static const struct intr_info smb_intr_info[] = { 4434 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 4435 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 4436 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 4437 { 0 } 4438 }; 4439 4440 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info)) 4441 t4_fatal_err(adap); 4442} 4443 4444/* 4445 * NC-SI interrupt handler. 4446 */ 4447static void ncsi_intr_handler(struct adapter *adap) 4448{ 4449 static const struct intr_info ncsi_intr_info[] = { 4450 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 4451 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 4452 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 4453 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 4454 { 0 } 4455 }; 4456 4457 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info)) 4458 t4_fatal_err(adap); 4459} 4460 4461/* 4462 * XGMAC interrupt handler. 
4463 */ 4464static void xgmac_intr_handler(struct adapter *adap, int port) 4465{ 4466 u32 v, int_cause_reg; 4467 4468 if (is_t4(adap)) 4469 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 4470 else 4471 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 4472 4473 v = t4_read_reg(adap, int_cause_reg); 4474 4475 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 4476 if (!v) 4477 return; 4478 4479 if (v & F_TXFIFO_PRTY_ERR) 4480 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", 4481 port); 4482 if (v & F_RXFIFO_PRTY_ERR) 4483 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", 4484 port); 4485 t4_write_reg(adap, int_cause_reg, v); 4486 t4_fatal_err(adap); 4487} 4488 4489/* 4490 * PL interrupt handler. 4491 */ 4492static void pl_intr_handler(struct adapter *adap) 4493{ 4494 static const struct intr_info pl_intr_info[] = { 4495 { F_FATALPERR, "Fatal parity error", -1, 1 }, 4496 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 4497 { 0 } 4498 }; 4499 4500 static const struct intr_info t5_pl_intr_info[] = { 4501 { F_FATALPERR, "Fatal parity error", -1, 1 }, 4502 { 0 } 4503 }; 4504 4505 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 4506 is_t4(adap) ? 4507 pl_intr_info : t5_pl_intr_info)) 4508 t4_fatal_err(adap); 4509} 4510 4511#define PF_INTR_MASK (F_PFSW | F_PFCIM) 4512 4513/** 4514 * t4_slow_intr_handler - control path interrupt handler 4515 * @adapter: the adapter 4516 * 4517 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 4518 * The designation 'slow' is because it involves register reads, while 4519 * data interrupts typically don't involve any MMIOs. 
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	/* Nothing for us to do if none of the global causes are pending. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending cause to its module-specific handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	/* A second MC controller exists only on T5. */
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master.
	 */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts. Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules. Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* T6 moved the SOURCEPF field within PL_WHOAMI. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-specific extra SGE error causes to enable. */
	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts. We only disable the top-level interrupt
 *	concentrators. The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* T6 moved the SOURCEPF field within PL_WHOAMI. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	/* Stop routing global interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts. The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	/* Cause registers common to all chip revisions. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* Cause registers are write-1-to-clear. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
		     A_MC_P_INT_CAUSE, 0xffffffff);

	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
			     0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
			     0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash (0..63).
 */
static int hash_mac_addr(const u8 *addr)
{
	/* Fold the two 24-bit halves together, then fold down to 6 bits. */
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for reuse of @rspq */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* reuse @rspq values */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags (only used in basicvirtual mode)
 *
 *	Sets the global RSS mode.  Returns 0 on success, -EINVAL for an
 *	unsupported @mode, or the mailbox command's error code.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *	@skeyidx: RSS secret key table index for non-global mode
 *	@skey: RSS vf_scramble key for VI.
 *
 *	Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row.  Selects the row, then polls for the "row valid"
 * bit before returning the row contents in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	/* Each table row holds two queue entries. */
	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST.  One mailbox command is
 *	issued per register; returns the first mailbox error, if any.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;
	int cmd;

	/* Map the address register to its LDST address-space type. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST equivalent; fall back to backdoor access. */
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* rc is still non-zero if LDST was skipped or failed. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while
 *	awaiting command completion
 *
 *	Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/* __DECONST is safe: the write path never modifies the buffer. */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
}

/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 *	t4_tp_mib_read - Read TP MIB registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Load the key value first, then latch it into the selected slot. */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* T6 widened/moved the VFWRADDR field. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* T6 widened/moved the VFWRADDR field. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}

/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Map register and returns its value.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmap;

	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);

	return pfmap;
}

/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Map register.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
}

/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
	u32 pfmask;

	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);

	return pfmask;
}

/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes the specified value to the PF RSS Mask register.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Buffer sized to hold one full contiguous run of TCP MIB counters. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/* Map a counter name to its offset within the buffer. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
	/* 64-bit counters are stored as HI/LO register pairs. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The v6 counters have the same layout at a different base. */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	/* One counter per channel for each error class. */
	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and ofld_cong_defer are adjacent in the MIB. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
			   bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
}

/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* rqe_dfr_pkt and rqe_dfr_mod are adjacent in the MIB. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* Byte counter is a HI/LO register pair, two registers per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/* Reads USM_PKTS, USM_DROP and the USM_BYTES HI/LO pair. */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* MTUINDEX of 0xff selects a read of entry i. */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* ROWINDEX of 0xffff selects a read of (mtu, w). */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO address/data pair. */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.  Fills in fixed
 *	per-window alpha (additive increase) and beta (multiplicative
 *	decrease shift) tables of NCCTRL_WIN (32) entries each.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Assumed average packet count per congestion window size. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))  /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment scales with payload size
			 * (MTU minus 40 bytes of TCP/IP headers), with a
			 * floor of CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t4_set_pace_tbl - set the pace table
 *	@adap: the adapter
 *	@pace_vals: the pace values in microseconds
 *	@start: index of the first entry in the HW pace table to set
 *	@n: how many entries to set
 *
 *	Sets (a subset of the) HW pace table.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		if (vals[i] > 0x7ff)	/* converted value must fit in 11 bits */
			return -ERANGE;
		if (*pace_vals && vals[i] == 0)	/* non-zero input rounded to 0 */
			return -ERANGE;
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a Tx HW scheduler for the target rate.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		/* Search all clocks-per-tick (cpt) values for the
		 * bytes-per-tick (bpt) pair that yields the rate closest
		 * to the request. */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Two schedulers share one rate-limit register; the odd-numbered
	 * scheduler occupies the upper half-word. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @ipg: the interpacket delay in tenths of nanoseconds
 *
 * Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)	/* must fit in the HW field */
		return -EINVAL;

	/* Two schedulers share one timer-separator register; update only
	 * this scheduler's half. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	t4_read_reg(adap, A_TP_TM_PIO_DATA);	/* read back to flush */
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.
The formula is 5698 * 5699 * bytes/s = bytes256 * 256 * ClkFreq / 4096 5700 * 5701 * which is equivalent to 5702 * 5703 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 5704 */ 5705static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 5706{ 5707 u64 v = bytes256 * adap->params.vpd.cclk; 5708 5709 return v * 62 + v / 2; 5710} 5711 5712/** 5713 * t4_get_chan_txrate - get the current per channel Tx rates 5714 * @adap: the adapter 5715 * @nic_rate: rates for NIC traffic 5716 * @ofld_rate: rates for offloaded traffic 5717 * 5718 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 5719 * for each channel. 5720 */ 5721void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 5722{ 5723 u32 v; 5724 5725 v = t4_read_reg(adap, A_TP_TX_TRATE); 5726 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 5727 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 5728 if (adap->chip_params->nchan > 2) { 5729 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 5730 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 5731 } 5732 5733 v = t4_read_reg(adap, A_TP_TX_ORATE); 5734 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 5735 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 5736 if (adap->chip_params->nchan > 2) { 5737 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 5738 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 5739 } 5740} 5741 5742/** 5743 * t4_set_trace_filter - configure one of the tracing filters 5744 * @adap: the adapter 5745 * @tp: the desired trace filter parameters 5746 * @idx: which filter to configure 5747 * @enable: whether to enable or disable the filter 5748 * 5749 * Configures one of the tracing filters available in HW. If @tp is %NULL 5750 * it indicates that the filter is already written in the register and it 5751 * just needs to be enabled or disabled. 
5752 */ 5753int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 5754 int idx, int enable) 5755{ 5756 int i, ofst = idx * 4; 5757 u32 data_reg, mask_reg, cfg; 5758 u32 multitrc = F_TRCMULTIFILTER; 5759 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 5760 5761 if (idx < 0 || idx >= NTRACE) 5762 return -EINVAL; 5763 5764 if (tp == NULL || !enable) { 5765 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 5766 enable ? en : 0); 5767 return 0; 5768 } 5769 5770 /* 5771 * TODO - After T4 data book is updated, specify the exact 5772 * section below. 5773 * 5774 * See T4 data book - MPS section for a complete description 5775 * of the below if..else handling of A_MPS_TRC_CFG register 5776 * value. 5777 */ 5778 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 5779 if (cfg & F_TRCMULTIFILTER) { 5780 /* 5781 * If multiple tracers are enabled, then maximum 5782 * capture size is 2.5KB (FIFO size of a single channel) 5783 * minus 2 flits for CPL_TRACE_PKT header. 5784 */ 5785 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 5786 return -EINVAL; 5787 } else { 5788 /* 5789 * If multiple tracers are disabled, to avoid deadlocks 5790 * maximum packet capture size of 9600 bytes is recommended. 5791 * Also in this mode, only trace0 can be enabled and running. 5792 */ 5793 multitrc = 0; 5794 if (tp->snap_len > 9600 || idx) 5795 return -EINVAL; 5796 } 5797 5798 if (tp->port > (is_t4(adap) ? 
11 : 19) || tp->invert > 1 || 5799 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 5800 tp->min_len > M_TFMINPKTSIZE) 5801 return -EINVAL; 5802 5803 /* stop the tracer we'll be changing */ 5804 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 5805 5806 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 5807 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 5808 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 5809 5810 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 5811 t4_write_reg(adap, data_reg, tp->data[i]); 5812 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 5813 } 5814 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 5815 V_TFCAPTUREMAX(tp->snap_len) | 5816 V_TFMINPKTSIZE(tp->min_len)); 5817 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 5818 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 5819 (is_t4(adap) ? 5820 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 5821 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 5822 5823 return 0; 5824} 5825 5826/** 5827 * t4_get_trace_filter - query one of the tracing filters 5828 * @adap: the adapter 5829 * @tp: the current trace filter parameters 5830 * @idx: which trace filter to query 5831 * @enabled: non-zero if the filter is enabled 5832 * 5833 * Returns the current settings of one of the HW tracing filters. 
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* The enable/port/invert fields moved between T4 and T5+. */
	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* HW stores an inverted don't-care mask (see t4_set_trace_filter);
	 * undo the inversion and report only the bytes the mask covers. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 * t4_pmtx_get_stats - returns the HW stats from PMTX
 * @adap: the adapter
 * @cnt: where to store the count statistics
 * @cycles: where to store the cycle statistics
 *
 * Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select statistic i+1, then read its count. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: assemble the 64-bit cycle count from two
			 * 32-bit reads through the PM_TX debug window. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 * t4_pmrx_get_stats - returns the HW stats from PMRX
 * @adap: the adapter
 * @cnt: where to store the count statistics
 * @cycles: where to store the cycle statistics
 *
 * Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+: same two-halves readout as PMTX, via the
			 * PM_RX debug window. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 * t4_get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port. Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n;

	/* Prefer the firmware-provided map when available: one byte per
	 * port index. */
	if (adap->params.mps_bg_map)
		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);

	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
	if (n == 0)
		return idx == 0 ? 0xf : 0;	/* all 4 BGs on port 0 */
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;	/* 2 BGs per port */
	return 1 << idx;			/* one BG per port */
}

/*
 * TP RX e-channels associated with the port.
 */
static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	/* Same mapping rules as t4_get_mps_bg_map() but always computed
	 * from the MPS configuration register. */
	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by the firmware's enum fw_port_type values. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 * t4_get_port_stats_offset - collect port stats relative to a previous
 * snapshot
 * @adap: The adapter
 * @idx: The port
 * @stats: Current stats to fill
 * @offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	/* Read the absolute counters, then subtract the earlier snapshot
	 * field by field; struct port_stats is treated as an array of u64
	 * (assumes it contains only u64 members). */
	t4_get_port_stats(adap, idx, stats);
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the
stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Read one 64-bit MPS port statistic; the register block layout differs
 * between T4 and T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Read one 64-bit common (non per-port) MPS statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	if (chip_id(adap) >= CHELSIO_T5) {
		/* On T5+ the frame/byte counters may include pause frames,
		 * depending on the STAT_CTL knobs; back them out so the
		 * reported stats exclude pause traffic (64 octets are
		 * subtracted per pause frame). */
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	if (chip_id(adap) >= CHELSIO_T5) {
		/* Same pause-frame adjustment for the Rx direction. */
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Buffer-group drop/truncate counters, only for the BGs this
	 * port actually uses. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_get_lb_stats - collect loopback port statistics
 * @adap: the adapter
 * @idx: the loopback port index
 * @p: the stats structure to fill
 *
 * Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;

/* Read one 64-bit MPS loopback-port statistic (T4 vs T5+ layouts). */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Per-buffer-group loopback drop/truncate counters. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* The magic-MAC-ID and port-config registers live in different
	 * register blocks on T4 vs T5+. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* Pack the 6-byte MAC address into the LO (last 4 bytes)
		 * and HI (first 2 bytes) registers. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc. If @enable is %true pattern-based
 * WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	if (map > 0xff)		/* only 8 pattern filters exist */
		return -EINVAL;

/* EPIO register block: T4 vs T5+ layouts. */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1..DATA3 hold the upper mask words shared by all patterns. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* A negative qid suppresses the delete notification. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
		    cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/* Fill in the opcode/request/read-write header and length of a simple
 * firmware command structure. */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/* Write @val to firmware address space location @addr via a LDST command
 * on mailbox @mbox.  Returns 0 on success or a negative errno. */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to read
 * @valp: where to store the value
 *
 * Issues a FW command through the given mailbox to read a PHY register.
6325 */ 6326int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 6327 unsigned int mmd, unsigned int reg, unsigned int *valp) 6328{ 6329 int ret; 6330 u32 ldst_addrspace; 6331 struct fw_ldst_cmd c; 6332 6333 memset(&c, 0, sizeof(c)); 6334 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); 6335 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 6336 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6337 ldst_addrspace); 6338 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 6339 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | 6340 V_FW_LDST_CMD_MMD(mmd)); 6341 c.u.mdio.raddr = cpu_to_be16(reg); 6342 6343 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6344 if (ret == 0) 6345 *valp = be16_to_cpu(c.u.mdio.rval); 6346 return ret; 6347} 6348 6349/** 6350 * t4_mdio_wr - write a PHY register through MDIO 6351 * @adap: the adapter 6352 * @mbox: mailbox to use for the FW command 6353 * @phy_addr: the PHY address 6354 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 6355 * @reg: the register to write 6356 * @valp: value to write 6357 * 6358 * Issues a FW command through the given mailbox to write a PHY register. 
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	/* Build a LDST write command targeting the MDIO address space. */
	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adap: the adapter
 *	@state: the state idma is stuck in
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* IDMA state names for T4; indexed directly by the raw state value. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* IDMA state names for T5. */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* IDMA state names for T6. */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 * t4_sge_ctxt_flush - flush the SGE context cache
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a FW command through the given mailbox to flush the
 * SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	/*
	 * The flush request itself is carried in the CTXTFLUSH bit of the
	 * idctxt message; the command is issued as a READ of the SGE
	 * egress-context address space.
	 */
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If no Master PF had been selected when we sent the
			 * HELLO (master_mbox is the M_PCIE_FW_MASTER
			 * sentinel) but the firmware now advertises a valid
			 * Master PF, grab its identity for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* Put the uP into RESET, then latch the HALT flag for the
		 * next firmware to observe on startup.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* Hammer: full PIO reset of the chip, then wait for it to
		 * come back up.
		 */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear PCIE_FW.HALT, signalling it has restarted.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Bootstrap images are loaded without halting/restarting the
	 * running firmware.
	 */
	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}

/*
 * Card doesn't have a firmware, install one.
 */
int t4_fw_forceinstall(struct adapter *adap, const u8 *fw_data,
    unsigned int size)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int ret;

	/* Bootstrap images can't be force-installed this way. */
	if (!t4_fw_matches_chip(adap, fw_hdr) || bootstrap)
		return -EINVAL;

	/* Hold the uP in RESET while the image is written, then release
	 * the whole chip with a PIO reset so the new firmware boots.
	 */
	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
	t4_write_reg(adap, A_PCIE_FW, 0);	/* Clobber internal state */
	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;
	t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
	msleep(1000);

	return (0);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params_rw - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@rw: Write and read flag
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	/* The FW_PARAMS_CMD payload holds at most 7 (mnem, value) pairs. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Pack the mnemonics (and, if @rw, the values to write) into the
	 * command as alternating mnem/val 32-bit words.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* Harvest the returned values; they sit in every second
		 * 32-bit word of the reply, starting at param[0].val.
		 */
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}

/* Read-only convenience wrapper around t4_query_params_rw(). */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 *	t4_set_params_timeout - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *	@timeout: the timeout time
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	/* Same 7-pair limit as t4_query_params_rw(). */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Pack alternating (mnem, value) pairs into the command. */
	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.  Uses the default FW command timeout.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}

/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
				  V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
				     V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
				    V_FW_PFVF_CMD_PMASK(pmask) |
				    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
				      V_FW_PFVF_CMD_NVI(vi) |
				      V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
					   V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
					   V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi_func - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *	@portfunc: which Port Application Function MAC Address is desired
 *	@idstype: Intrusion Detection Type
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	/* portid_pkd and nmac are single-byte fields: no byte swap needed. */
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy the extra addresses, highest first; each case falls
		 * through to copy the lower-numbered ones as well.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}

/**
 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		u16 *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}

/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifier
 *
 *	Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
				  F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: -1 ("no change") maps to the all-ones
	 * mask value for each field.
	 */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen =
		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in chunks of at most ARRAY_SIZE(c.u.exact)
	 * per mailbox command.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
				be16_to_cpu(p->valid_to_idx));

			/* An index >= max_naddr means the firmware could
			 * not allocate an exact-match filter for this
			 * address.
			 */
			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only free existing filters on the first chunk. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)	/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* A returned index beyond the TCAM size means the
		 * allocation failed for want of space.
		 */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
7422 */ 7423int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 7424 bool ucast, u64 vec, bool sleep_ok) 7425{ 7426 struct fw_vi_mac_cmd c; 7427 u32 val; 7428 7429 memset(&c, 0, sizeof(c)); 7430 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 7431 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 7432 V_FW_VI_ENABLE_CMD_VIID(viid)); 7433 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 7434 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 7435 c.freemacs_to_len16 = cpu_to_be32(val); 7436 c.u.hash.hashvec = cpu_to_be64(vec); 7437 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 7438} 7439 7440/** 7441 * t4_enable_vi_params - enable/disable a virtual interface 7442 * @adap: the adapter 7443 * @mbox: mailbox to use for the FW command 7444 * @viid: the VI id 7445 * @rx_en: 1=enable Rx, 0=disable Rx 7446 * @tx_en: 1=enable Tx, 0=disable Tx 7447 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 7448 * 7449 * Enables/disables a virtual interface. Note that setting DCB Enable 7450 * only makes sense when enabling a Virtual Interface ... 7451 */ 7452int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 7453 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 7454{ 7455 struct fw_vi_enable_cmd c; 7456 7457 memset(&c, 0, sizeof(c)); 7458 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7459 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7460 V_FW_VI_ENABLE_CMD_VIID(viid)); 7461 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 7462 V_FW_VI_ENABLE_CMD_EEN(tx_en) | 7463 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | 7464 FW_LEN16(c)); 7465 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 7466} 7467 7468/** 7469 * t4_enable_vi - enable/disable a virtual interface 7470 * @adap: the adapter 7471 * @mbox: mailbox to use for the FW command 7472 * @viid: the VI id 7473 * @rx_en: 1=enable Rx, 0=disable Rx 7474 * @tx_en: 1=enable Tx, 0=disable Tx 7475 * 7476 * Enables/disables a virtual interface. 
Note that setting DCB Enable 7477 * only makes sense when enabling a Virtual Interface ... 7478 */ 7479int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 7480 bool rx_en, bool tx_en) 7481{ 7482 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 7483} 7484 7485/** 7486 * t4_identify_port - identify a VI's port by blinking its LED 7487 * @adap: the adapter 7488 * @mbox: mailbox to use for the FW command 7489 * @viid: the VI id 7490 * @nblinks: how many times to blink LED at 2.5 Hz 7491 * 7492 * Identifies a VI's port by blinking its LED. 7493 */ 7494int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 7495 unsigned int nblinks) 7496{ 7497 struct fw_vi_enable_cmd c; 7498 7499 memset(&c, 0, sizeof(c)); 7500 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7501 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7502 V_FW_VI_ENABLE_CMD_VIID(viid)); 7503 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 7504 c.blinkdur = cpu_to_be16(nblinks); 7505 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7506} 7507 7508/** 7509 * t4_iq_stop - stop an ingress queue and its FLs 7510 * @adap: the adapter 7511 * @mbox: mailbox to use for the FW command 7512 * @pf: the PF owning the queues 7513 * @vf: the VF owning the queues 7514 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7515 * @iqid: ingress queue id 7516 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7517 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7518 * 7519 * Stops an ingress queue and its associated FLs, if any. This causes 7520 * any current or future data/messages destined for these queues to be 7521 * tossed. 
 */
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	/* IQSTOP (vs. FREE in t4_iq_free): queue is stopped, not released. */
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_CTRL_CMD_PFN(pf) |
				  V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed by the firmware's Link Down Reason Code value. */
	static const char *reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/*
 * Updates all fields owned by the common code in port_info and link_config
 * based on information provided by the firmware.  Does not touch any
 * requested_* field.
 */
static void handle_port_info(struct port_info *pi, const struct fw_port_info *p)
{
	struct link_config *lc = &pi->link_cfg;
	int speed;
	unsigned char fc, fec;
	u32 stat = be32_to_cpu(p->lstatus_to_modtype);

	pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
	pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
	/* -1 means no MDIO address is available for this port. */
	pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
	    G_FW_PORT_CMD_MDIOADDR(stat) : -1;

	lc->supported = be16_to_cpu(p->pcap);
	lc->advertising = be16_to_cpu(p->acap);
	lc->lp_advertising = be16_to_cpu(p->lpacap);
	lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
	lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);

	/* Decode the single advertised link speed bit into Mb/s. */
	speed = 0;
	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		speed = 100;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		speed = 1000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		speed = 10000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
		speed = 25000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
		speed = 40000;
	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
		speed = 100000;
	lc->speed = speed;

	fc = 0;
	if (stat & F_FW_PORT_CMD_RXPAUSE)
		fc |= PAUSE_RX;
	if (stat & F_FW_PORT_CMD_TXPAUSE)
		fc |= PAUSE_TX;
	lc->fc = fc;

	/*
	 * FEC is derived from what we advertised, not from the link status
	 * word; RS takes precedence over BASE-R RS.
	 */
	fec = 0;
	if (lc->advertising & FW_PORT_CAP_FEC_RS)
		fec = FEC_RS;
	else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
		fec = FEC_BASER_RS;
	lc->fec = fec;
}

/**
 *	t4_update_port_info - retrieve and update port information if changed
 *	@pi: the port_info
 *
 *	We issue a Get Port Information Command to the Firmware and, if
 *	successful, we check to see if anything is different from what we
 *	last recorded and update things accordingly.
 */
int t4_update_port_info(struct port_info *pi)
{
	struct fw_port_cmd port_cmd;
	int ret;

	memset(&port_cmd, 0, sizeof port_cmd);
	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
	port_cmd.action_to_len16 = cpu_to_be32(
	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
	    FW_LEN16(port_cmd));
	ret = t4_wr_mbox_ns(pi->adapter, pi->adapter->mbox,
			    &port_cmd, sizeof(port_cmd), &port_cmd);
	if (ret)
		return ret;

	handle_port_info(pi, &port_cmd.u.info);
	return 0;
}

/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int i, old_ptype, old_mtype;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc, *old_lc;

		/*
		 * NOTE(review): if no port's tx_chan matches @chan, pi is
		 * left pointing at the last port — presumably the firmware
		 * only reports channels we own; confirm.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		lc = &pi->link_cfg;
		PORT_LOCK(pi);
		old_lc = &pi->old_link_cfg;
		old_ptype = pi->port_type;
		old_mtype = pi->mod_type;
		handle_port_info(pi, &p->u.info);
		PORT_UNLOCK(pi);
		/* Transceiver module changed?  Notify outside the lock. */
		if (old_ptype != pi->port_type || old_mtype != pi->mod_type) {
			t4_os_portmod_changed(pi);
		}
		PORT_LOCK(pi);
		if (old_lc->link_ok != lc->link_ok ||
		    old_lc->speed != lc->speed ||
		    old_lc->fec != lc->fec ||
		    old_lc->fc != lc->fc) {
			t4_os_link_changed(pi);
			*old_lc = *lc;
		}
		PORT_UNLOCK(pi);
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter,
			 struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/* Maps a Flash Read ID result to the part's total size in bytes. */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;

	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}

/*
 * Program the PCIe Completion Timeout range encoding (low 4 bits of the
 * Device Control 2 register); the caller chooses the @range value.
 */
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/*
 * Returns the per-generation chip parameters for @chipid (CHELSIO_T4..T6),
 * or NULL if the chip id is out of range.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
	};

	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}

/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
	for_each_port(adapter, port) {
		/* T4 and T5+ use different per-port MAC config registers. */
		u32 a_port_cfg = is_t4(adapter) ?
				 PORT_REG(port, A_XGMAC_PORT_CFG) :
				 T5_PORT_REG(port, A_MAC_PORT_CFG);

		/* Clearing SIGNAL_DET forces the link down. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~V_SIGNAL_DET(1));
	}
	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);

	return 0;
}

/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.
 *	If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel
	 * mode queues.
	 */
	if (!user && is_t4(adapter))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.page_shift;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_s_qpp
		     : adapter->params.sge.iq_s_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
	    t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* Register stores the entry count in units of 128 entries. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
	    be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}

/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ applies an extra timer scale factor (TSCALE) to the timers. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	/* Field encodes log2(page size) - 10. */
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}

/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap,
    bool sleep_ok)
{
	struct tp_params *tpp = &adap->params.tp;

	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	    sleep_ok);
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
	    sleep_ok);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;
}

/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			tpp->err_vec_mask =
			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
		}
	}

	return 0;
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	/*
	 * Sum the widths of every enabled field below @filter_sel; the
	 * result is where @filter_sel's field begins in the tuple.
	 */
	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	/* Find the (port_id + 1)'th set bit in portvec; j is its index. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		t4_update_port_info(p);
	}

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
	else
		p->vi[0].smt_idx = (ret & 0x7f);
	p->tx_chan = j;
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
	p->lport = j;
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(p, addr);

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8;	/* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0..5 exist; reads must be whole multiples of 4 words. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable IBQ debug access when done. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Select the OBQ and read its base/size from the config register. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable OBQ debug access when done. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

/* CIM internal address space layout. */
enum {
	CIM_QCTL_BASE = 0,
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	/* Bail out if another CIM host access is already in flight. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	/* Data must be written before the address/write-enable control. */
	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single word into CIM internal space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	/* Walk the whole buffer one entry at a time via the read pointer. */
	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* HW clears the read-enable bit when the entry is ready. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* Restart the LA if it was running on entry; keep the first error. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)	/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In modes >= 2 a half-filled last entry is not valid data. */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Clear the read pointer field, keep the configured LA mask. */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)	/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/*
 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Return the current configuration of a HW Tx scheduler.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each 32-bit register; odd ones use
		 * the upper half-word.
		 */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;
		cpt = v & 0xff;
		if (!cpt)
			*kbps = 0;	/* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t5_fw_init_extern_mem - initialize the external memory
 *	@adap: the adapter
 *
 *	Initializes the external memory on T5.
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
	u32 params[1], val[1];
	int ret;

	if (!is_t5(adap))
		return 0;

	val[0] = 0xff;		/* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
			FW_CMD_MAX_TIMEOUT);

	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];	/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];	/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length. Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;			/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC,	/* 1 byte * length increment */
	VENDOR_ID = 0x1425,		/* Vendor ID */
	PCIR_SIGNATURE = 0x52494350	/* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 * t4_load_boot - download boot flash
 * @adapter: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024);
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Write the deferred first page (header) last. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/*
 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 * @adapter: the adapter
 *
 * Return the address within the flash where the OptionROM Configuration
 * is stored, or an error if the device FLASH is too small to contain
 * a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_BOOTCFG_START;
}

/*
 * t4_load_bootcfg - write the supplied OptionROM configuration to flash
 * @adap: the adapter
 * @cfg_data: the configuration data to write (erase-only when @size == 0)
 * @size: configuration data size in bytes
 */
int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_set_filter_mode - configure the optional components of filter tuples
 *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets the filter mode by selecting the optional components to enable
 *	in filter tuples.  Returns 0 on success and a negative error if the
 *	requested mode needs more bits than are available for optional
 *	components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
		       bool sleep_ok)
{
	/* Per-field widths, indexed S_FCOE..S_FRAGMENTATION. */
	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;
	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
	read_filter_mode_and_ingress_config(adap, sleep_ok);

	return 0;
}

/**
 *	t4_clr_port_stats - clear port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Clear HW statistics for the given port.
9538 */ 9539void t4_clr_port_stats(struct adapter *adap, int idx) 9540{ 9541 unsigned int i; 9542 u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map; 9543 u32 port_base_addr; 9544 9545 if (is_t4(adap)) 9546 port_base_addr = PORT_BASE(idx); 9547 else 9548 port_base_addr = T5_PORT_BASE(idx); 9549 9550 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 9551 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 9552 t4_write_reg(adap, port_base_addr + i, 0); 9553 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 9554 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 9555 t4_write_reg(adap, port_base_addr + i, 0); 9556 for (i = 0; i < 4; i++) 9557 if (bgmap & (1 << i)) { 9558 t4_write_reg(adap, 9559 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 9560 t4_write_reg(adap, 9561 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 9562 } 9563} 9564 9565/** 9566 * t4_i2c_rd - read I2C data from adapter 9567 * @adap: the adapter 9568 * @port: Port number if per-port device; <0 if not 9569 * @devid: per-port device ID or absolute device ID 9570 * @offset: byte offset into device I2C space 9571 * @len: byte length of I2C space data 9572 * @buf: buffer in which to return I2C data 9573 * 9574 * Reads the I2C data from the indicated device and location. 9575 */ 9576int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 9577 int port, unsigned int devid, 9578 unsigned int offset, unsigned int len, 9579 u8 *buf) 9580{ 9581 u32 ldst_addrspace; 9582 struct fw_ldst_cmd ldst; 9583 int ret; 9584 9585 if (port >= 4 || 9586 devid >= 256 || 9587 offset >= 256 || 9588 len > sizeof ldst.u.i2c.data) 9589 return -EINVAL; 9590 9591 memset(&ldst, 0, sizeof ldst); 9592 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); 9593 ldst.op_to_addrspace = 9594 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 9595 F_FW_CMD_REQUEST | 9596 F_FW_CMD_READ | 9597 ldst_addrspace); 9598 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 9599 ldst.u.i2c.pid = (port < 0 ? 
0xff : port); 9600 ldst.u.i2c.did = devid; 9601 ldst.u.i2c.boffset = offset; 9602 ldst.u.i2c.blen = len; 9603 ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 9604 if (!ret) 9605 memcpy(buf, ldst.u.i2c.data, len); 9606 return ret; 9607} 9608 9609/** 9610 * t4_i2c_wr - write I2C data to adapter 9611 * @adap: the adapter 9612 * @port: Port number if per-port device; <0 if not 9613 * @devid: per-port device ID or absolute device ID 9614 * @offset: byte offset into device I2C space 9615 * @len: byte length of I2C space data 9616 * @buf: buffer containing new I2C data 9617 * 9618 * Write the I2C data to the indicated device and location. 9619 */ 9620int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 9621 int port, unsigned int devid, 9622 unsigned int offset, unsigned int len, 9623 u8 *buf) 9624{ 9625 u32 ldst_addrspace; 9626 struct fw_ldst_cmd ldst; 9627 9628 if (port >= 4 || 9629 devid >= 256 || 9630 offset >= 256 || 9631 len > sizeof ldst.u.i2c.data) 9632 return -EINVAL; 9633 9634 memset(&ldst, 0, sizeof ldst); 9635 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C); 9636 ldst.op_to_addrspace = 9637 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 9638 F_FW_CMD_REQUEST | 9639 F_FW_CMD_WRITE | 9640 ldst_addrspace); 9641 ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst)); 9642 ldst.u.i2c.pid = (port < 0 ? 0xff : port); 9643 ldst.u.i2c.did = devid; 9644 ldst.u.i2c.boffset = offset; 9645 ldst.u.i2c.blen = len; 9646 memcpy(ldst.u.i2c.data, buf, len); 9647 return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst); 9648} 9649 9650/** 9651 * t4_sge_ctxt_rd - read an SGE context through FW 9652 * @adap: the adapter 9653 * @mbox: mailbox to use for the FW command 9654 * @cid: the context id 9655 * @ctype: the context type 9656 * @data: where to store the context data 9657 * 9658 * Issues a FW command through the given mailbox to read an SGE context. 
9659 */ 9660int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 9661 enum ctxt_type ctype, u32 *data) 9662{ 9663 int ret; 9664 struct fw_ldst_cmd c; 9665 9666 if (ctype == CTXT_EGRESS) 9667 ret = FW_LDST_ADDRSPC_SGE_EGRC; 9668 else if (ctype == CTXT_INGRESS) 9669 ret = FW_LDST_ADDRSPC_SGE_INGC; 9670 else if (ctype == CTXT_FLM) 9671 ret = FW_LDST_ADDRSPC_SGE_FLMC; 9672 else 9673 ret = FW_LDST_ADDRSPC_SGE_CONMC; 9674 9675 memset(&c, 0, sizeof(c)); 9676 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 9677 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9678 V_FW_LDST_CMD_ADDRSPACE(ret)); 9679 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 9680 c.u.idctxt.physid = cpu_to_be32(cid); 9681 9682 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 9683 if (ret == 0) { 9684 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); 9685 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); 9686 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); 9687 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); 9688 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); 9689 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); 9690 } 9691 return ret; 9692} 9693 9694/** 9695 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 9696 * @adap: the adapter 9697 * @cid: the context id 9698 * @ctype: the context type 9699 * @data: where to store the context data 9700 * 9701 * Reads an SGE context directly, bypassing FW. This is only for 9702 * debugging when FW is unavailable. 
9703 */ 9704int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 9705 u32 *data) 9706{ 9707 int i, ret; 9708 9709 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 9710 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 9711 if (!ret) 9712 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 9713 *data++ = t4_read_reg(adap, i); 9714 return ret; 9715} 9716 9717int t4_sched_config(struct adapter *adapter, int type, int minmaxen, 9718 int sleep_ok) 9719{ 9720 struct fw_sched_cmd cmd; 9721 9722 memset(&cmd, 0, sizeof(cmd)); 9723 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9724 F_FW_CMD_REQUEST | 9725 F_FW_CMD_WRITE); 9726 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9727 9728 cmd.u.config.sc = FW_SCHED_SC_CONFIG; 9729 cmd.u.config.type = type; 9730 cmd.u.config.minmaxen = minmaxen; 9731 9732 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9733 NULL, sleep_ok); 9734} 9735 9736int t4_sched_params(struct adapter *adapter, int type, int level, int mode, 9737 int rateunit, int ratemode, int channel, int cl, 9738 int minrate, int maxrate, int weight, int pktsize, 9739 int sleep_ok) 9740{ 9741 struct fw_sched_cmd cmd; 9742 9743 memset(&cmd, 0, sizeof(cmd)); 9744 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9745 F_FW_CMD_REQUEST | 9746 F_FW_CMD_WRITE); 9747 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9748 9749 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 9750 cmd.u.params.type = type; 9751 cmd.u.params.level = level; 9752 cmd.u.params.mode = mode; 9753 cmd.u.params.ch = channel; 9754 cmd.u.params.cl = cl; 9755 cmd.u.params.unit = rateunit; 9756 cmd.u.params.rate = ratemode; 9757 cmd.u.params.min = cpu_to_be32(minrate); 9758 cmd.u.params.max = cpu_to_be32(maxrate); 9759 cmd.u.params.weight = cpu_to_be16(weight); 9760 cmd.u.params.pktsize = cpu_to_be16(pktsize); 9761 9762 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9763 NULL, sleep_ok); 9764} 9765 
9766int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode, 9767 unsigned int maxrate, int sleep_ok) 9768{ 9769 struct fw_sched_cmd cmd; 9770 9771 memset(&cmd, 0, sizeof(cmd)); 9772 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9773 F_FW_CMD_REQUEST | 9774 F_FW_CMD_WRITE); 9775 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9776 9777 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 9778 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; 9779 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL; 9780 cmd.u.params.ch = channel; 9781 cmd.u.params.rate = ratemode; /* REL or ABS */ 9782 cmd.u.params.max = cpu_to_be32(maxrate);/* % or kbps */ 9783 9784 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9785 NULL, sleep_ok); 9786} 9787 9788int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl, 9789 int weight, int sleep_ok) 9790{ 9791 struct fw_sched_cmd cmd; 9792 9793 if (weight < 0 || weight > 100) 9794 return -EINVAL; 9795 9796 memset(&cmd, 0, sizeof(cmd)); 9797 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9798 F_FW_CMD_REQUEST | 9799 F_FW_CMD_WRITE); 9800 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9801 9802 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 9803 cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED; 9804 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 9805 cmd.u.params.ch = channel; 9806 cmd.u.params.cl = cl; 9807 cmd.u.params.weight = cpu_to_be16(weight); 9808 9809 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9810 NULL, sleep_ok); 9811} 9812 9813int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl, 9814 int mode, unsigned int maxrate, int pktsize, int sleep_ok) 9815{ 9816 struct fw_sched_cmd cmd; 9817 9818 memset(&cmd, 0, sizeof(cmd)); 9819 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 9820 F_FW_CMD_REQUEST | 9821 F_FW_CMD_WRITE); 9822 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 9823 9824 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 9825 cmd.u.params.type = 
FW_SCHED_TYPE_PKTSCHED; 9826 cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL; 9827 cmd.u.params.mode = mode; 9828 cmd.u.params.ch = channel; 9829 cmd.u.params.cl = cl; 9830 cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE; 9831 cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS; 9832 cmd.u.params.max = cpu_to_be32(maxrate); 9833 cmd.u.params.pktsize = cpu_to_be16(pktsize); 9834 9835 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 9836 NULL, sleep_ok); 9837} 9838 9839/* 9840 * t4_config_watchdog - configure (enable/disable) a watchdog timer 9841 * @adapter: the adapter 9842 * @mbox: mailbox to use for the FW command 9843 * @pf: the PF owning the queue 9844 * @vf: the VF owning the queue 9845 * @timeout: watchdog timeout in ms 9846 * @action: watchdog timer / action 9847 * 9848 * There are separate watchdog timers for each possible watchdog 9849 * action. Configure one of the watchdog timers by setting a non-zero 9850 * timeout. Disable a watchdog timer by using a timeout of zero. 9851 */ 9852int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, 9853 unsigned int pf, unsigned int vf, 9854 unsigned int timeout, unsigned int action) 9855{ 9856 struct fw_watchdog_cmd wdog; 9857 unsigned int ticks; 9858 9859 /* 9860 * The watchdog command expects a timeout in units of 10ms so we need 9861 * to convert it here (via rounding) and force a minimum of one 10ms 9862 * "tick" if the timeout is non-zero but the conversion results in 0 9863 * ticks. 
9864 */ 9865 ticks = (timeout + 5)/10; 9866 if (timeout && !ticks) 9867 ticks = 1; 9868 9869 memset(&wdog, 0, sizeof wdog); 9870 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | 9871 F_FW_CMD_REQUEST | 9872 F_FW_CMD_WRITE | 9873 V_FW_PARAMS_CMD_PFN(pf) | 9874 V_FW_PARAMS_CMD_VFN(vf)); 9875 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); 9876 wdog.timeout = cpu_to_be32(ticks); 9877 wdog.action = cpu_to_be32(action); 9878 9879 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); 9880} 9881 9882int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) 9883{ 9884 struct fw_devlog_cmd devlog_cmd; 9885 int ret; 9886 9887 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9888 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9889 F_FW_CMD_REQUEST | F_FW_CMD_READ); 9890 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9891 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9892 sizeof(devlog_cmd), &devlog_cmd); 9893 if (ret) 9894 return ret; 9895 9896 *level = devlog_cmd.level; 9897 return 0; 9898} 9899 9900int t4_set_devlog_level(struct adapter *adapter, unsigned int level) 9901{ 9902 struct fw_devlog_cmd devlog_cmd; 9903 9904 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9905 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9906 F_FW_CMD_REQUEST | 9907 F_FW_CMD_WRITE); 9908 devlog_cmd.level = level; 9909 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9910 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9911 sizeof(devlog_cmd), &devlog_cmd); 9912} 9913