/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgbe/common/t4_hw.c 252747 2013-07-05 01:53:51Z np $");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
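
/*
 * Illustrative sketch only, compiled out unless the hypothetical
 * T4_HW_EXAMPLES macro is defined: polling a ready bit with the
 * t4_wait_op_done() wrapper used throughout this file, which is simply
 * t4_wait_op_done_val() with a NULL @valp.  Here we wait up to 10 polls,
 * 5 usecs apart, for the serial flash interface to go idle (F_BUSY == 0).
 */
#ifdef T4_HW_EXAMPLES
static int example_wait_sf_idle(struct adapter *adap)
{
	/* reg, mask, polarity 0 (wait for clear), 10 attempts, 5us apart */
	return t4_wait_op_done(adap, A_SF_OP, F_BUSY, 0, 10, 5);
}
#endif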

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}
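
/*
 * Sketch (hypothetical T4_HW_EXAMPLES guard): the backdoor read above can
 * fetch any config space dword; offset 0 holds the Vendor/Device ID pair,
 * so a hypervisor-proof identity check could look like this.
 */
#ifdef T4_HW_EXAMPLES
static int example_check_vendor(adapter_t *adap)
{
	u32 id = t4_hw_pci_read_cfg4(adap, 0);	/* offset 0: Vendor/Device ID */

	return (id & 0xffff) == 0x1425;	/* Chelsio's PCI Vendor ID */
}
#endif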

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * This routine prints out the reason for the firmware error (as
 * reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays, up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
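
/*
 * Minimal usage sketch (hypothetical T4_HW_EXAMPLES guard), assuming the
 * usual t4_wr_mbox() convenience wrapper from common.h (t4_wr_mbox_meat()
 * with sleep_ok set): issue a FW RESET command and let the routine above
 * handle mailbox ownership, the reply, and the FW retval.  The structure,
 * flag, and field names are assumed to come from t4fw_interface.h and
 * t4_regs.h as in the rest of the shared code.
 */
#ifdef T4_HW_EXAMPLES
static int example_fw_reset(struct adapter *adap, unsigned int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
			      F_FW_CMD_WRITE);
	c.retval_len16 = htonl(V_FW_CMD_LEN16(sizeof(c) / 16));
	c.val = htonl(F_PIORST);	/* the reset to request of the FW */

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
#endif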

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it is
 *	assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it is
 *	assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from t4_regs.h.  Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it is the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
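
/*
 * Usage sketch (hypothetical T4_HW_EXAMPLES guard): read 128 bytes from
 * EDC0 at a 32-bit aligned offset.  MEM_EDC0 is assumed to be the usual
 * memory-type constant from the shared-code headers; the buffer receives
 * raw big-endian bytes as described above.
 */
#ifdef T4_HW_EXAMPLES
static int example_read_edc0(struct adapter *adap)
{
	__be32 buf[32];		/* 128 bytes, 32-bit aligned as required */

	return t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
}
#endif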

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
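
/*
 * Read-modify-write sketch (hypothetical T4_HW_EXAMPLES guard) for one
 * 32-bit EEPROM word: addresses must be 4-byte aligned virtual addresses,
 * and EEPROM write protection (see t4_seeprom_wp() below) must be off for
 * the write to take effect.
 */
#ifdef T4_HW_EXAMPLES
static int example_seeprom_set_bits(struct adapter *adapter, u32 addr, u32 bits)
{
	u32 word;
	int ret;

	ret = t4_seeprom_read(adapter, addr, &word);
	if (ret == 0)
		ret = t4_seeprom_write(adapter, addr, word | bits);
	return ret;
}
#endif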

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
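
/*
 * Worked example of the mapping above (illustrative values: @fn = 2 and
 * @sz = 1024, so A = 2048; ES is the EEPROMSIZE constant):
 *
 *	t4_eeprom_ptov(0x100, 2, 1024)              = 0x100 + (31 << 10)
 *	t4_eeprom_ptov(1024 + 100, 2, 1024)         = EEPROMSIZE - 2048 + 100
 *	t4_eeprom_ptov(1024 + 2048 + 100, 2, 1024)  = 100
 */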

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset within the VPD of the value of the information
 *	field identified by @kw, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}
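
/*
 * For illustration (hypothetical T4_HW_EXAMPLES guard): the shape of one
 * VPD-R information field that the loop above walks.  The function returns
 * the offset of data[], i.e. the start of the value; get_vpd_params() below
 * reaches back (offset - VPD_INFO_FLD_HDR_SIZE + 2) to pick up the length.
 */
#ifdef T4_HW_EXAMPLES
struct vpd_info_fld {
	u8 keyword[2];	/* e.g. "SN" */
	u8 len;		/* number of value bytes */
	u8 data[];	/* the value itself */
};
#endif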

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID        = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
			     tp_microcode_ver), 1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		       chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {	/* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;	/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		       "FW image (%d) is not suitable for this adapter (%d)\n",
		       hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}
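
/*
 * The checksum rule enforced above, as a standalone sketch (hypothetical
 * T4_HW_EXAMPLES guard): a valid image's big-endian 32-bit words must sum
 * to 0xffffffff, so an image builder reserves one word to make the total
 * come out right and the check here is a plain sum.
 */
#ifdef T4_HW_EXAMPLES
static int example_fw_image_ok(const u8 *fw_data, unsigned int size)
{
	const u32 *p = (const u32 *)fw_data;
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < size / sizeof(*p); i++)
		sum += ntohl(p[i]);
	return sum == 0xffffffff;
}
#endif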

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature.  Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];	/* ROM signature.  The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512.  Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header.  0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8];	/* Reserved */
	u8	efi_image_header_offset[2];	/* Offset to EFI Image */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature.  The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2];	/* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2];	/* Image Length.  Multiple of 512B */
	u8	code_revision[2];	/* Revision Level of Code/Data */
	u8	code_type;	/* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible.  Legacy
		 * 0x01: Open Firmware standard for PCI.  FCODE
		 * 0x02: Hewlett-Packard PA RISC.  HP reserved
		 * 0x03: EFI Image.  EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator;	/* Indicator.  Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,	/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,	/* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,		/* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t),	/* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC,	/* 1 byte * length increment */
	VENDOR_ID = 0x1425,		/* Vendor ID */
	PCIR_SIGNATURE = 0x52494350	/* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE.  Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
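
/*
 * The legacy-ROM checksum rule used above, as a standalone sketch
 * (hypothetical T4_HW_EXAMPLES guard): all bytes of a legacy image sum to
 * zero mod 256, which is why modify_device_id() zeroes header->cksum,
 * re-sums the image, and stores the negated sum at byte 7.
 */
#ifdef T4_HW_EXAMPLES
static u8 example_legacy_rom_cksum(const u8 *image, unsigned int size512)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i < size512 * 512; i++)
		sum += image[i];
	return sum;	/* 0 for a valid image */
}
#endif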

/*
 * t4_load_boot - download boot flash
 * @adap: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID of boot image doesn't match Chelsio's\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image.  This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}
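
/*
 * Caller-side sketch (hypothetical T4_HW_EXAMPLES guard): the routine
 * above fills IBQ entries first, then OBQ entries, and writes @thres for
 * the IBQs only, so the arrays are sized accordingly (using the larger T5
 * OBQ count for the worst case).
 */
#ifdef T4_HW_EXAMPLES
static void example_dump_cimq_cfg(struct adapter *adap)
{
	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	u16 thres[CIM_NUM_IBQ];

	t4_read_cimq_cfg(adap, base, size, thres);
}
#endif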

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10ms before the IBQ debug read access is
		 * allowed.  Wait for up to 1 second, polling every 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;	/* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;	/* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE = 0,
	CIM_CTL_BASE = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
1743
1744void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1745 unsigned int *pif_req_wrptr,
1746 unsigned int *pif_rsp_wrptr)
1747{
1748 int i, j;
1749 u32 cfg, val, req, rsp;
1750
1751 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1752 if (cfg & F_LADBGEN)
1753 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1754
1755 val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1756 req = G_POLADBGWRPTR(val);
1757 rsp = G_PILADBGWRPTR(val);
1758 if (pif_req_wrptr)
1759 *pif_req_wrptr = req;
1760 if (pif_rsp_wrptr)
1761 *pif_rsp_wrptr = rsp;
1762
1763 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1764 for (j = 0; j < 6; j++) {
1765 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1766 V_PILADBGRDPTR(rsp));
1767 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1768 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1769 req++;
1770 rsp++;
1771 }
1772 req = (req + 2) & M_POLADBGRDPTR;
1773 rsp = (rsp + 2) & M_PILADBGRDPTR;
1774 }
1775 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1776}
1777
1778void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1779{
1780 u32 cfg;
1781 int i, j, idx;
1782
1783 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1784 if (cfg & F_LADBGEN)
1785 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1786
1787 for (i = 0; i < CIM_MALA_SIZE; i++) {
1788 for (j = 0; j < 5; j++) {
1789 idx = 8 * i + j;
1790 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1791 V_PILADBGRDPTR(idx));
1792 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1793 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1794 }
1795 }
1796 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1797}
1798
1799/**
1800 * t4_tp_read_la - read TP LA capture buffer
1801 * @adap: the adapter
1802 * @la_buf: where to store the LA data
1803 * @wrptr: the HW write pointer within the capture buffer
1804 *
1805 * Reads the contents of the TP LA buffer with the most recent entry at
1806 * the end of the returned data and with the entry at @wrptr first.
1807 * We leave the LA in the running state we find it in.
1808 */
1809void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1810{
1811 bool last_incomplete;
1812 unsigned int i, cfg, val, idx;
1813
1814 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1815 if (cfg & F_DBGLAENABLE) /* freeze LA */
1816 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1817 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1818
1819 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1820 idx = G_DBGLAWPTR(val);
1821 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1822 if (last_incomplete)
1823 idx = (idx + 1) & M_DBGLARPTR;
1824 if (wrptr)
1825 *wrptr = idx;
1826
1827 val &= 0xffff;
1828 val &= ~V_DBGLARPTR(M_DBGLARPTR);
1829 val |= adap->params.tp.la_mask;
1830
1831 for (i = 0; i < TPLA_SIZE; i++) {
1832 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1833 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1834 idx = (idx + 1) & M_DBGLARPTR;
1835 }
1836
1837 /* Wipe out last entry if it isn't valid */
1838 if (last_incomplete)
1839 la_buf[TPLA_SIZE - 1] = ~0ULL;
1840
1841 if (cfg & F_DBGLAENABLE) /* restore running state */
1842 t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1843 cfg | adap->params.tp.la_mask);
1844}
1845
1846void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1847{
1848 unsigned int i, j;
1849
1850 for (i = 0; i < 8; i++) {
1851 u32 *p = la_buf + i;
1852
1853 t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1854 j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1855 t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1856 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1857 *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1858 }
1859}
1860
1861#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1862 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1863 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1864
1865/**
1866 * t4_link_start - apply link configuration to MAC/PHY
1867 * @phy: the PHY to setup
1868 * @mac: the MAC to setup
1869 * @lc: the requested link configuration
1870 *
1871 * Set up a port's MAC and PHY according to a desired link configuration.
1872 * - If the PHY can auto-negotiate, first decide what to advertise, then
1873 * enable/disable auto-negotiation as desired, and reset.
1874 * - If the PHY does not auto-negotiate, just reset it.
1875 * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
1876 * otherwise do it later based on the outcome of auto-negotiation.
1877 */
1878int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1879 struct link_config *lc)
1880{
1881 struct fw_port_cmd c;
1882 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1883
1884 lc->link_ok = 0;
1885 if (lc->requested_fc & PAUSE_RX)
1886 fc |= FW_PORT_CAP_FC_RX;
1887 if (lc->requested_fc & PAUSE_TX)
1888 fc |= FW_PORT_CAP_FC_TX;
1889
1890 memset(&c, 0, sizeof(c));
1891 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1892 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1893 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1894 FW_LEN16(c));
1895
1896 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1897 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1898 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1899 } else if (lc->autoneg == AUTONEG_DISABLE) {
1900 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1901 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1902 } else
1903 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1904
1905 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1906}
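
/*
 * Editorial sketch: a caller that wants a fixed 10G link with pause frames
 * and no autonegotiation would fill in the link_config consumed above
 * before issuing the command ("pi" and "sc" are illustrative driver state,
 * not names defined in this file):
 *
 *	struct link_config *lc = &pi->link_cfg;
 *
 *	lc->autoneg = AUTONEG_DISABLE;
 *	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	rc = t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
 *
 * With autonegotiation disabled the requested speed and pause settings are
 * programmed directly; otherwise lc->advertising is sent and the MAC is
 * configured later from the negotiated result.
 */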
1907
1908/**
1909 * t4_restart_aneg - restart autonegotiation
1910 * @adap: the adapter
1911 * @mbox: mbox to use for the FW command
1912 * @port: the port id
1913 *
1914 * Restarts autonegotiation for the selected port.
1915 */
1916int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1917{
1918 struct fw_port_cmd c;
1919
1920 memset(&c, 0, sizeof(c));
1921 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1922 F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1923 c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1924 FW_LEN16(c));
1925 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1926 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1927}
1928
1929struct intr_info {
1930 unsigned int mask; /* bits to check in interrupt status */
1931 const char *msg; /* message to print or NULL */
1932 short stat_idx; /* stat counter to increment or -1 */
1933 unsigned short fatal; /* whether the condition reported is fatal */
1934};
1935
1936/**
1937 * t4_handle_intr_status - table driven interrupt handler
1938 * @adapter: the adapter that generated the interrupt
1939 * @reg: the interrupt status register to process
1940 * @acts: table of interrupt actions
1941 *
1942 * A table driven interrupt handler that applies a set of masks to an
1943 * interrupt status word and performs the corresponding actions if the
1944 * interrupts described by the mask have occurred. The actions include
1945 * optionally emitting a warning or alert message. The table is terminated
1946 * by an entry specifying mask 0. Returns the number of fatal interrupt
1947 * conditions.
1948 */
1949static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1950 const struct intr_info *acts)
1951{
1952 int fatal = 0;
1953 unsigned int mask = 0;
1954 unsigned int status = t4_read_reg(adapter, reg);
1955
1956 for ( ; acts->mask; ++acts) {
1957 if (!(status & acts->mask))
1958 continue;
1959 if (acts->fatal) {
1960 fatal++;
1961 CH_ALERT(adapter, "%s (0x%x)\n",
1962 acts->msg, status & acts->mask);
1963 } else if (acts->msg)
1964 CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1965 acts->msg, status & acts->mask);
1966 mask |= acts->mask;
1967 }
1968 status &= mask;
1969 if (status) /* clear processed interrupts */
1970 t4_write_reg(adapter, reg, status);
1971 return fatal;
1972}
1973
1974/*
1975 * Interrupt handler for the PCIE module.
1976 */
1977static void pcie_intr_handler(struct adapter *adapter)
1978{
1979 static struct intr_info sysbus_intr_info[] = {
1980 { F_RNPP, "RXNP array parity error", -1, 1 },
1981 { F_RPCP, "RXPC array parity error", -1, 1 },
1982 { F_RCIP, "RXCIF array parity error", -1, 1 },
1983 { F_RCCP, "Rx completions control array parity error", -1, 1 },
1984 { F_RFTP, "RXFT array parity error", -1, 1 },
1985 { 0 }
1986 };
1987 static struct intr_info pcie_port_intr_info[] = {
1988 { F_TPCP, "TXPC array parity error", -1, 1 },
1989 { F_TNPP, "TXNP array parity error", -1, 1 },
1990 { F_TFTP, "TXFT array parity error", -1, 1 },
1991 { F_TCAP, "TXCA array parity error", -1, 1 },
1992 { F_TCIP, "TXCIF array parity error", -1, 1 },
1993 { F_RCAP, "RXCA array parity error", -1, 1 },
1994 { F_OTDD, "outbound request TLP discarded", -1, 1 },
1995 { F_RDPE, "Rx data parity error", -1, 1 },
1996 { F_TDUE, "Tx uncorrectable data error", -1, 1 },
1997 { 0 }
1998 };
1999 static struct intr_info pcie_intr_info[] = {
2000 { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2001 { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2002 { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2003 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2004 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2005 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2006 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2007 { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2008 { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2009 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2010 { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2011 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2012 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2013 { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2014 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2015 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2016 { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2017 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2018 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2019 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2020 { F_FIDPERR, "PCI FID parity error", -1, 1 },
2021 { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2022 { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2023 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2024 { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2025 { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2026 { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2027 { F_PCIESINT, "PCI core secondary fault", -1, 1 },
2028 { F_PCIEPINT, "PCI core primary fault", -1, 1 },
2029 { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2030 0 },
2031 { 0 }
2032 };
2033
2034 static struct intr_info t5_pcie_intr_info[] = {
2035 { F_MSTGRPPERR, "Master Response Read Queue parity error",
2036 -1, 1 },
2037 { F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2038 { F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2039 { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2040 { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2041 { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2042 { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2043 { F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2044 -1, 1 },
2045 { F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2046 -1, 1 },
2047 { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2048 { F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2049 { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2050 { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2051 { F_DREQWRPERR, "PCI DMA channel write request parity error",
2052 -1, 1 },
2053 { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2054 { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2055 { F_HREQWRPERR, "PCI HMA channel write request parity error",
2056 { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2057 { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2058 { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2059 { F_FIDPERR, "PCI FID parity error", -1, 1 },
2060 { F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2061 { F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2062 { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2063 { F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2064 -1, 1 },
2065 { F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2066 -1, 1 },
2067 { F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2068 { F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2069 { F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2070 { F_READRSPERR, "Outbound read error", -1,
2071 0 },
2072 { 0 }
2073 };
2074
2075 int fat;
2076
2077 fat = t4_handle_intr_status(adapter,
2078 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2079 sysbus_intr_info) +
2080 t4_handle_intr_status(adapter,
2081 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2082 pcie_port_intr_info) +
2083 t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2084 is_t4(adapter) ?
2085 pcie_intr_info : t5_pcie_intr_info);
2086 if (fat)
2087 t4_fatal_err(adapter);
2088}
2089
2090/*
2091 * TP interrupt handler.
2092 */
2093static void tp_intr_handler(struct adapter *adapter)
2094{
2095 static struct intr_info tp_intr_info[] = {
2096 { 0x3fffffff, "TP parity error", -1, 1 },
2097 { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2098 { 0 }
2099 };
2100
2101 if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2102 t4_fatal_err(adapter);
2103}
2104
2105/*
2106 * SGE interrupt handler.
2107 */
2108static void sge_intr_handler(struct adapter *adapter)
2109{
2110 u64 v;
2111 u32 err;
2112
2113 static struct intr_info sge_intr_info[] = {
2114 { F_ERR_CPL_EXCEED_IQE_SIZE,
2115 "SGE received CPL exceeding IQE size", -1, 1 },
2116 { F_ERR_INVALID_CIDX_INC,
2117 "SGE GTS CIDX increment too large", -1, 0 },
2118 { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2119 { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2120 { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2121 "SGE IQID > 1023 received CPL for FL", -1, 0 },
2122 { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2123 0 },
2124 { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2125 0 },
2126 { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2127 0 },
2128 { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2129 0 },
2130 { F_ERR_ING_CTXT_PRIO,
2131 "SGE too many priority ingress contexts", -1, 0 },
2132 { F_ERR_EGR_CTXT_PRIO,
2133 "SGE too many priority egress contexts", -1, 0 },
2134 { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2135 { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2136 { 0 }
2137 };
2138
2139 v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2140 ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2141 if (v) {
2142 CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2143 (unsigned long long)v);
2144 t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2145 t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2146 }
2147
2148 v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2149
2150 err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2151 if (err & F_ERROR_QID_VALID) {
2152 CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2153 if (err & F_UNCAPTURED_ERROR)
2154 CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2155 t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2156 F_UNCAPTURED_ERROR);
2157 }
2158
2159 if (v != 0)
2160 t4_fatal_err(adapter);
2161}
2162
2163#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2164 F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2165#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2166 F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2167
2168/*
2169 * CIM interrupt handler.
2170 */
2171static void cim_intr_handler(struct adapter *adapter)
2172{
2173 static struct intr_info cim_intr_info[] = {
2174 { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2175 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2176 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2177 { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2178 { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2179 { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2180 { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2181 { 0 }
2182 };
2183 static struct intr_info cim_upintr_info[] = {
2184 { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2185 { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2186 { F_ILLWRINT, "CIM illegal write", -1, 1 },
2187 { F_ILLRDINT, "CIM illegal read", -1, 1 },
2188 { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2189 { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2190 { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2191 { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2192 { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2193 { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2194 { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2195 { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2196 { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2197 { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2198 { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2199 { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2200 { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2201 { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2202 { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2203 { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2204 { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2205 { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2206 { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2207 { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2208 { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2209 { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2210 { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2211 { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2212 { 0 }
2213 };
2214 int fat;
2215
2216 if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2217 t4_report_fw_error(adapter);
2218
2219 fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2220 cim_intr_info) +
2221 t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2222 cim_upintr_info);
2223 if (fat)
2224 t4_fatal_err(adapter);
2225}
2226
2227/*
2228 * ULP RX interrupt handler.
2229 */
2230static void ulprx_intr_handler(struct adapter *adapter)
2231{
2232 static struct intr_info ulprx_intr_info[] = {
2233 { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2234 { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2235 { 0x7fffff, "ULPRX parity error", -1, 1 },
2236 { 0 }
2237 };
2238
2239 if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2240 t4_fatal_err(adapter);
2241}
2242
2243/*
2244 * ULP TX interrupt handler.
2245 */
2246static void ulptx_intr_handler(struct adapter *adapter)
2247{
2248 static struct intr_info ulptx_intr_info[] = {
2249 { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2250 0 },
2251 { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2252 0 },
2253 { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2254 0 },
2255 { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2256 0 },
2257 { 0xfffffff, "ULPTX parity error", -1, 1 },
2258 { 0 }
2259 };
2260
2261 if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2262 t4_fatal_err(adapter);
2263}
2264
2265/*
2266 * PM TX interrupt handler.
2267 */
2268static void pmtx_intr_handler(struct adapter *adapter)
2269{
2270 static struct intr_info pmtx_intr_info[] = {
2271 { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2272 { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2273 { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2274 { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2275 { 0xffffff0, "PMTX framing error", -1, 1 },
2276 { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2277 { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2278 1 },
2279 { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2280 { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2281 { 0 }
2282 };
2283
2284 if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2285 t4_fatal_err(adapter);
2286}
2287
2288/*
2289 * PM RX interrupt handler.
2290 */
2291static void pmrx_intr_handler(struct adapter *adapter)
2292{
2293 static struct intr_info pmrx_intr_info[] = {
2294 { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2295 { 0x3ffff0, "PMRX framing error", -1, 1 },
2296 { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2297 { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2298 1 },
2299 { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2300 { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2301 { 0 }
2302 };
2303
2304 if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2305 t4_fatal_err(adapter);
2306}
2307
2308/*
2309 * CPL switch interrupt handler.
2310 */
2311static void cplsw_intr_handler(struct adapter *adapter)
2312{
2313 static struct intr_info cplsw_intr_info[] = {
2314 { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2315 { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2316 { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2317 { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2318 { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2319 { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2320 { 0 }
2321 };
2322
2323 if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2324 t4_fatal_err(adapter);
2325}
2326
2327/*
2328 * LE interrupt handler.
2329 */
2330static void le_intr_handler(struct adapter *adap)
2331{
2332 static struct intr_info le_intr_info[] = {
2333 { F_LIPMISS, "LE LIP miss", -1, 0 },
2334 { F_LIP0, "LE 0 LIP error", -1, 0 },
2335 { F_PARITYERR, "LE parity error", -1, 1 },
2336 { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2337 { F_REQQPARERR, "LE request queue parity error", -1, 1 },
2338 { 0 }
2339 };
2340
2341 if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2342 t4_fatal_err(adap);
2343}
2344
2345/*
2346 * MPS interrupt handler.
2347 */
2348static void mps_intr_handler(struct adapter *adapter)
2349{
2350 static struct intr_info mps_rx_intr_info[] = {
2351 { 0xffffff, "MPS Rx parity error", -1, 1 },
2352 { 0 }
2353 };
2354 static struct intr_info mps_tx_intr_info[] = {
2355 { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2356 { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2357 { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2358 -1, 1 },
2359 { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2360 -1, 1 },
2361 { F_BUBBLE, "MPS Tx underflow", -1, 1 },
2362 { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2363 { F_FRMERR, "MPS Tx framing error", -1, 1 },
2364 { 0 }
2365 };
2366 static struct intr_info mps_trc_intr_info[] = {
2367 { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2368 { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2369 1 },
2370 { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2371 { 0 }
2372 };
2373 static struct intr_info mps_stat_sram_intr_info[] = {
2374 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2375 { 0 }
2376 };
2377 static struct intr_info mps_stat_tx_intr_info[] = {
2378 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2379 { 0 }
2380 };
2381 static struct intr_info mps_stat_rx_intr_info[] = {
2382 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2383 { 0 }
2384 };
2385 static struct intr_info mps_cls_intr_info[] = {
2386 { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2387 { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2388 { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2389 { 0 }
2390 };
2391
2392 int fat;
2393
2394 fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2395 mps_rx_intr_info) +
2396 t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2397 mps_tx_intr_info) +
2398 t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2399 mps_trc_intr_info) +
2400 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2401 mps_stat_sram_intr_info) +
2402 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2403 mps_stat_tx_intr_info) +
2404 t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2405 mps_stat_rx_intr_info) +
2406 t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2407 mps_cls_intr_info);
2408
2409 t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2410 t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
2411 if (fat)
2412 t4_fatal_err(adapter);
2413}
2414
2415#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2416
2417/*
2418 * EDC/MC interrupt handler.
2419 */
2420static void mem_intr_handler(struct adapter *adapter, int idx)
2421{
2422 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2423
2424 unsigned int addr, cnt_addr, v;
2425
2426 if (idx <= MEM_EDC1) {
2427 addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2428 cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2429 } else {
2430 if (is_t4(adapter)) {
2431 addr = A_MC_INT_CAUSE;
2432 cnt_addr = A_MC_ECC_STATUS;
2433 } else {
2434 addr = A_MC_P_INT_CAUSE;
2435 cnt_addr = A_MC_P_ECC_STATUS;
2436 }
2437 }
2438
2439 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2440 if (v & F_PERR_INT_CAUSE)
2441 CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2442 if (v & F_ECC_CE_INT_CAUSE) {
2443 u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2444
2445 t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2446 CH_WARN_RATELIMIT(adapter,
2447 "%u %s correctable ECC data error%s\n",
2448 cnt, name[idx], cnt > 1 ? "s" : "");
2449 }
2450 if (v & F_ECC_UE_INT_CAUSE)
2451 CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2452 name[idx]);
2453
2454 t4_write_reg(adapter, addr, v);
2455 if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2456 t4_fatal_err(adapter);
2457}
2458
2459/*
2460 * MA interrupt handler.
2461 */
2462static void ma_intr_handler(struct adapter *adapter)
2463{
2464 u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2465
2466 if (status & F_MEM_PERR_INT_CAUSE)
2467 CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2468 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2469 if (status & F_MEM_WRAP_INT_CAUSE) {
2470 v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2471 CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2472 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2473 G_MEM_WRAP_ADDRESS(v) << 4);
2474 }
2475 t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2476 t4_fatal_err(adapter);
2477}
2478
2479/*
2480 * SMB interrupt handler.
2481 */
2482static void smb_intr_handler(struct adapter *adap)
2483{
2484 static struct intr_info smb_intr_info[] = {
2485 { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2486 { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2487 { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2488 { 0 }
2489 };
2490
2491 if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2492 t4_fatal_err(adap);
2493}
2494
2495/*
2496 * NC-SI interrupt handler.
2497 */
2498static void ncsi_intr_handler(struct adapter *adap)
2499{
2500 static struct intr_info ncsi_intr_info[] = {
2501 { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2502 { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2503 { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2504 { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2505 { 0 }
2506 };
2507
2508 if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2509 t4_fatal_err(adap);
2510}
2511
2512/*
2513 * XGMAC interrupt handler.
2514 */
2515static void xgmac_intr_handler(struct adapter *adap, int port)
2516{
2517 u32 v, int_cause_reg;
2518
2519 if (is_t4(adap))
2520 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2521 else
2522 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2523
2524 v = t4_read_reg(adap, int_cause_reg);
2525 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2526 if (!v)
2527 return;
2528
2529 if (v & F_TXFIFO_PRTY_ERR)
2530 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2531 if (v & F_RXFIFO_PRTY_ERR)
2532 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2533 t4_write_reg(adap, int_cause_reg, v);
2534 t4_fatal_err(adap);
2535}
2536
2537/*
2538 * PL interrupt handler.
2539 */
2540static void pl_intr_handler(struct adapter *adap)
2541{
2542 static struct intr_info pl_intr_info[] = {
2543 { F_FATALPERR, "Fatal parity error", -1, 1 },
2544 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2545 { 0 }
2546 };
2547
2548 static struct intr_info t5_pl_intr_info[] = {
2549 { F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2550 { F_FATALPERR, "Fatal parity error", -1, 1 },
2551 { 0 }
2552 };
2553
2554 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2555 is_t4(adap) ? pl_intr_info : t5_pl_intr_info))
2556 t4_fatal_err(adap);
2557}
2558
2559#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2560#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2561 F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2562 F_CPL_SWITCH | F_SGE | F_ULP_TX)
2563
2564/**
2565 * t4_slow_intr_handler - control path interrupt handler
2566 * @adapter: the adapter
2567 *
2568 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2569 * The handler is designated 'slow' because it involves register reads,
2570 * while data interrupt handling typically doesn't involve any MMIOs.
2571 */
2572int t4_slow_intr_handler(struct adapter *adapter)
2573{
2574 u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2575
2576 if (!(cause & GLBL_INTR_MASK))
2577 return 0;
2578 if (cause & F_CIM)
2579 cim_intr_handler(adapter);
2580 if (cause & F_MPS)
2581 mps_intr_handler(adapter);
2582 if (cause & F_NCSI)
2583 ncsi_intr_handler(adapter);
2584 if (cause & F_PL)
2585 pl_intr_handler(adapter);
2586 if (cause & F_SMB)
2587 smb_intr_handler(adapter);
2588 if (cause & F_XGMAC0)
2589 xgmac_intr_handler(adapter, 0);
2590 if (cause & F_XGMAC1)
2591 xgmac_intr_handler(adapter, 1);
2592 if (cause & F_XGMAC_KR0)
2593 xgmac_intr_handler(adapter, 2);
2594 if (cause & F_XGMAC_KR1)
2595 xgmac_intr_handler(adapter, 3);
2596 if (cause & F_PCIE)
2597 pcie_intr_handler(adapter);
2598 if (cause & F_MC)
2599 mem_intr_handler(adapter, MEM_MC);
2600 if (cause & F_EDC0)
2601 mem_intr_handler(adapter, MEM_EDC0);
2602 if (cause & F_EDC1)
2603 mem_intr_handler(adapter, MEM_EDC1);
2604 if (cause & F_LE)
2605 le_intr_handler(adapter);
2606 if (cause & F_TP)
2607 tp_intr_handler(adapter);
2608 if (cause & F_MA)
2609 ma_intr_handler(adapter);
2610 if (cause & F_PM_TX)
2611 pmtx_intr_handler(adapter);
2612 if (cause & F_PM_RX)
2613 pmrx_intr_handler(adapter);
2614 if (cause & F_ULP_RX)
2615 ulprx_intr_handler(adapter);
2616 if (cause & F_CPL_SWITCH)
2617 cplsw_intr_handler(adapter);
2618 if (cause & F_SGE)
2619 sge_intr_handler(adapter);
2620 if (cause & F_ULP_TX)
2621 ulptx_intr_handler(adapter);
2622
2623 /* Clear the interrupts just processed for which we are the master. */
2624 t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2625 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2626 return 1;
2627}
2628
2629/**
2630 * t4_intr_enable - enable interrupts
2631 * @adapter: the adapter whose interrupts should be enabled
2632 *
2633 * Enable PF-specific interrupts for the calling function and the top-level
2634 * interrupt concentrator for global interrupts. Interrupts are already
2635 * enabled at each module, here we just enable the roots of the interrupt
2636 * hierarchies.
2637 *
2638 * Note: this function should be called only when the driver manages
2639 * non PF-specific interrupts from the various HW modules. Only one PCI
2640 * function at a time should be doing this.
2641 */
2642void t4_intr_enable(struct adapter *adapter)
2643{
2644 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2645
2646 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2647 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2648 F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2649 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2650 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2651 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2652 F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2653 F_EGRESS_SIZE_ERR);
2654 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2655 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2656}
2657
2658/**
2659 * t4_intr_disable - disable interrupts
2660 * @adapter: the adapter whose interrupts should be disabled
2661 *
2662 * Disable interrupts. We only disable the top-level interrupt
2663 * concentrators. The caller must be a PCI function managing global
2664 * interrupts.
2665 */
2666void t4_intr_disable(struct adapter *adapter)
2667{
2668 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2669
2670 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2671 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2672}
2673
2674/**
2675 * t4_intr_clear - clear all interrupts
2676 * @adapter: the adapter whose interrupts should be cleared
2677 *
2678 * Clears all interrupts. The caller must be a PCI function managing
2679 * global interrupts.
2680 */
2681void t4_intr_clear(struct adapter *adapter)
2682{
2683 static const unsigned int cause_reg[] = {
2684 A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2685 A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2686 A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2687 A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2688 A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2689 A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2690 A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2691 MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2692 A_TP_INT_CAUSE,
2693 A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2694 A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2695 A_MPS_RX_PERR_INT_CAUSE,
2696 A_CPL_INTR_CAUSE,
2697 MYPF_REG(A_PL_PF_INT_CAUSE),
2698 A_PL_PL_INT_CAUSE,
2699 A_LE_DB_INT_CAUSE,
2700 };
2701
2702 unsigned int i;
2703
2704 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2705 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2706
2707 t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2708 A_MC_P_INT_CAUSE, 0xffffffff);
2709
2710 t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2711 (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2712}
2713
2714/**
2715 * hash_mac_addr - return the hash value of a MAC address
2716 * @addr: the 48-bit Ethernet MAC address
2717 *
2718 * Hashes a MAC address according to the hash function used by HW inexact
2719 * (hash) address matching.
2720 */
2721static int hash_mac_addr(const u8 *addr)
2722{
2723 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2724 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2725 a ^= b;
2726 a ^= (a >> 12);
2727 a ^= (a >> 6);
2728 return a & 0x3f;
2729}
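
/*
 * Worked example (editorial): hashing the Chelsio OUI 00:07:43:00:00:00.
 *
 *	a = 0x000743, b = 0x000000
 *	a ^= b		-> 0x743
 *	a ^= a >> 12	-> 0x743	(0x743 >> 12 == 0)
 *	a ^= a >> 6	-> 0x743 ^ 0x1d == 0x75e
 *	a & 0x3f	-> 0x1e
 *
 * so this address lands in hash bucket 30 and would set bit 30 of the
 * 64-bit inexact-match hash programmed into the hardware.
 */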
2730
2731/**
2732 * t4_config_rss_range - configure a portion of the RSS mapping table
2733 * @adapter: the adapter
2734 * @mbox: mbox to use for the FW command
2735 * @viid: virtual interface whose RSS subtable is to be written
2736 * @start: start entry in the table to write
2737 * @n: how many table entries to write
2738 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2739 * @nrspq: number of values in @rspq
2740 *
2741 * Programs the selected part of the VI's RSS mapping table with the
2742 * provided values. If @nrspq < @n the supplied values are used repeatedly
2743 * until the full table range is populated.
2744 *
2745 * The caller must ensure the values in @rspq are in the range allowed for
2746 * @viid.
2747 */
2748int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2749 int start, int n, const u16 *rspq, unsigned int nrspq)
2750{
2751 int ret;
2752 const u16 *rsp = rspq;
2753 const u16 *rsp_end = rspq + nrspq;
2754 struct fw_rss_ind_tbl_cmd cmd;
2755
2756 memset(&cmd, 0, sizeof(cmd));
2757 cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2758 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2759 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2760 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2761
2762
2763 /*
2764 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2765 * Queue Identifiers. These Ingress Queue IDs are packed three to
2766 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2767 * reserved.
2768 */
2769 while (n > 0) {
2770 int nq = min(n, 32);
2771 int nq_packed = 0;
2772 __be32 *qp = &cmd.iq0_to_iq2;
2773
2774 /*
2775 * Set up the firmware RSS command header to send the next
2776 * "nq" Ingress Queue IDs to the firmware.
2777 */
2778 cmd.niqid = htons(nq);
2779 cmd.startidx = htons(start);
2780
2781 /*
2782 * "nq" more done for the start of the next loop.
2783 */
2784 start += nq;
2785 n -= nq;
2786
2787 /*
2788 * While there are still Ingress Queue IDs to stuff into the
2789 * current firmware RSS command, retrieve them from the
2790 * Ingress Queue ID array and insert them into the command.
2791 */
2792 while (nq > 0) {
2793 /*
2794 * Grab up to the next 3 Ingress Queue IDs (wrapping
2795 * around the Ingress Queue ID array if necessary) and
2796 * insert them into the firmware RSS command at the
2797 * current 3-tuple position within the command.
2798 */
2799 u16 qbuf[3];
2800 u16 *qbp = qbuf;
2801 int nqbuf = min(3, nq);
2802
2803 nq -= nqbuf;
2804 qbuf[0] = qbuf[1] = qbuf[2] = 0;
2805 while (nqbuf && nq_packed < 32) {
2806 nqbuf--;
2807 nq_packed++;
2808 *qbp++ = *rsp++;
2809 if (rsp >= rsp_end)
2810 rsp = rspq;
2811 }
2812 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2813 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2814 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2815 }
2816
2817 /*
2818 * Send this portion of the RSS table update to the firmware;
2819 * bail out on any errors.
2820 */
2821 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2822 if (ret)
2823 return ret;
2824 }
2825
2826 return 0;
2827}
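
/*
 * Editorial sketch: distributing a VI's ingress queues evenly over its
 * whole RSS slice.  Because t4_config_rss_range() cycles through @rspq
 * when nrspq < n, the caller only needs to supply each IQ ID once:
 */
static int example_setup_rss(struct adapter *adap, int mbox, unsigned int viid,
    int rss_size, const u16 *iq_ids, unsigned int nq)
{
	/*
	 * Fill all rss_size slots starting at index 0 of the VI's slice,
	 * repeating the nq queue IDs round-robin.
	 */
	return t4_config_rss_range(adap, mbox, viid, 0, rss_size, iq_ids, nq);
}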
2828
2829/**
2830 * t4_config_glbl_rss - configure the global RSS mode
2831 * @adapter: the adapter
2832 * @mbox: mbox to use for the FW command
2833 * @mode: global RSS mode
2834 * @flags: mode-specific flags
2835 *
2836 * Sets the global RSS mode.
2837 */
2838int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2839 unsigned int flags)
2840{
2841 struct fw_rss_glb_config_cmd c;
2842
2843 memset(&c, 0, sizeof(c));
2844 c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2845 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2846 c.retval_len16 = htonl(FW_LEN16(c));
2847 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2848 c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2849 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2850 c.u.basicvirtual.mode_pkd =
2851 htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2852 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2853 } else
2854 return -EINVAL;
2855 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2856}
2857
2858/**
2859 * t4_config_vi_rss - configure per VI RSS settings
2860 * @adapter: the adapter
2861 * @mbox: mbox to use for the FW command
2862 * @viid: the VI id
2863 * @flags: RSS flags
2864 * @defq: id of the default RSS queue for the VI.
2865 *
2866 * Configures VI-specific RSS properties.
2867 */
2868int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2869 unsigned int flags, unsigned int defq)
2870{
2871 struct fw_rss_vi_config_cmd c;
2872
2873 memset(&c, 0, sizeof(c));
2874 c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2875 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2876 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2877 c.retval_len16 = htonl(FW_LEN16(c));
2878 c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2879 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2880 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2881}
2882
2883/* Read an RSS table row */
2884static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2885{
2886 t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2887 return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2888 5, 0, val);
2889}
2890
2891/**
2892 * t4_read_rss - read the contents of the RSS mapping table
2893 * @adapter: the adapter
2894 * @map: holds the contents of the RSS mapping table
2895 *
2896 * Reads the contents of the RSS hash->queue mapping table.
2897 */
2898int t4_read_rss(struct adapter *adapter, u16 *map)
2899{
2900 u32 val;
2901 int i, ret;
2902
2903 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2904 ret = rd_rss_row(adapter, i, &val);
2905 if (ret)
2906 return ret;
2907 *map++ = G_LKPTBLQUEUE0(val);
2908 *map++ = G_LKPTBLQUEUE1(val);
2909 }
2910 return 0;
2911}
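
/*
 * Editorial sketch: each rd_rss_row() call above yields two 10-bit queue
 * numbers, so a full dump needs RSS_NENTRIES u16 slots (kept static here
 * only because the table is too large for a kernel stack frame):
 */
static void example_dump_rss_table(struct adapter *adap)
{
	static u16 map[RSS_NENTRIES];
	int i;

	if (t4_read_rss(adap, map) == 0) {
		for (i = 0; i < RSS_NENTRIES; i++)
			printf("RSS[%4d] -> IQ %u\n", i, map[i]);
	}
}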
2912
2913/**
2914 * t4_read_rss_key - read the global RSS key
2915 * @adap: the adapter
2916 * @key: 10-entry array holding the 320-bit RSS key
2917 *
2918 * Reads the global 320-bit RSS key.
2919 */
2920void t4_read_rss_key(struct adapter *adap, u32 *key)
2921{
2922 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2923 A_TP_RSS_SECRET_KEY0);
2924}
2925
2926/**
2927 * t4_write_rss_key - program one of the RSS keys
2928 * @adap: the adapter
2929 * @key: 10-entry array holding the 320-bit RSS key
2930 * @idx: which RSS key to write
2931 *
2932 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2933 * 0..15 the corresponding entry in the RSS key table is written,
2934 * otherwise the global RSS key is written.
2935 */
2936void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2937{
2938 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2939 A_TP_RSS_SECRET_KEY0);
2940 if (idx >= 0 && idx < 16)
2941 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2942 V_KEYWRADDR(idx) | F_KEYWREN);
2943}
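
/*
 * Editorial sketch: loading the global RSS key with fresh random material;
 * arc4random(9) stands in for whatever entropy source the caller prefers.
 */
static void example_set_global_rss_key(struct adapter *adap)
{
	u32 key[10];	/* 320 bits */
	int i;

	for (i = 0; i < 10; i++)
		key[i] = arc4random();
	t4_write_rss_key(adap, key, -1);	/* idx < 0: global key only */
}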
2944
2945/**
2946 * t4_read_rss_pf_config - read PF RSS Configuration Table
2947 * @adapter: the adapter
2948 * @index: the entry in the PF RSS table to read
2949 * @valp: where to store the returned value
2950 *
2951 * Reads the PF RSS Configuration Table at the specified index and returns
2952 * the value found there.
2953 */
2954void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2955{
2956 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2957 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2958}
2959
2960/**
2961 * t4_write_rss_pf_config - write PF RSS Configuration Table
2962 * @adapter: the adapter
2963 * @index: the entry in the PF RSS table to write
2964 * @val: the value to store
2965 *
2966 * Writes the PF RSS Configuration Table at the specified index with the
2967 * specified value.
2968 */
2969void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2970{
2971 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2972 &val, 1, A_TP_RSS_PF0_CONFIG + index);
2973}
2974
2975/**
2976 * t4_read_rss_vf_config - read VF RSS Configuration Table
2977 * @adapter: the adapter
2978 * @index: the entry in the VF RSS table to read
2979 * @vfl: where to store the returned VFL
2980 * @vfh: where to store the returned VFH
2981 *
2982 * Reads the VF RSS Configuration Table at the specified index and returns
2983 * the (VFL, VFH) values found there.
2984 */
2985void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2986 u32 *vfl, u32 *vfh)
2987{
2988 u32 vrt;
2989
2990 /*
2991 * Request that the index'th VF Table values be read into VFL/VFH.
2992 */
2993 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2994 vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2995 vrt |= V_VFWRADDR(index) | F_VFRDEN;
2996 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2997
2998 /*
2999 * Grab the VFL/VFH values ...
3000 */
3001 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3002 vfl, 1, A_TP_RSS_VFL_CONFIG);
3003 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3004 vfh, 1, A_TP_RSS_VFH_CONFIG);
3005}
3006
3007/**
3008 * t4_write_rss_vf_config - write VF RSS Configuration Table
3010 * @adapter: the adapter
3011 * @index: the entry in the VF RSS table to write
3012 * @vfl: the VFL to store
3013 * @vfh: the VFH to store
3014 *
3015 * Writes the VF RSS Configuration Table at the specified index with the
3016 * specified (VFL, VFH) values.
3017 */
3018void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3019 u32 vfl, u32 vfh)
3020{
3021 u32 vrt;
3022
3023 /*
3024 * Load up VFL/VFH with the values to be written ...
3025 */
3026 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3027 &vfl, 1, A_TP_RSS_VFL_CONFIG);
3028 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3029 &vfh, 1, A_TP_RSS_VFH_CONFIG);
3030
3031 /*
3032 * Write the VFL/VFH into the VF Table at index'th location.
3033 */
3034 vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3035 vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3036 vrt |= V_VFWRADDR(index) | F_VFWREN;
3037 t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3038}
3039
3040/**
3041 * t4_read_rss_pf_map - read PF RSS Map
3042 * @adapter: the adapter
3043 *
3044 * Reads the PF RSS Map register and returns its value.
3045 */
3046u32 t4_read_rss_pf_map(struct adapter *adapter)
3047{
3048 u32 pfmap;
3049
3050 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3051 &pfmap, 1, A_TP_RSS_PF_MAP);
3052 return pfmap;
3053}
3054
3055/**
3056 * t4_write_rss_pf_map - write PF RSS Map
3057 * @adapter: the adapter
3058 * @pfmap: PF RSS Map value
3059 *
3060 * Writes the specified value to the PF RSS Map register.
3061 */
3062void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3063{
3064 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3065 &pfmap, 1, A_TP_RSS_PF_MAP);
3066}
3067
3068/**
3069 * t4_read_rss_pf_mask - read PF RSS Mask
3070 * @adapter: the adapter
3071 *
3072 * Reads the PF RSS Mask register and returns its value.
3073 */
3074u32 t4_read_rss_pf_mask(struct adapter *adapter)
3075{
3076 u32 pfmask;
3077
3078 t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3079 &pfmask, 1, A_TP_RSS_PF_MSK);
3080 return pfmask;
3081}
3082
3083/**
3084 * t4_write_rss_pf_mask - write PF RSS Mask
3085 * @adapter: the adapter
3086 * @pfmask: PF RSS Mask value
3087 *
3088 * Writes the specified value to the PF RSS Mask register.
3089 */
3090void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3091{
3092 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3093 &pfmask, 1, A_TP_RSS_PF_MSK);
3094}
3095
3096/**
3097 * t4_set_filter_mode - configure the optional components of filter tuples
3098 * @adap: the adapter
3099 * @mode_map: a bitmap selecting which optional filter components to enable
3100 *
3101 * Sets the filter mode by selecting the optional components to enable
3102 * in filter tuples. Returns 0 on success and a negative error if the
3103 * requested mode needs more bits than are available for optional
3104 * components.
3105 */
3106int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3107{
3108 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3109
3110 int i, nbits = 0;
3111
3112 for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3113 if (mode_map & (1 << i))
3114 nbits += width[i];
3115 if (nbits > FILTER_OPT_LEN)
3116 return -EINVAL;
3117 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3118 A_TP_VLAN_PRI_MAP);
3119 return 0;
3120}
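
/*
 * Worked example (editorial): width[] above corresponds, in order, to the
 * optional tuple fields FCoE(1), PORT(3), VNIC_ID(17), VLAN(17), TOS(8),
 * PROTOCOL(8), ETHERTYPE(16), MACMATCH(9), MPSHITTYPE(3), and
 * FRAGMENTATION(1).  Selecting PORT, VLAN, PROTOCOL, and FRAGMENTATION
 * costs 3 + 17 + 8 + 1 = 29 bits and fits the 36-bit FILTER_OPT_LEN
 * budget; adding VNIC_ID (17 more bits) would exceed it and the function
 * would return -EINVAL.
 */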
3121
3122/**
3123 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
3124 * @adap: the adapter
3125 * @v4: holds the TCP/IP counter values
3126 * @v6: holds the TCP/IPv6 counter values
3127 *
3128 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3129 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3130 */
3131void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3132 struct tp_tcp_stats *v6)
3133{
3134 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3135
3136#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3137#define STAT(x) val[STAT_IDX(x)]
3138#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3139
3140 if (v4) {
3141 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3142 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3143 v4->tcpOutRsts = STAT(OUT_RST);
3144 v4->tcpInSegs = STAT64(IN_SEG);
3145 v4->tcpOutSegs = STAT64(OUT_SEG);
3146 v4->tcpRetransSegs = STAT64(RXT_SEG);
3147 }
3148 if (v6) {
3149 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3150 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3151 v6->tcpOutRsts = STAT(OUT_RST);
3152 v6->tcpInSegs = STAT64(IN_SEG);
3153 v6->tcpOutSegs = STAT64(OUT_SEG);
3154 v6->tcpRetransSegs = STAT64(RXT_SEG);
3155 }
3156#undef STAT64
3157#undef STAT
3158#undef STAT_IDX
3159}
3160
3161/**
3162 * t4_tp_get_err_stats - read TP's error MIB counters
3163 * @adap: the adapter
3164 * @st: holds the counter values
3165 *
3166 * Returns the values of TP's error counters.
3167 */
3168void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3169{
3170 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3171 12, A_TP_MIB_MAC_IN_ERR_0);
3172 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3173 8, A_TP_MIB_TNL_CNG_DROP_0);
3174 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3175 4, A_TP_MIB_TNL_DROP_0);
3176 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3177 4, A_TP_MIB_OFD_VLN_DROP_0);
3178 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3179 4, A_TP_MIB_TCP_V6IN_ERR_0);
3180 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3181 2, A_TP_MIB_OFD_ARP_DROP);
3182}
3183
3184/**
3185 * t4_tp_get_proxy_stats - read TP's proxy MIB counters
3186 * @adap: the adapter
3187 * @st: holds the counter values
3188 *
3189 * Returns the values of TP's proxy counters.
3190 */
3191void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3192{
3193 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3194 4, A_TP_MIB_TNL_LPBK_0);
3195}
3196
3197/**
3198 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
3199 * @adap: the adapter
3200 * @st: holds the counter values
3201 *
3202 * Returns the values of TP's CPL counters.
3203 */
3204void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3205{
3206 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3207 8, A_TP_MIB_CPL_IN_REQ_0);
3208}
3209
3210/**
3211 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3212 * @adap: the adapter
3213 * @st: holds the counter values
3214 *
3215 * Returns the values of TP's RDMA counters.
3216 */
3217void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3218{
3219 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3220 2, A_TP_MIB_RQE_DFR_MOD);
3221}
3222
3223/**
3224 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3225 * @adap: the adapter
3226 * @idx: the port index
3227 * @st: holds the counter values
3228 *
3229 * Returns the values of TP's FCoE counters for the selected port.
3230 */
3231void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3232 struct tp_fcoe_stats *st)
3233{
3234 u32 val[2];
3235
3236 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3237 1, A_TP_MIB_FCOE_DDP_0 + idx);
3238 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3239 1, A_TP_MIB_FCOE_DROP_0 + idx);
3240 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3241 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3242 st->octetsDDP = ((u64)val[0] << 32) | val[1];
3243}
3244
3245/**
3246 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3247 * @adap: the adapter
3248 * @st: holds the counter values
3249 *
3250 * Returns the values of TP's counters for non-TCP directly-placed packets.
3251 */
3252void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3253{
3254 u32 val[4];
3255
3256 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3257 A_TP_MIB_USM_PKTS);
3258 st->frames = val[0];
3259 st->drops = val[1];
3260 st->octets = ((u64)val[2] << 32) | val[3];
3261}
3262
3263/**
3264 * t4_read_mtu_tbl - returns the values in the HW path MTU table
3265 * @adap: the adapter
3266 * @mtus: where to store the MTU values
3267 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
3268 *
3269 * Reads the HW path MTU table.
3270 */
3271void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3272{
3273 u32 v;
3274 int i;
3275
3276 for (i = 0; i < NMTUS; ++i) {
3277 t4_write_reg(adap, A_TP_MTU_TABLE,
3278 V_MTUINDEX(0xff) | V_MTUVALUE(i));
3279 v = t4_read_reg(adap, A_TP_MTU_TABLE);
3280 mtus[i] = G_MTUVALUE(v);
3281 if (mtu_log)
3282 mtu_log[i] = G_MTUWIDTH(v);
3283 }
3284}
3285
3286/**
3287 * t4_read_cong_tbl - reads the congestion control table
3288 * @adap: the adapter
3289 * @incr: where to store the alpha values
3290 *
3291 * Reads the additive increments programmed into the HW congestion
3292 * control table.
3293 */
3294void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3295{
3296 unsigned int mtu, w;
3297
3298 for (mtu = 0; mtu < NMTUS; ++mtu)
3299 for (w = 0; w < NCCTRL_WIN; ++w) {
3300 t4_write_reg(adap, A_TP_CCTRL_TABLE,
3301 V_ROWINDEX(0xffff) | (mtu << 5) | w);
3302 incr[mtu][w] = (u16)t4_read_reg(adap,
3303 A_TP_CCTRL_TABLE) & 0x1fff;
3304 }
3305}
3306
3307/**
3308 * t4_read_pace_tbl - read the pace table
3309 * @adap: the adapter
3310 * @pace_vals: holds the returned values
3311 *
3312 * Returns the values of TP's pace table in microseconds.
3313 */
3314void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3315{
3316 unsigned int i, v;
3317
3318 for (i = 0; i < NTX_SCHED; i++) {
3319 t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3320 v = t4_read_reg(adap, A_TP_PACE_TABLE);
3321 pace_vals[i] = dack_ticks_to_usec(adap, v);
3322 }
3323}
3324
3325/**
3326 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3327 * @adap: the adapter
3328 * @addr: the indirect TP register address
3329 * @mask: specifies the field within the register to modify
3330 * @val: new value for the field
3331 *
3332 * Sets a field of an indirect TP register to the given value.
3333 */
3334void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3335 unsigned int mask, unsigned int val)
3336{
3337 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3338 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3339 t4_write_reg(adap, A_TP_PIO_DATA, val);
3340}
3341
3342/**
3343 * init_cong_ctrl - initialize congestion control parameters
3344 * @a: the alpha values for congestion control
3345 * @b: the beta values for congestion control
3346 *
3347 * Initialize the congestion control parameters.
3348 */
3349static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3350{
3351 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3352 a[9] = 2;
3353 a[10] = 3;
3354 a[11] = 4;
3355 a[12] = 5;
3356 a[13] = 6;
3357 a[14] = 7;
3358 a[15] = 8;
3359 a[16] = 9;
3360 a[17] = 10;
3361 a[18] = 14;
3362 a[19] = 17;
3363 a[20] = 21;
3364 a[21] = 25;
3365 a[22] = 30;
3366 a[23] = 35;
3367 a[24] = 45;
3368 a[25] = 60;
3369 a[26] = 80;
3370 a[27] = 100;
3371 a[28] = 200;
3372 a[29] = 300;
3373 a[30] = 400;
3374 a[31] = 500;
3375
3376 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3377 b[9] = b[10] = 1;
3378 b[11] = b[12] = 2;
3379 b[13] = b[14] = b[15] = b[16] = 3;
3380 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3381 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3382 b[28] = b[29] = 6;
3383 b[30] = b[31] = 7;
3384}
3385
3386/* The minimum additive increment value for the congestion control table */
3387#define CC_MIN_INCR 2U
3388
3389/**
3390 * t4_load_mtus - write the MTU and congestion control HW tables
3391 * @adap: the adapter
3392 * @mtus: the values for the MTU table
3393 * @alpha: the values for the congestion control alpha parameter
3394 * @beta: the values for the congestion control beta parameter
3395 *
3396 * Write the HW MTU table with the supplied MTUs and the high-speed
3397 * congestion control table with the supplied alpha, beta, and MTUs.
3398 * We write the two tables together because the additive increments
3399 * depend on the MTUs.
3400 */
3401void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3402 const unsigned short *alpha, const unsigned short *beta)
3403{
3404 static const unsigned int avg_pkts[NCCTRL_WIN] = {
3405 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3406 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3407 28672, 40960, 57344, 81920, 114688, 163840, 229376
3408 };
3409
3410 unsigned int i, w;
3411
3412 for (i = 0; i < NMTUS; ++i) {
3413 unsigned int mtu = mtus[i];
3414 unsigned int log2 = fls(mtu);
3415
3416 if (!(mtu & ((1 << log2) >> 2))) /* round */
3417 log2--;
3418 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3419 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3420
3421 for (w = 0; w < NCCTRL_WIN; ++w) {
3422 unsigned int inc;
3423
3424 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3425 CC_MIN_INCR);
3426
3427 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3428 (w << 16) | (beta[w] << 13) | inc);
3429 }
3430 }
3431}
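
/*
 * Illustrative sketch, not part of the driver: how the V_MTUWIDTH rounding
 * above behaves.  fls() is assumed to have the usual kernel semantics
 * (1-based index of the highest set bit).  For mtu = 1500, fls() returns 11,
 * but bit 9 ((1 << 11) >> 2 == 512) is clear in 1500, so the encoded width
 * drops from 11 (~2K) to 10 (~1K).
 */
static inline unsigned int example_mtuwidth(unsigned int mtu)
{
	unsigned int log2 = fls(mtu);

	if (!(mtu & ((1 << log2) >> 2)))	/* same rounding as above */
		log2--;
	return log2;
}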
3432
3433/**
3434 * t4_set_pace_tbl - set the pace table
3435 * @adap: the adapter
3436 * @pace_vals: the pace values in microseconds
3437 * @start: index of the first entry in the HW pace table to set
3438 * @n: how many entries to set
3439 *
3440 * Sets (a subset of the) HW pace table.
3441 */
3442int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3443 unsigned int start, unsigned int n)
3444{
3445 unsigned int vals[NTX_SCHED], i;
3446 unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3447
3448 if (n > NTX_SCHED)
3449 return -ERANGE;
3450
3451 /* convert values from us to dack ticks, rounding to closest value */
3452 for (i = 0; i < n; i++, pace_vals++) {
3453 vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3454 if (vals[i] > 0x7ff)
3455 return -ERANGE;
3456 if (*pace_vals && vals[i] == 0)
3457 return -ERANGE;
3458 }
3459 for (i = 0; i < n; i++, start++)
3460 t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3461 return 0;
3462}
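
/*
 * Worked example for the conversion above (illustrative, with an assumed
 * DACK tick of 800ns, i.e. tick_ns == 800): a requested pace of 10us becomes
 * (1000 * 10 + 400) / 800 == 13 ticks -- round to nearest, not truncation.
 * A nonzero request that would round down to 0 ticks is rejected with
 * -ERANGE instead of being silently disabled.
 */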
3463
3464/**
3465 * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3466 * @adap: the adapter
3467 * @kbps: target rate in Kbps
3468 * @sched: the scheduler index
3469 *
3470 * Configure a Tx HW scheduler for the target rate.
3471 */
3472int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3473{
3474 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3475 unsigned int clk = adap->params.vpd.cclk * 1000;
3476 unsigned int selected_cpt = 0, selected_bpt = 0;
3477
3478 if (kbps > 0) {
		kbps *= 125;	/* Kbps -> bytes/s */
3480 for (cpt = 1; cpt <= 255; cpt++) {
3481 tps = clk / cpt;
3482 bpt = (kbps + tps / 2) / tps;
3483 if (bpt > 0 && bpt <= 255) {
3484 v = bpt * tps;
3485 delta = v >= kbps ? v - kbps : kbps - v;
3486 if (delta < mindelta) {
3487 mindelta = delta;
3488 selected_cpt = cpt;
3489 selected_bpt = bpt;
3490 }
3491 } else if (selected_cpt)
3492 break;
3493 }
3494 if (!selected_cpt)
3495 return -EINVAL;
3496 }
3497 t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3498 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3499 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3500 if (sched & 1)
3501 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3502 else
3503 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3504 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3505 return 0;
3506}
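
/*
 * Usage sketch (illustrative, not part of the driver): cap HW Tx scheduler 0
 * at roughly 100 Mbps.  The loop above then searches for the (core clocks
 * per tick, bytes per tick) pair whose product is closest to 100000 Kbps
 * expressed in bytes/s.
 */
static inline int example_cap_sched0(struct adapter *adap)
{
	return t4_set_sched_bps(adap, 0, 100000);	/* 100,000 Kbps */
}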
3507
3508/**
3509 * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3510 * @adap: the adapter
3511 * @sched: the scheduler index
3512 * @ipg: the interpacket delay in tenths of nanoseconds
3513 *
3514 * Set the interpacket delay for a HW packet rate scheduler.
3515 */
3516int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3517{
3518 unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3519
3520 /* convert ipg to nearest number of core clocks */
3521 ipg *= core_ticks_per_usec(adap);
3522 ipg = (ipg + 5000) / 10000;
3523 if (ipg > M_TXTIMERSEPQ0)
3524 return -EINVAL;
3525
3526 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3527 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3528 if (sched & 1)
3529 v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3530 else
3531 v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3532 t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	(void) t4_read_reg(adap, A_TP_TM_PIO_DATA);	/* flush */
3534 return 0;
3535}
3536
3537/**
3538 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3539 * @adap: the adapter
3540 * @sched: the scheduler index
 *	@kbps: where to store the current rate in Kbps
3542 * @ipg: the interpacket delay in tenths of nanoseconds
3543 *
3544 * Return the current configuration of a HW Tx scheduler.
3545 */
3546void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3547 unsigned int *ipg)
3548{
3549 unsigned int v, addr, bpt, cpt;
3550
3551 if (kbps) {
3552 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3553 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3554 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3555 if (sched & 1)
3556 v >>= 16;
3557 bpt = (v >> 8) & 0xff;
3558 cpt = v & 0xff;
3559 if (!cpt)
3560 *kbps = 0; /* scheduler disabled */
3561 else {
3562 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3563 *kbps = (v * bpt) / 125;
3564 }
3565 }
3566 if (ipg) {
3567 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3568 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3569 v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3570 if (sched & 1)
3571 v >>= 16;
3572 v &= 0xffff;
3573 *ipg = (10000 * v) / core_ticks_per_usec(adap);
3574 }
3575}
3576
3577/*
3578 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3579 * clocks. The formula is
3580 *
3581 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3582 *
3583 * which is equivalent to
3584 *
3585 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3586 */
3587static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3588{
3589 u64 v = bytes256 * adap->params.vpd.cclk;
3590
3591 return v * 62 + v / 2;
3592}
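
/*
 * Quick check of the arithmetic above: with cclk in kHz,
 * bytes256 * 256 * (cclk * 1000) / 4096 reduces to bytes256 * cclk * 62.5,
 * and v * 62 + v / 2 is exactly 62.5 * v for even v (low by half a byte/s
 * for odd v), so chan_rate() avoids floating point entirely.
 */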
3593
3594/**
3595 * t4_get_chan_txrate - get the current per channel Tx rates
3596 * @adap: the adapter
3597 * @nic_rate: rates for NIC traffic
3598 * @ofld_rate: rates for offloaded traffic
3599 *
3600 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
3601 * for each channel.
3602 */
3603void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3604{
3605 u32 v;
3606
3607 v = t4_read_reg(adap, A_TP_TX_TRATE);
3608 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3609 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3610 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3611 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3612
3613 v = t4_read_reg(adap, A_TP_TX_ORATE);
3614 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3615 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3616 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3617 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3618}
3619
3620/**
3621 * t4_set_trace_filter - configure one of the tracing filters
3622 * @adap: the adapter
3623 * @tp: the desired trace filter parameters
3624 * @idx: which filter to configure
3625 * @enable: whether to enable or disable the filter
3626 *
3627 * Configures one of the tracing filters available in HW. If @enable is
 *	%0 @tp is not examined and may be %NULL.  The user is responsible for
 *	setting the single/multiple trace mode by writing to the A_MPS_TRC_CFG
 *	register with the "cxgbtool iface reg reg_addr=val" command.  See
 *	t4_sniffer/docs/readme.txt for a complete description of how to set up
 *	tracing on T4.
3633 */
3634int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3635 int enable)
3636{
3637 int i, ofst = idx * 4;
3638 u32 data_reg, mask_reg, cfg;
3639 u32 multitrc = F_TRCMULTIFILTER;
3640
3641 if (!enable) {
3642 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3643 return 0;
3644 }
3645
3646 /*
3647 * TODO - After T4 data book is updated, specify the exact
3648 * section below.
3649 *
3650 * See T4 data book - MPS section for a complete description
3651 * of the below if..else handling of A_MPS_TRC_CFG register
3652 * value.
3653 */
3654 cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3655 if (cfg & F_TRCMULTIFILTER) {
3656 /*
3657 * If multiple tracers are enabled, then maximum
3658 * capture size is 2.5KB (FIFO size of a single channel)
3659 * minus 2 flits for CPL_TRACE_PKT header.
3660 */
3661 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3662 return -EINVAL;
3663 }
3664 else {
3665 /*
		 * If multiple tracers are disabled, then to avoid deadlocks
		 * a maximum packet capture size of 9600 bytes is recommended.
		 * Also, in this mode only trace0 can be enabled and running.
3669 */
3670 multitrc = 0;
3671 if (tp->snap_len > 9600 || idx)
3672 return -EINVAL;
3673 }
3674
3675 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3676 tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3677 return -EINVAL;
3678
3679 /* stop the tracer we'll be changing */
3680 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3681
3682 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3683 data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3684 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3685
3686 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3687 t4_write_reg(adap, data_reg, tp->data[i]);
3688 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3689 }
3690 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3691 V_TFCAPTUREMAX(tp->snap_len) |
3692 V_TFMINPKTSIZE(tp->min_len));
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap) ?
		     (V_TFPORT(tp->port) | F_TFEN |
		      V_TFINVERTMATCH(tp->invert)) :
		     (V_T5_TFPORT(tp->port) | F_T5_TFEN |
		      V_T5_TFINVERTMATCH(tp->invert))));
3699
3700 return 0;
3701}
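
/*
 * Usage sketch (illustrative, not part of the driver): have tracer 0 capture
 * the first 128 bytes of every packet on port 0.  An all-zero data/mask pair
 * matches every packet because the complemented mask written above marks
 * every bit "don't care".  A real caller would also program A_MPS_TRC_CFG
 * first, as noted in the comment on t4_set_trace_filter().
 */
static inline int example_trace_port0(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));
	tp.snap_len = 128;	/* bytes of each packet to capture */
	tp.min_len = 0;		/* no minimum packet size */
	tp.port = 0;		/* trace port 0 */
	return t4_set_trace_filter(adap, &tp, 0, 1);
}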
3702
3703/**
3704 * t4_get_trace_filter - query one of the tracing filters
3705 * @adap: the adapter
3706 * @tp: the current trace filter parameters
3707 * @idx: which trace filter to query
3708 * @enabled: non-zero if the filter is enabled
3709 *
3710 * Returns the current settings of one of the HW tracing filters.
3711 */
3712void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3713 int *enabled)
3714{
3715 u32 ctla, ctlb;
3716 int i, ofst = idx * 4;
3717 u32 data_reg, mask_reg;
3718
3719 ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3720 ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3721
3722 if (is_t4(adap)) {
3723 *enabled = !!(ctla & F_TFEN);
3724 tp->port = G_TFPORT(ctla);
3725 } else {
3726 *enabled = !!(ctla & F_T5_TFEN);
3727 tp->port = G_T5_TFPORT(ctla);
3728 }
3729 tp->snap_len = G_TFCAPTUREMAX(ctlb);
3730 tp->min_len = G_TFMINPKTSIZE(ctlb);
3731 tp->skip_ofst = G_TFOFFSET(ctla);
3732 tp->skip_len = G_TFLENGTH(ctla);
	tp->invert = !!(ctla &
	    (is_t4(adap) ? F_TFINVERTMATCH : F_T5_TFINVERTMATCH));
3734
3735 ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3736 data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3737 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3738
3739 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3740 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3741 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3742 }
3743}
3744
3745/**
3746 * t4_pmtx_get_stats - returns the HW stats from PMTX
3747 * @adap: the adapter
3748 * @cnt: where to store the count statistics
3749 * @cycles: where to store the cycle statistics
3750 *
3751 * Returns performance statistics from PMTX.
3752 */
3753void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3754{
3755 int i;
3756 u32 data[2];
3757
3758 for (i = 0; i < PM_NSTATS; i++) {
3759 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3760 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3761 if (is_t4(adap))
3762 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3763 else {
3764 t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3765 A_PM_TX_DBG_DATA, data, 2,
3766 A_PM_TX_DBG_STAT_MSB);
3767 cycles[i] = (((u64)data[0] << 32) | data[1]);
3768 }
3769 }
3770}
3771
3772/**
3773 * t4_pmrx_get_stats - returns the HW stats from PMRX
3774 * @adap: the adapter
3775 * @cnt: where to store the count statistics
3776 * @cycles: where to store the cycle statistics
3777 *
3778 * Returns performance statistics from PMRX.
3779 */
3780void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3781{
3782 int i;
3783 u32 data[2];
3784
3785 for (i = 0; i < PM_NSTATS; i++) {
3786 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3787 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3788 if (is_t4(adap))
3789 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3790 else {
3791 t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3792 A_PM_RX_DBG_DATA, data, 2,
3793 A_PM_RX_DBG_STAT_MSB);
3794 cycles[i] = (((u64)data[0] << 32) | data[1]);
3795 }
3796 }
3797}
3798
3799/**
3800 * get_mps_bg_map - return the buffer groups associated with a port
3801 * @adap: the adapter
3802 * @idx: the port index
3803 *
3804 * Returns a bitmap indicating which MPS buffer groups are associated
3805 * with the given port. Bit i is set if buffer group i is used by the
3806 * port.
3807 */
3808static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3809{
3810 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3811
3812 if (n == 0)
3813 return idx == 0 ? 0xf : 0;
3814 if (n == 1)
3815 return idx < 2 ? (3 << (2 * idx)) : 0;
3816 return 1 << idx;
3817}
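
/*
 * Decoded for reference: when the NUMPORTS field above indicates a single
 * port (0) that port owns all four buffer groups (0xf); when it indicates
 * two ports (1) each owns an adjacent pair (0x3 or 0xc); any other value
 * gives each port exactly one group (1 << idx).
 */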
3818
3819/**
3820 * t4_get_port_stats_offset - collect port stats relative to a previous
3821 * snapshot
3822 * @adap: The adapter
3823 * @idx: The port
3824 * @stats: Current stats to fill
3825 * @offset: Previous stats snapshot
3826 */
3827void t4_get_port_stats_offset(struct adapter *adap, int idx,
3828 struct port_stats *stats,
3829 struct port_stats *offset)
3830{
3831 u64 *s, *o;
3832 int i;
3833
3834 t4_get_port_stats(adap, idx, stats);
3835 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3836 i < (sizeof(struct port_stats)/sizeof(u64)) ;
3837 i++, s++, o++)
3838 *s -= *o;
3839}
3840
3841/**
3842 * t4_get_port_stats - collect port statistics
3843 * @adap: the adapter
3844 * @idx: the port index
3845 * @p: the stats structure to fill
3846 *
3847 * Collect statistics related to the given port from HW.
3848 */
3849void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3850{
3851 u32 bgmap = get_mps_bg_map(adap, idx);
3852
3853#define GET_STAT(name) \
3854 t4_read_reg64(adap, \
3855 (is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3856 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3857#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3858
3859 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3860 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3861 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3862 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3863 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3864 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3865 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3866 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3867 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3868 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3869 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3870 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3871 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3872 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3873 p->tx_drop = GET_STAT(TX_PORT_DROP);
3874 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3875 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3876 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3877 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3878 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3879 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3880 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3881 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3882
3883 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3884 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3885 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3886 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3887 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3888 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3889 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3890 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3891 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3892 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3893 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3894 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3895 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3896 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3897 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3898 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3899 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3900 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3901 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3902 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3903 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3904 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3905 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3906 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3907 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3908 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3909 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3910
3911 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3912 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3913 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3914 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3915 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3916 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3917 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3918 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3919
3920#undef GET_STAT
3921#undef GET_STAT_COM
3922}
3923
3924/**
3925 * t4_clr_port_stats - clear port statistics
3926 * @adap: the adapter
3927 * @idx: the port index
3928 *
3929 * Clear HW statistics for the given port.
3930 */
3931void t4_clr_port_stats(struct adapter *adap, int idx)
3932{
3933 unsigned int i;
3934 u32 bgmap = get_mps_bg_map(adap, idx);
3935 u32 port_base_addr;
3936
3937 if (is_t4(adap))
3938 port_base_addr = PORT_BASE(idx);
3939 else
3940 port_base_addr = T5_PORT_BASE(idx);
3941
3942 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3943 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3944 t4_write_reg(adap, port_base_addr + i, 0);
3945 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3946 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3947 t4_write_reg(adap, port_base_addr + i, 0);
3948 for (i = 0; i < 4; i++)
3949 if (bgmap & (1 << i)) {
3950 t4_write_reg(adap,
3951 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3952 t4_write_reg(adap,
3953 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3954 }
3955}
3956
3957/**
3958 * t4_get_lb_stats - collect loopback port statistics
3959 * @adap: the adapter
3960 * @idx: the loopback port index
3961 * @p: the stats structure to fill
3962 *
3963 * Return HW statistics for the given loopback port.
3964 */
3965void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3966{
3967 u32 bgmap = get_mps_bg_map(adap, idx);
3968
3969#define GET_STAT(name) \
3970 t4_read_reg64(adap, \
3971 (is_t4(adap) ? \
3972 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3973 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3974#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3975
3976 p->octets = GET_STAT(BYTES);
3977 p->frames = GET_STAT(FRAMES);
3978 p->bcast_frames = GET_STAT(BCAST);
3979 p->mcast_frames = GET_STAT(MCAST);
3980 p->ucast_frames = GET_STAT(UCAST);
3981 p->error_frames = GET_STAT(ERROR);
3982
3983 p->frames_64 = GET_STAT(64B);
3984 p->frames_65_127 = GET_STAT(65B_127B);
3985 p->frames_128_255 = GET_STAT(128B_255B);
3986 p->frames_256_511 = GET_STAT(256B_511B);
3987 p->frames_512_1023 = GET_STAT(512B_1023B);
3988 p->frames_1024_1518 = GET_STAT(1024B_1518B);
3989 p->frames_1519_max = GET_STAT(1519B_MAX);
3990 p->drop = GET_STAT(DROP_FRAMES);
3991
3992 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3993 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3994 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3995 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3996 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3997 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3998 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3999 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4000
4001#undef GET_STAT
4002#undef GET_STAT_COM
4003}
4004
4005/**
4006 * t4_wol_magic_enable - enable/disable magic packet WoL
4007 * @adap: the adapter
4008 * @port: the physical port index
4009 * @addr: MAC address expected in magic packets, %NULL to disable
4010 *
4011 * Enables/disables magic packet wake-on-LAN for the selected port.
4012 */
4013void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4014 const u8 *addr)
4015{
4016 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4017
4018 if (is_t4(adap)) {
4019 mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4020 mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4021 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4022 } else {
4023 mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4024 mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4025 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4026 }
4027
4028 if (addr) {
4029 t4_write_reg(adap, mag_id_reg_l,
4030 (addr[2] << 24) | (addr[3] << 16) |
4031 (addr[4] << 8) | addr[5]);
4032 t4_write_reg(adap, mag_id_reg_h,
4033 (addr[0] << 8) | addr[1]);
4034 }
4035 t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4036 V_MAGICEN(addr != NULL));
4037}
4038
4039/**
4040 * t4_wol_pat_enable - enable/disable pattern-based WoL
4041 * @adap: the adapter
4042 * @port: the physical port index
4043 * @map: bitmap of which HW pattern filters to set
4044 * @mask0: byte mask for bytes 0-63 of a packet
4045 * @mask1: byte mask for bytes 64-127 of a packet
4046 * @crc: Ethernet CRC for selected bytes
4047 * @enable: enable/disable switch
4048 *
4049 * Sets the pattern filters indicated in @map to mask out the bytes
4050 * specified in @mask0/@mask1 in received packets and compare the CRC of
4051 * the resulting packet against @crc. If @enable is %true pattern-based
4052 * WoL is enabled, otherwise disabled.
4053 */
4054int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4055 u64 mask0, u64 mask1, unsigned int crc, bool enable)
4056{
4057 int i;
4058 u32 port_cfg_reg;
4059
4060 if (is_t4(adap))
4061 port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4062 else
4063 port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4064
4065 if (!enable) {
4066 t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4067 return 0;
4068 }
4069 if (map > 0xff)
4070 return -EINVAL;
4071
4072#define EPIO_REG(name) \
4073 (is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4074 T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4075
4076 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4077 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4078 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4079
4080 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4081 if (!(map & 1))
4082 continue;
4083
4084 /* write byte masks */
4085 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4086 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4087 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
4088 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4089 return -ETIMEDOUT;
4090
4091 /* write CRC */
4092 t4_write_reg(adap, EPIO_REG(DATA0), crc);
4093 t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4094 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
4095 if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4096 return -ETIMEDOUT;
4097 }
4098#undef EPIO_REG
4099
4100 t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4101 return 0;
4102}
4103
4104/**
4105 * t4_mk_filtdelwr - create a delete filter WR
4106 * @ftid: the filter ID
4107 * @wr: the filter work request to populate
4108 * @qid: ingress queue to receive the delete notification
4109 *
4110 * Creates a filter work request to delete the supplied filter. If @qid is
4111 * negative the delete notification is suppressed.
4112 */
4113void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4114{
4115 memset(wr, 0, sizeof(*wr));
4116 wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4117 wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4118 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4119 V_FW_FILTER_WR_NOREPLY(qid < 0));
4120 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4121 if (qid >= 0)
4122 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4123}
4124
4125#define INIT_CMD(var, cmd, rd_wr) do { \
4126 (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4127 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4128 (var).retval_len16 = htonl(FW_LEN16(var)); \
4129} while (0)
4130
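/**
 *	t4_fwaddrspace_write - write to the firmware LDST address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues an FW_LDST command to write @val at @addr in the
 *	FW_LDST_ADDRSPC_FIRMWARE address space.
 */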
4131int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4132{
4133 struct fw_ldst_cmd c;
4134
4135 memset(&c, 0, sizeof(c));
4136 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4137 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4138 c.cycles_to_len16 = htonl(FW_LEN16(c));
4139 c.u.addrval.addr = htonl(addr);
4140 c.u.addrval.val = htonl(val);
4141
4142 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4143}
4144
4145/**
4146 * t4_i2c_rd - read a byte from an i2c addressable device
4147 * @adap: the adapter
4148 * @mbox: mailbox to use for the FW command
4149 * @port_id: the port id
4150 * @dev_addr: the i2c device address
4151 * @offset: the byte offset to read from
4152 * @valp: where to store the value
4153 */
4154int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
4155 u8 dev_addr, u8 offset, u8 *valp)
4156{
4157 int ret;
4158 struct fw_ldst_cmd c;
4159
4160 memset(&c, 0, sizeof(c));
4161 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4162 F_FW_CMD_READ |
4163 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
4164 c.cycles_to_len16 = htonl(FW_LEN16(c));
4165 c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
4166 c.u.i2c_deprecated.base = dev_addr;
4167 c.u.i2c_deprecated.boffset = offset;
4168
4169 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4170 if (ret == 0)
4171 *valp = c.u.i2c_deprecated.data;
4172 return ret;
4173}
4174
4175/**
4176 * t4_mdio_rd - read a PHY register through MDIO
4177 * @adap: the adapter
4178 * @mbox: mailbox to use for the FW command
4179 * @phy_addr: the PHY address
4180 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4181 * @reg: the register to read
4182 * @valp: where to store the value
4183 *
4184 * Issues a FW command through the given mailbox to read a PHY register.
4185 */
4186int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4187 unsigned int mmd, unsigned int reg, unsigned int *valp)
4188{
4189 int ret;
4190 struct fw_ldst_cmd c;
4191
4192 memset(&c, 0, sizeof(c));
4193 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4194 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4195 c.cycles_to_len16 = htonl(FW_LEN16(c));
4196 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4197 V_FW_LDST_CMD_MMD(mmd));
4198 c.u.mdio.raddr = htons(reg);
4199
4200 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4201 if (ret == 0)
4202 *valp = ntohs(c.u.mdio.rval);
4203 return ret;
4204}
4205
4206/**
4207 * t4_mdio_wr - write a PHY register through MDIO
4208 * @adap: the adapter
4209 * @mbox: mailbox to use for the FW command
4210 * @phy_addr: the PHY address
4211 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
4212 * @reg: the register to write
 *	@val: value to write
4214 *
4215 * Issues a FW command through the given mailbox to write a PHY register.
4216 */
4217int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4218 unsigned int mmd, unsigned int reg, unsigned int val)
4219{
4220 struct fw_ldst_cmd c;
4221
4222 memset(&c, 0, sizeof(c));
4223 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4224 F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4225 c.cycles_to_len16 = htonl(FW_LEN16(c));
4226 c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4227 V_FW_LDST_CMD_MMD(mmd));
4228 c.u.mdio.raddr = htons(reg);
4229 c.u.mdio.rval = htons(val);
4230
4231 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4232}
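
/*
 * Illustrative helper, not part of the driver: a read-modify-write of a PHY
 * register built from the two MDIO accessors above.
 */
static inline int example_mdio_rmw(struct adapter *adap, unsigned int mbox,
				   unsigned int phy_addr, unsigned int mmd,
				   unsigned int reg, unsigned int mask,
				   unsigned int set)
{
	unsigned int val;
	int ret;

	ret = t4_mdio_rd(adap, mbox, phy_addr, mmd, reg, &val);
	if (ret)
		return ret;
	return t4_mdio_wr(adap, mbox, phy_addr, mmd, reg,
			  (val & ~mask) | set);
}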
4233
4234/**
4235 * t4_sge_ctxt_flush - flush the SGE context cache
4236 * @adap: the adapter
4237 * @mbox: mailbox to use for the FW command
4238 *
4239 * Issues a FW command through the given mailbox to flush the
4240 * SGE context cache.
4241 */
4242int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4243{
4244 int ret;
4245 struct fw_ldst_cmd c;
4246
4247 memset(&c, 0, sizeof(c));
4248 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4249 F_FW_CMD_READ |
4250 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4251 c.cycles_to_len16 = htonl(FW_LEN16(c));
4252 c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4253
4254 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4255 return ret;
4256}
4257
4258/**
4259 * t4_sge_ctxt_rd - read an SGE context through FW
4260 * @adap: the adapter
4261 * @mbox: mailbox to use for the FW command
4262 * @cid: the context id
4263 * @ctype: the context type
4264 * @data: where to store the context data
4265 *
4266 * Issues a FW command through the given mailbox to read an SGE context.
4267 */
4268int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4269 enum ctxt_type ctype, u32 *data)
4270{
4271 int ret;
4272 struct fw_ldst_cmd c;
4273
4274 if (ctype == CTXT_EGRESS)
4275 ret = FW_LDST_ADDRSPC_SGE_EGRC;
4276 else if (ctype == CTXT_INGRESS)
4277 ret = FW_LDST_ADDRSPC_SGE_INGC;
4278 else if (ctype == CTXT_FLM)
4279 ret = FW_LDST_ADDRSPC_SGE_FLMC;
4280 else
4281 ret = FW_LDST_ADDRSPC_SGE_CONMC;
4282
4283 memset(&c, 0, sizeof(c));
4284 c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4285 F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4286 c.cycles_to_len16 = htonl(FW_LEN16(c));
4287 c.u.idctxt.physid = htonl(cid);
4288
4289 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4290 if (ret == 0) {
4291 data[0] = ntohl(c.u.idctxt.ctxt_data0);
4292 data[1] = ntohl(c.u.idctxt.ctxt_data1);
4293 data[2] = ntohl(c.u.idctxt.ctxt_data2);
4294 data[3] = ntohl(c.u.idctxt.ctxt_data3);
4295 data[4] = ntohl(c.u.idctxt.ctxt_data4);
4296 data[5] = ntohl(c.u.idctxt.ctxt_data5);
4297 }
4298 return ret;
4299}
4300
4301/**
4302 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4303 * @adap: the adapter
4304 * @cid: the context id
4305 * @ctype: the context type
4306 * @data: where to store the context data
4307 *
4308 * Reads an SGE context directly, bypassing FW. This is only for
4309 * debugging when FW is unavailable.
4310 */
4311int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4312 u32 *data)
4313{
4314 int i, ret;
4315
4316 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4317 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4318 if (!ret)
4319 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4320 *data++ = t4_read_reg(adap, i);
4321 return ret;
4322}
4323
4324/**
4325 * t4_fw_hello - establish communication with FW
4326 * @adap: the adapter
4327 * @mbox: mailbox to use for the FW command
4328 * @evt_mbox: mailbox to receive async FW events
4329 * @master: specifies the caller's willingness to be the device master
4330 * @state: returns the current device state (if non-NULL)
4331 *
4332 * Issues a command to establish communication with FW. Returns either
4333 * an error (negative integer) or the mailbox of the Master PF.
4334 */
4335int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4336 enum dev_master master, enum dev_state *state)
4337{
4338 int ret;
4339 struct fw_hello_cmd c;
4340 u32 v;
4341 unsigned int master_mbox;
4342 int retries = FW_CMD_HELLO_RETRIES;
4343
4344retry:
4345 memset(&c, 0, sizeof(c));
4346 INIT_CMD(c, HELLO, WRITE);
4347 c.err_to_clearinit = htonl(
4348 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4349 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4350 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4351 M_FW_HELLO_CMD_MBMASTER) |
4352 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4353 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4354 F_FW_HELLO_CMD_CLEARINIT);
4355
4356 /*
4357 * Issue the HELLO command to the firmware. If it's not successful
4358 * but indicates that we got a "busy" or "timeout" condition, retry
4359 * the HELLO until we exhaust our retry limit. If we do exceed our
4360 * retry limit, check to see if the firmware left us any error
4361 * information and report that if so ...
4362 */
4363 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4364 if (ret != FW_SUCCESS) {
4365 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4366 goto retry;
4367 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4368 t4_report_fw_error(adap);
4369 return ret;
4370 }
4371
4372 v = ntohl(c.err_to_clearinit);
4373 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4374 if (state) {
4375 if (v & F_FW_HELLO_CMD_ERR)
4376 *state = DEV_STATE_ERR;
4377 else if (v & F_FW_HELLO_CMD_INIT)
4378 *state = DEV_STATE_INIT;
4379 else
4380 *state = DEV_STATE_UNINIT;
4381 }
4382
4383 /*
4384 * If we're not the Master PF then we need to wait around for the
4385 * Master PF Driver to finish setting up the adapter.
4386 *
4387 * Note that we also do this wait if we're a non-Master-capable PF and
4388 * there is no current Master PF; a Master PF may show up momentarily
4389 * and we wouldn't want to fail pointlessly. (This can happen when an
4390 * OS loads lots of different drivers rapidly at the same time). In
4391 * this case, the Master PF returned by the firmware will be
4392 * M_PCIE_FW_MASTER so the test below will work ...
4393 */
4394 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4395 master_mbox != mbox) {
4396 int waiting = FW_CMD_HELLO_TIMEOUT;
4397
4398 /*
4399 * Wait for the firmware to either indicate an error or
4400 * initialized state. If we see either of these we bail out
4401 * and report the issue to the caller. If we exhaust the
4402 * "hello timeout" and we haven't exhausted our retries, try
4403 * again. Otherwise bail with a timeout error.
4404 */
4405 for (;;) {
4406 u32 pcie_fw;
4407
4408 msleep(50);
4409 waiting -= 50;
4410
4411 /*
			 * If neither Error nor Initialized is indicated
			 * by the firmware, keep waiting till we exhaust our
4414 * timeout ... and then retry if we haven't exhausted
4415 * our retries ...
4416 */
4417 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4418 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4419 if (waiting <= 0) {
4420 if (retries-- > 0)
4421 goto retry;
4422
4423 return -ETIMEDOUT;
4424 }
4425 continue;
4426 }
4427
4428 /*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
4431 */
4432 if (state) {
4433 if (pcie_fw & F_PCIE_FW_ERR)
4434 *state = DEV_STATE_ERR;
4435 else if (pcie_fw & F_PCIE_FW_INIT)
4436 *state = DEV_STATE_INIT;
4437 }
4438
4439 /*
4440 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
4442 * for our caller.
4443 */
4444 if (master_mbox == M_PCIE_FW_MASTER &&
4445 (pcie_fw & F_PCIE_FW_MASTER_VLD))
4446 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4447 break;
4448 }
4449 }
4450
4451 return master_mbox;
4452}
4453
4454/**
4455 * t4_fw_bye - end communication with FW
4456 * @adap: the adapter
4457 * @mbox: mailbox to use for the FW command
4458 *
4459 * Issues a command to terminate communication with FW.
4460 */
4461int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4462{
4463 struct fw_bye_cmd c;
4464
4465 memset(&c, 0, sizeof(c));
4466 INIT_CMD(c, BYE, WRITE);
4467 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4468}
4469
4470/**
4471 * t4_fw_reset - issue a reset to FW
4472 * @adap: the adapter
4473 * @mbox: mailbox to use for the FW command
4474 * @reset: specifies the type of reset to perform
4475 *
4476 * Issues a reset command of the specified type to FW.
4477 */
4478int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4479{
4480 struct fw_reset_cmd c;
4481
4482 memset(&c, 0, sizeof(c));
4483 INIT_CMD(c, RESET, WRITE);
4484 c.val = htonl(reset);
4485 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4486}
4487
4488/**
4489 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4490 * @adap: the adapter
4491 * @mbox: mailbox to use for the FW RESET command (if desired)
4492 * @force: force uP into RESET even if FW RESET command fails
4493 *
4494 * Issues a RESET command to firmware (if desired) with a HALT indication
4495 * and then puts the microprocessor into RESET state. The RESET command
4496 * will only be issued if a legitimate mailbox is provided (mbox <=
4497 * M_PCIE_FW_MASTER).
4498 *
4499 * This is generally used in order for the host to safely manipulate the
4500 * adapter without fear of conflicting with whatever the firmware might
4501 * be doing. The only way out of this state is to RESTART the firmware
4502 * ...
4503 */
4504int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4505{
4506 int ret = 0;
4507
4508 /*
4509 * If a legitimate mailbox is provided, issue a RESET command
4510 * with a HALT indication.
4511 */
4512 if (mbox <= M_PCIE_FW_MASTER) {
4513 struct fw_reset_cmd c;
4514
4515 memset(&c, 0, sizeof(c));
4516 INIT_CMD(c, RESET, WRITE);
4517 c.val = htonl(F_PIORST | F_PIORSTMODE);
4518 c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4519 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4520 }
4521
4522 /*
4523 * Normally we won't complete the operation if the firmware RESET
4524 * command fails but if our caller insists we'll go ahead and put the
4525 * uP into RESET. This can be useful if the firmware is hung or even
4526 * missing ... We'll have to take the risk of putting the uP into
4527 * RESET without the cooperation of firmware in that case.
4528 *
4529 * We also force the firmware's HALT flag to be on in case we bypassed
4530 * the firmware RESET command above or we're dealing with old firmware
4531 * which doesn't have the HALT capability. This will serve as a flag
4532 * for the incoming firmware to know that it's coming out of a HALT
4533 * rather than a RESET ... if it's new enough to understand that ...
4534 */
4535 if (ret == 0 || force) {
4536 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4537 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4538 }
4539
4540 /*
4541 * And we always return the result of the firmware RESET command
4542 * even when we force the uP into RESET ...
4543 */
4544 return ret;
4545}
4546
4547/**
4548 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
4551 *
4552 * Restart firmware previously halted by t4_fw_halt(). On successful
4553 * return the previous PF Master remains as the new PF Master and there
4554 * is no need to issue a new HELLO command, etc.
4555 *
4556 * We do this in two ways:
4557 *
4558 * 1. If we're dealing with newer firmware we'll simply want to take
4559 * the chip's microprocessor out of RESET. This will cause the
4560 * firmware to start up from its start vector. And then we'll loop
4561 * until the firmware indicates it's started again (PCIE_FW.HALT
4562 * reset to 0) or we timeout.
4563 *
4564 * 2. If we're dealing with older firmware then we'll need to RESET
4565 * the chip since older firmware won't recognize the PCIE_FW.HALT
4566 * flag and automatically RESET itself on startup.
4567 */
4568int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4569{
4570 if (reset) {
4571 /*
4572 * Since we're directing the RESET instead of the firmware
4573 * doing it automatically, we need to clear the PCIE_FW.HALT
4574 * bit.
4575 */
4576 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4577
4578 /*
4579 * If we've been given a valid mailbox, first try to get the
4580 * firmware to do the RESET. If that works, great and we can
4581 * return success. Otherwise, if we haven't been given a
4582 * valid mailbox or the RESET command failed, fall back to
4583 * hitting the chip with a hammer.
4584 */
4585 if (mbox <= M_PCIE_FW_MASTER) {
4586 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4587 msleep(100);
4588 if (t4_fw_reset(adap, mbox,
4589 F_PIORST | F_PIORSTMODE) == 0)
4590 return 0;
4591 }
4592
4593 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4594 msleep(2000);
4595 } else {
4596 int ms;
4597
4598 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4599 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4600 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4601 return FW_SUCCESS;
4602 msleep(100);
4603 ms += 100;
4604 }
4605 return -ETIMEDOUT;
4606 }
4607 return 0;
4608}
4609
4610/**
4611 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4612 * @adap: the adapter
4613 * @mbox: mailbox to use for the FW RESET command (if desired)
4614 * @fw_data: the firmware image to write
4615 * @size: image size
4616 * @force: force upgrade even if firmware doesn't cooperate
4617 *
4618 * Perform all of the steps necessary for upgrading an adapter's
4619 * firmware image. Normally this requires the cooperation of the
4620 * existing firmware in order to halt all existing activities
4621 * but if an invalid mailbox token is passed in we skip that step
4622 * (though we'll still put the adapter microprocessor into RESET in
4623 * that case).
4624 *
4625 * On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET, losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact; a
4629 * negative errno indicates that things are looking bad ...
4630 */
4631int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4632 const u8 *fw_data, unsigned int size, int force)
4633{
4634 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4635 unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4636 int reset, ret;
4637
4638 if (!bootstrap) {
4639 ret = t4_fw_halt(adap, mbox, force);
4640 if (ret < 0 && !force)
4641 return ret;
4642 }
4643
4644 ret = t4_load_fw(adap, fw_data, size);
4645 if (ret < 0 || bootstrap)
4646 return ret;
4647
4648 /*
4649 * Older versions of the firmware don't understand the new
4650 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4651 * restart. So for newly loaded older firmware we'll have to do the
4652 * RESET for it so it starts up on a clean slate. We can tell if
4653 * the newly loaded firmware will handle this right by checking
4654 * its header flags to see if it advertises the capability.
4655 */
4656 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4657 return t4_fw_restart(adap, mbox, reset);
4658}
4659
4660/**
4661 * t4_fw_initialize - ask FW to initialize the device
4662 * @adap: the adapter
4663 * @mbox: mailbox to use for the FW command
4664 *
4665 * Issues a command to FW to partially initialize the device. This
4666 * performs initialization that generally doesn't depend on user input.
4667 */
4668int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4669{
4670 struct fw_initialize_cmd c;
4671
4672 memset(&c, 0, sizeof(c));
4673 INIT_CMD(c, INITIALIZE, WRITE);
4674 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4675}
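
/*
 * Flow sketch (illustrative, not part of the driver): the usual attach-time
 * handshake built from the commands above -- contact the firmware, and if
 * the device is still uninitialized and we were elected master, ask FW to
 * initialize it.  MASTER_MAY is assumed to be the caller's usual "willing
 * but not insistent" choice.
 */
static inline int example_contact_fw(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int master;

	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
	if (master < 0)
		return master;
	if (master == mbox && state == DEV_STATE_UNINIT)
		return t4_fw_initialize(adap, mbox);
	return 0;
}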
4676
4677/**
4678 * t4_query_params - query FW or device parameters
4679 * @adap: the adapter
4680 * @mbox: mailbox to use for the FW command
4681 * @pf: the PF
4682 * @vf: the VF
4683 * @nparams: the number of parameters
4684 * @params: the parameter names
4685 * @val: the parameter values
4686 *
4687 * Reads the value of FW or device parameters. Up to 7 parameters can be
4688 * queried at once.
4689 */
4690int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4691 unsigned int vf, unsigned int nparams, const u32 *params,
4692 u32 *val)
4693{
4694 int i, ret;
4695 struct fw_params_cmd c;
4696 __be32 *p = &c.param[0].mnem;
4697
4698 if (nparams > 7)
4699 return -EINVAL;
4700
4701 memset(&c, 0, sizeof(c));
4702 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4703 F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4704 V_FW_PARAMS_CMD_VFN(vf));
4705 c.retval_len16 = htonl(FW_LEN16(c));
4706
4707 for (i = 0; i < nparams; i++, p += 2, params++)
4708 *p = htonl(*params);
4709
4710 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4711 if (ret == 0)
4712 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4713 *val++ = ntohl(*p);
4714 return ret;
4715}
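
/*
 * Usage sketch (illustrative, not part of the driver): read the device's
 * port vector with a single-parameter query.  The V_FW_PARAMS_MNEM /
 * V_FW_PARAMS_PARAM_X mnemonics are assumed to come from t4fw_interface.h
 * as elsewhere in the driver.
 */
static inline int example_query_portvec(struct adapter *adap,
					unsigned int mbox, unsigned int pf,
					unsigned int vf, u32 *portvec)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	return t4_query_params(adap, mbox, pf, vf, 1, &param, portvec);
}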
4716
4717/**
4718 * t4_set_params - sets FW or device parameters
4719 * @adap: the adapter
4720 * @mbox: mailbox to use for the FW command
4721 * @pf: the PF
4722 * @vf: the VF
4723 * @nparams: the number of parameters
4724 * @params: the parameter names
4725 * @val: the parameter values
4726 *
4727 * Sets the value of FW or device parameters. Up to 7 parameters can be
4728 * specified at once.
4729 */
4730int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4731 unsigned int vf, unsigned int nparams, const u32 *params,
4732 const u32 *val)
4733{
4734 struct fw_params_cmd c;
4735 __be32 *p = &c.param[0].mnem;
4736
4737 if (nparams > 7)
4738 return -EINVAL;
4739
4740 memset(&c, 0, sizeof(c));
4741 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4742 F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4743 V_FW_PARAMS_CMD_VFN(vf));
4744 c.retval_len16 = htonl(FW_LEN16(c));
4745
4746 while (nparams--) {
4747 *p++ = htonl(*params);
4748 params++;
4749 *p++ = htonl(*val);
4750 val++;
4751 }
4752
4753 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4754}
4755
4756/**
4757 * t4_cfg_pfvf - configure PF/VF resource limits
4758 * @adap: the adapter
4759 * @mbox: mailbox to use for the FW command
4760 * @pf: the PF being configured
4761 * @vf: the VF being configured
4762 * @txq: the max number of egress queues
4763 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
4764 * @rxqi: the max number of interrupt-capable ingress queues
4765 * @rxq: the max number of interruptless ingress queues
4766 * @tc: the PCI traffic class
4767 * @vi: the max number of virtual interfaces
4768 * @cmask: the channel access rights mask for the PF/VF
4769 * @pmask: the port access rights mask for the PF/VF
4770 * @nexact: the maximum number of exact MPS filters
4771 * @rcaps: read capabilities
4772 * @wxcaps: write/execute capabilities
4773 *
4774 * Configures resource limits and capabilities for a physical or virtual
4775 * function.
4776 */
4777int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4778 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4779 unsigned int rxqi, unsigned int rxq, unsigned int tc,
4780 unsigned int vi, unsigned int cmask, unsigned int pmask,
4781 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4782{
4783 struct fw_pfvf_cmd c;
4784
4785 memset(&c, 0, sizeof(c));
4786 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4787 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4788 V_FW_PFVF_CMD_VFN(vf));
4789 c.retval_len16 = htonl(FW_LEN16(c));
4790 c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4791 V_FW_PFVF_CMD_NIQ(rxq));
4792 c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4793 V_FW_PFVF_CMD_PMASK(pmask) |
4794 V_FW_PFVF_CMD_NEQ(txq));
4795 c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4796 V_FW_PFVF_CMD_NEXACTF(nexact));
4797 c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4798 V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4799 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4800 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4801}
4802
4803/**
4804 * t4_alloc_vi_func - allocate a virtual interface
4805 * @adap: the adapter
4806 * @mbox: mailbox to use for the FW command
4807 * @port: physical port associated with the VI
4808 * @pf: the PF owning the VI
4809 * @vf: the VF owning the VI
4810 * @nmac: number of MAC addresses needed (1 to 5)
4811 * @mac: the MAC addresses of the VI
4812 * @rss_size: size of RSS table slice associated with this VI
4813 * @portfunc: which Port Application Function MAC Address is desired
4814 * @idstype: Intrusion Detection Type
4815 *
4816 * Allocates a virtual interface for the given physical port. If @mac is
4817 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
4820 * Returns a negative error number or the non-negative VI id.
4821 */
4822int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4823 unsigned int port, unsigned int pf, unsigned int vf,
4824 unsigned int nmac, u8 *mac, unsigned int *rss_size,
4825 unsigned int portfunc, unsigned int idstype)
4826{
4827 int ret;
4828 struct fw_vi_cmd c;
4829
4830 memset(&c, 0, sizeof(c));
4831 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4832 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4833 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4834 c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4835 c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4836 V_FW_VI_CMD_FUNC(portfunc));
4837 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4838 c.nmac = nmac - 1;
4839
4840 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4841 if (ret)
4842 return ret;
4843
4844 if (mac) {
4845 memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
4856 }
4857 if (rss_size)
4858 *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4860}
4861
4862/**
4863 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4864 * @adap: the adapter
4865 * @mbox: mailbox to use for the FW command
4866 * @port: physical port associated with the VI
4867 * @pf: the PF owning the VI
4868 * @vf: the VF owning the VI
4869 * @nmac: number of MAC addresses needed (1 to 5)
4870 * @mac: the MAC addresses of the VI
4871 * @rss_size: size of RSS table slice associated with this VI
4872 *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and with
 *	Intrusion Detection disabled.
4876 */
4877int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4878 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4879 unsigned int *rss_size)
4880{
4881 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4882 FW_VI_FUNC_ETH, 0);
4883}
4884
4885/**
4886 * t4_free_vi - free a virtual interface
4887 * @adap: the adapter
4888 * @mbox: mailbox to use for the FW command
4889 * @pf: the PF owning the VI
4890 * @vf: the VF owning the VI
 *	@viid: virtual interface identifier
4892 *
4893 * Free a previously allocated virtual interface.
4894 */
4895int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4896 unsigned int vf, unsigned int viid)
4897{
4898 struct fw_vi_cmd c;
4899
4900 memset(&c, 0, sizeof(c));
4901 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4902 F_FW_CMD_REQUEST |
4903 F_FW_CMD_EXEC |
4904 V_FW_VI_CMD_PFN(pf) |
4905 V_FW_VI_CMD_VFN(vf));
4906 c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4907 c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4908
4909 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4910}
4911
4912/**
4913 * t4_set_rxmode - set Rx properties of a virtual interface
4914 * @adap: the adapter
4915 * @mbox: mailbox to use for the FW command
4916 * @viid: the VI id
4917 * @mtu: the new MTU or -1
4918 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4919 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4920 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable hardware VLAN extraction, 0 to disable it, -1 no change
4922 * @sleep_ok: if true we may sleep while awaiting command completion
4923 *
4924 * Sets Rx properties of a virtual interface.
4925 */
4926int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4927 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4928 bool sleep_ok)
4929{
4930 struct fw_vi_rxmode_cmd c;
4931
4932 /* convert to FW values */
4933 if (mtu < 0)
4934 mtu = M_FW_VI_RXMODE_CMD_MTU;
4935 if (promisc < 0)
4936 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4937 if (all_multi < 0)
4938 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4939 if (bcast < 0)
4940 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4941 if (vlanex < 0)
4942 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4943
4944 memset(&c, 0, sizeof(c));
4945 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4946 F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4947 c.retval_len16 = htonl(FW_LEN16(c));
4948 c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4949 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4950 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4951 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4952 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4953 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4954}
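
/*
 * Usage sketch (illustrative, not part of the driver): toggle promiscuous
 * mode only, leaving the MTU, all-multi, broadcast, and VLAN-extraction
 * settings untouched via the -1 "no change" convention documented above.
 */
static inline int example_set_promisc(struct adapter *adap, unsigned int mbox,
				      unsigned int viid, int on)
{
	return t4_set_rxmode(adap, mbox, viid, -1, !!on, -1, -1, -1, true);
}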
4955
4956/**
4957 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4958 * @adap: the adapter
4959 * @mbox: mailbox to use for the FW command
4960 * @viid: the VI id
4961 * @free: if true any existing filters for this VI id are first removed
4962 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
4963 * @addr: the MAC address(es)
4964 * @idx: where to store the index of each allocated filter
4965 * @hash: pointer to hash address filter bitmap
4966 * @sleep_ok: call is allowed to sleep
4967 *
4968 * Allocates an exact-match filter for each of the supplied addresses and
4969 * sets it to the corresponding address. If @idx is not %NULL it should
4970 * have at least @naddr entries, each of which will be set to the index of
4971 * the filter allocated for the corresponding MAC address. If a filter
4972 * could not be allocated for an address its index is set to 0xffff.
4973 * If @hash is not %NULL addresses that fail to allocate an exact filter
4974 * are hashed and update the hash filter bitmap pointed at by @hash.
4975 *
4976 * Returns a negative error number or the number of filters allocated.
4977 */
4978int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4979 unsigned int viid, bool free, unsigned int naddr,
4980 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4981{
4982 int offset, ret = 0;
4983 struct fw_vi_mac_cmd c;
4984 unsigned int nfilters = 0;
4985 unsigned int max_naddr = is_t4(adap) ?
4986 NUM_MPS_CLS_SRAM_L_INSTANCES :
4987 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4988 unsigned int rem = naddr;
4989
4990 if (naddr > max_naddr)
4991 return -EINVAL;
4992
4993 for (offset = 0; offset < naddr ; /**/) {
4994 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4995 ? rem
4996 : ARRAY_SIZE(c.u.exact));
4997 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4998 u.exact[fw_naddr]), 16);
4999 struct fw_vi_mac_exact *p;
5000 int i;
5001
5002 memset(&c, 0, sizeof(c));
5003 c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5004 F_FW_CMD_REQUEST |
5005 F_FW_CMD_WRITE |
5006 V_FW_CMD_EXEC(free) |
5007 V_FW_VI_MAC_CMD_VIID(viid));
5008 c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5009 V_FW_CMD_LEN16(len16));
5010
5011 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5012 p->valid_to_idx = htons(
5013 F_FW_VI_MAC_CMD_VALID |
5014 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5015 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5016 }
5017
5018 /*
5019 * It's okay if we run out of space in our MAC address arena.
5020 * Some of the addresses we submit may get stored so we need
5021 * to run through the reply to see what the results were ...
5022 */
5023 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5024 if (ret && ret != -FW_ENOMEM)
5025 break;
5026
5027 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5028 u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5029
5030 if (idx)
5031 idx[offset+i] = (index >= max_naddr
5032 ? 0xffff
5033 : index);
5034 if (index < max_naddr)
5035 nfilters++;
5036 else if (hash)
5037 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5038 }
5039
5040 free = false;
5041 offset += fw_naddr;
5042 rem -= fw_naddr;
5043 }
5044
5045 if (ret == 0 || ret == -FW_ENOMEM)
5046 ret = nfilters;
5047 return ret;
5048}

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
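
/*
 * Usage sketch (editor's example, not compiled into the driver): a typical
 * caller keeps the index returned by t4_change_mac() so the same filter can
 * be rewritten later.  The xact_addr_filt name follows the convention other
 * Chelsio drivers use and is only an assumption here.
 */
#if 0
static int example_set_hwaddr(struct adapter *adap, unsigned int mbox,
			      unsigned int viid, int *xact_addr_filt,
			      const u8 *new_addr)
{
	int ret;

	/*
	 * Pass -1 the first time to allocate a fresh filter; on later calls
	 * the saved index lets the firmware rewrite the existing entry.
	 */
	ret = t4_change_mac(adap, mbox, viid, *xact_addr_filt, new_addr,
			    true, true);
	if (ret >= 0) {
		*xact_addr_filt = ret;	/* may differ from the old index */
		ret = 0;
	}
	return ret;
}
#endif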

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    V_FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
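
/*
 * Usage sketch (editor's example, not compiled into the driver): building
 * the 64-bit hash vector for a multicast list.  Each address selects one
 * bit via hash_mac_addr(), exactly as the exact-filter overflow path in
 * t4_alloc_mac_filt() does.
 */
#if 0
static int example_set_mcast_hash(struct adapter *adap, unsigned int mbox,
				  unsigned int viid, const u8 (*mcaddr)[6],
				  int nmc)
{
	u64 vec = 0;
	int i;

	for (i = 0; i < nmc; i++)
		vec |= 1ULL << hash_mac_addr(mcaddr[i]);
	/* ucast = false: the hash should match multicast addresses only */
	return t4_set_addr_hash(adap, mbox, viid, false, vec, true);
}
#endif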

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
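
/*
 * Usage sketch (editor's example, not compiled into the driver): a typical
 * bring-up pairs t4_enable_vi() with a short LED blink so the operator can
 * locate the physical port.  Ten blinks at 2.5 Hz is four seconds.
 */
#if 0
static int example_bring_up(struct adapter *adap, unsigned int mbox,
			    unsigned int viid)
{
	int ret;

	ret = t4_enable_vi(adap, mbox, viid, true, true);	/* Rx and Tx */
	if (ret == 0)
		ret = t4_identify_port(adap, mbox, viid, 10);
	return ret;
}
#endif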

/**
 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @start: %true to enable the queues, %false to disable them
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Starts or stops an ingress queue and its associated FLs, if any.
 */
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
			    V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
			    V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
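
/*
 * Usage sketch (editor's example, not compiled into the driver): freeing an
 * interrupt-capable ingress queue that has a single free list attached.
 * 0xffff marks the missing FL1, matching the sentinel documented above.
 */
#if 0
static int example_free_rxq(struct adapter *adap, unsigned int mbox,
			    unsigned int pf, unsigned int vf,
			    unsigned int iqid, unsigned int flid)
{
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP, iqid,
			  flid, 0xffff);
}
#endif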

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
			    V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
			    V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
			    V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
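
/*
 * Usage sketch (editor's example, not compiled into the driver): the three
 * egress-queue free routines above differ only in the FW command they
 * issue, so a caller that tracks its queue flavor can dispatch with a
 * simple switch.  The eq_type enumeration is invented for this example.
 */
#if 0
enum example_eq_type { EXAMPLE_EQ_ETH, EXAMPLE_EQ_CTRL, EXAMPLE_EQ_OFLD };

static int example_eq_free(struct adapter *adap, unsigned int mbox,
			   unsigned int pf, unsigned int vf,
			   enum example_eq_type type, unsigned int eqid)
{
	switch (type) {
	case EXAMPLE_EQ_ETH:
		return t4_eth_eq_free(adap, mbox, pf, vf, eqid);
	case EXAMPLE_EQ_CTRL:
		return t4_ctrl_eq_free(adap, mbox, pf, vf, eqid);
	case EXAMPLE_EQ_OFLD:
		return t4_ofld_eq_free(adap, mbox, pf, vf, eqid);
	}
	return -EINVAL;
}
#endif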

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = SPEED_40000;

		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			int reason;

			if (!link_ok && lc->link_ok)
				reason = G_FW_PORT_CMD_LINKDNRC(stat);
			else
				reason = -1;

			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = ntohs(p->u.info.pcap);
			t4_os_link_changed(adap, i, link_ok, reason);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
	} else {
		CH_WARN_RATELIMIT(adap,
		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
		return -EINVAL;
	}
	return 0;
}

/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

static int __devinit get_flash_params(struct adapter *adapter)
{
	int ret;
	u32 info = 0;

	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &info);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adapter->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adapter->params.sf_nsec = 64;
	else
		return -EINVAL;
	adapter->params.sf_size = 1 << info;
	return 0;
}
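
/*
 * Worked example of the decode above (editor's note): a 4 MB Numonix part
 * reports a size byte of 0x16 (log2(4 MB) = 22), so sf_size becomes
 * 1 << 22 = 4 MB and sf_nsec becomes 1 << (22 - 16) = 64 sectors.
 */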

static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
						  u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}
	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_flash_params(adapter);
	if (ret < 0)
		return ret;

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == adapter->params.chipid)
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set PCIe completion timeout to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int __devinit t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/*
	 * Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1,
			 A_TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 A_TP_INGRESS_CONFIG);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE: field_shift += W_FT_FCOE; break;
		case F_PORT: field_shift += W_FT_PORT; break;
		case F_VNIC_ID: field_shift += W_FT_VNIC_ID; break;
		case F_VLAN: field_shift += W_FT_VLAN; break;
		case F_TOS: field_shift += W_FT_TOS; break;
		case F_PROTOCOL: field_shift += W_FT_PROTOCOL; break;
		case F_ETHERTYPE: field_shift += W_FT_ETHERTYPE; break;
		case F_MACMATCH: field_shift += W_FT_MACMATCH; break;
		case F_MPSHITTYPE: field_shift += W_FT_MPSHITTYPE; break;
		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
		}
	}
	return field_shift;
}
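
/*
 * Worked example (editor's note): with a filter mode of
 * F_PORT | F_VLAN | F_PROTOCOL, the enabled fields pack from bit 0 in
 * ascending selector-bit order, so t4_filter_field_shift() returns 0 for
 * F_PORT, W_FT_PORT for F_VLAN, and W_FT_PORT + W_FT_VLAN for F_PROTOCOL.
 */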

int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	unsigned int rss_size;
	adapter_t *adap = p->adapter;

	memset(&c, 0, sizeof(c));

	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
			       V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->viid = ret;
	p->tx_chan = j;
	p->lport = j;
	p->rss_size = rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	ret = ntohl(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
	    G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

	return 0;
}
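
/*
 * Usage sketch (editor's example, not compiled into the driver): a typical
 * attach path calls t4_port_init() once per port after the firmware
 * handshake, with the port_info array coming from the caller's softc.
 */
#if 0
static int example_init_ports(struct adapter *adap, int mbox, int pf,
			      struct port_info *ports, int nports)
{
	int i, ret;

	for (i = 0; i < nports; i++) {
		ports[i].adapter = adap;
		ports[i].port_id = i;
		ret = t4_port_init(&ports[i], mbox, pf, 0);	/* vf 0 */
		if (ret)
			return ret;
	}
	return 0;
}
#endif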