/* t4_hw.c — revision 309560 */
1/*-
2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.c 309560 2016-12-05 20:43:25Z jhb $");
29
30#include "opt_inet.h"
31
32#include "common.h"
33#include "t4_regs.h"
34#include "t4_regs_values.h"
35#include "firmware/t4fw_interface.h"
36
#undef msleep
/*
 * Sleep for @x milliseconds.  During early boot (cold) the scheduler is not
 * running so we must busy-wait with DELAY(); otherwise we yield the CPU via
 * pause(9) for the equivalent number of ticks.
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
44
45/**
46 *	t4_wait_op_done_val - wait until an operation is completed
47 *	@adapter: the adapter performing the operation
48 *	@reg: the register to check for completion
49 *	@mask: a single-bit field within @reg that indicates completion
50 *	@polarity: the value of the field when the operation is completed
51 *	@attempts: number of check iterations
52 *	@delay: delay in usecs between iterations
53 *	@valp: where to store the value of the register at completion time
54 *
55 *	Wait until an operation is completed by checking a bit in a register
56 *	up to @attempts times.  If @valp is not NULL the value of the register
57 *	at the time it indicated completion is stored there.  Returns 0 if the
58 *	operation completes and	-EAGAIN	otherwise.
59 */
60static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61			       int polarity, int attempts, int delay, u32 *valp)
62{
63	while (1) {
64		u32 val = t4_read_reg(adapter, reg);
65
66		if (!!(val & mask) == polarity) {
67			if (valp)
68				*valp = val;
69			return 0;
70		}
71		if (--attempts == 0)
72			return -EAGAIN;
73		if (delay)
74			udelay(delay);
75	}
76}
77
/*
 * Same as t4_wait_op_done_val() but for callers that do not need the final
 * register value.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
84
85/**
86 *	t4_set_reg_field - set a register field to a value
87 *	@adapter: the adapter to program
88 *	@addr: the register address
89 *	@mask: specifies the portion of the register to modify
90 *	@val: the new value for the register field
91 *
92 *	Sets a register field specified by the supplied mask to the
93 *	given value.
94 */
95void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
96		      u32 val)
97{
98	u32 v = t4_read_reg(adapter, addr) & ~mask;
99
100	t4_write_reg(adapter, addr, v | val);
101	(void) t4_read_reg(adapter, addr);      /* flush */
102}
103
104/**
105 *	t4_read_indirect - read indirectly addressed registers
106 *	@adap: the adapter
107 *	@addr_reg: register holding the indirect address
108 *	@data_reg: register holding the value of the indirect register
109 *	@vals: where the read register values are stored
110 *	@nregs: how many indirect registers to read
111 *	@start_idx: index of first indirect register to read
112 *
113 *	Reads registers that are accessed indirectly through an address/data
114 *	register pair.
115 */
116void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117			     unsigned int data_reg, u32 *vals,
118			     unsigned int nregs, unsigned int start_idx)
119{
120	while (nregs--) {
121		t4_write_reg(adap, addr_reg, start_idx);
122		*vals++ = t4_read_reg(adap, data_reg);
123		start_idx++;
124	}
125}
126
127/**
128 *	t4_write_indirect - write indirectly addressed registers
129 *	@adap: the adapter
130 *	@addr_reg: register holding the indirect addresses
131 *	@data_reg: register holding the value for the indirect registers
132 *	@vals: values to write
133 *	@nregs: how many indirect registers to write
134 *	@start_idx: address of first indirect register to write
135 *
136 *	Writes a sequential block of registers that are accessed indirectly
137 *	through an address/data register pair.
138 */
139void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140		       unsigned int data_reg, const u32 *vals,
141		       unsigned int nregs, unsigned int start_idx)
142{
143	while (nregs--) {
144		t4_write_reg(adap, addr_reg, start_idx++);
145		t4_write_reg(adap, data_reg, *vals++);
146	}
147}
148
/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 *
 * N.B. This routine should only be used as a last resort: the firmware uses
 *      the backdoor registers on a regular basis and we can end up
 *      conflicting with its uses!
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	/* T6 moved the request-enable bit; pick the right one per chip. */
	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	/* T4 additionally requires the LOCALCFG bit in the request. */
	if (is_t4(adap))
		req |= F_LOCALCFG;

	/* Issue the request, then read the resulting data word. */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}
185
186/*
187 * t4_report_fw_error - report firmware error
188 * @adap: the adapter
189 *
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
193 */
194static void t4_report_fw_error(struct adapter *adap)
195{
196	static const char *const reason[] = {
197		"Crash",			/* PCIE_FW_EVAL_CRASH */
198		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
199		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
200		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
201		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
202		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
203		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
204		"Reserved",			/* reserved */
205	};
206	u32 pcie_fw;
207
208	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
209	if (pcie_fw & F_PCIE_FW_ERR)
210		CH_ERR(adap, "Firmware reports adapter error: %s\n",
211			reason[G_PCIE_FW_EVAL(pcie_fw)]);
212}
213
214/*
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
216 */
217static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
218			 u32 mbox_addr)
219{
220	for ( ; nflit; nflit--, mbox_addr += 8)
221		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
222}
223
224/*
225 * Handle a FW assertion reported in a mailbox.
226 */
227static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
228{
229	CH_ALERT(adap,
230		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
231		  asrt->u.assert.filename_0_7,
232		  be32_to_cpu(asrt->u.assert.line),
233		  be32_to_cpu(asrt->u.assert.x),
234		  be32_to_cpu(asrt->u.assert.y));
235}
236
/*
 * Value read back from the mailbox control register while the CIM does not
 * grant us access (e.g. the chip is wedged or mid-reset).
 */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {	/* milliseconds */
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use a different (and T6-dependent) set of mailbox registers. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.  NOTE(review): reading the
	 * control register appears to be what triggers ownership assignment;
	 * we give the hardware up to four reads to hand the mailbox over —
	 * confirm against the CIM mailbox documentation.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access to the mailbox, report the error
	 * to our caller: -EBUSY if the firmware owns it, -ETIMEDOUT if no
	 * owner was ever assigned.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand the mailbox to the firmware and flush the doorbell write. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		/* PFs also watch for asynchronous firmware-error reports. */
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		/* Control register unreadable: chip not granting access. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* We own the mailbox but there's no message yet. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			/*
			 * An unsolicited FW_DEBUG_CMD in the reply slot means
			 * the firmware asserted; log it and report -EIO.
			 */
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* FW returns a positive errno in RETVAL; negate it. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
436
437int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
438		    void *rpl, bool sleep_ok)
439{
440		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
441					       sleep_ok, FW_CMD_MAX_TIMEOUT);
442
443}
444
/*
 * t4_edc_err_read - log EDC ECC error state (T5 and later only)
 * @adap: the adapter
 * @idx: which EDC controller (0 or 1)
 *
 * Dumps the EDC ECC error address register and the nine 64-bit words of
 * BIST status data for the given EDC controller.  Warns and returns 0
 * (without reading anything) on T4 or for an out-of-range @idx.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
481
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC; later chips have per-controller blocks. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Bail if a BIST operation is already in flight. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte backdoor read of the line covering @addr. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	/* Wait (up to 10 polls, 1us apart) for F_START_BIST to clear. */
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* Data words are read highest-index first and byte-swapped. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
536
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing in the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						    idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Bail if a BIST operation is already in flight. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte backdoor read of the line covering @addr. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	/* Wait (up to 10 polls, 1us apart) for F_START_BIST to clear. */
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* Data words are read highest-index first and byte-swapped. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
601
602/**
603 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
604 *	@adap: the adapter
605 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
606 *	@addr: address within indicated memory type
607 *	@len: amount of memory to read
608 *	@buf: host memory buffer
609 *
610 *	Reads an [almost] arbitrary memory region in the firmware: the
611 *	firmware memory address, length and host buffer must be aligned on
612 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
613 *	the firmware's memory.  If this memory contains data structures which
614 *	contain multi-byte integers, it's the callers responsibility to
615 *	perform appropriate byte order conversions.
616 */
617int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
618		__be32 *buf)
619{
620	u32 pos, start, end, offset;
621	int ret;
622
623	/*
624	 * Argument sanity checks ...
625	 */
626	if ((addr & 0x3) || (len & 0x3))
627		return -EINVAL;
628
629	/*
630	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
631	 * need to round down the start and round up the end.  We'll start
632	 * copying out of the first line at (addr - start) a word at a time.
633	 */
634	start = addr & ~(64-1);
635	end = (addr + len + 64-1) & ~(64-1);
636	offset = (addr - start)/sizeof(__be32);
637
638	for (pos = start; pos < end; pos += 64, offset = 0) {
639		__be32 data[16];
640
641		/*
642		 * Read the chip's memory block and bail if there's an error.
643		 */
644		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
645			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
646		else
647			ret = t4_edc_read(adap, mtype, pos, data, NULL);
648		if (ret)
649			return ret;
650
651		/*
652		 * Copy the data into the caller's memory buffer.
653		 */
654		while (offset < 16 && len > 0) {
655			*buf++ = data[offset++];
656			len -= sizeof(__be32);
657		}
658	}
659
660	return 0;
661}
662
663/*
664 * Return the specified PCI-E Configuration Space register from our Physical
665 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
666 * since we prefer to let the firmware own all of these registers, but if that
667 * fails we go for it directly ourselves.
668 */
669u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
670{
671
672	/*
673	 * If fw_attach != 0, construct and send the Firmware LDST Command to
674	 * retrieve the specified PCI-E Configuration Space register.
675	 */
676	if (drv_fw_attach != 0) {
677		struct fw_ldst_cmd ldst_cmd;
678		int ret;
679
680		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
681		ldst_cmd.op_to_addrspace =
682			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
683				    F_FW_CMD_REQUEST |
684				    F_FW_CMD_READ |
685				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
686		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
687		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
688		ldst_cmd.u.pcie.ctrl_to_fn =
689			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
690		ldst_cmd.u.pcie.r = reg;
691
692		/*
693		 * If the LDST Command succeeds, return the result, otherwise
694		 * fall through to reading it directly ourselves ...
695		 */
696		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
697				 &ldst_cmd);
698		if (ret == 0)
699			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
700
701		CH_WARN(adap, "Firmware failed to return "
702			"Configuration Space register %d, err = %d\n",
703			reg, -ret);
704	}
705
706	/*
707	 * Read the desired Configuration Space register via the PCI-E
708	 * Backdoor mechanism.
709	 */
710	return t4_hw_pci_read_cfg4(adap, reg);
711}
712
713/**
714 *	t4_get_regs_len - return the size of the chips register set
715 *	@adapter: the adapter
716 *
717 *	Returns the size of the chip's BAR0 register space.
718 */
719unsigned int t4_get_regs_len(struct adapter *adapter)
720{
721	unsigned int chip_version = chip_id(adapter);
722
723	switch (chip_version) {
724	case CHELSIO_T4:
725		if (adapter->flags & IS_VF)
726			return FW_T4VF_REGMAP_SIZE;
727		return T4_REGMAP_SIZE;
728
729	case CHELSIO_T5:
730	case CHELSIO_T6:
731		if (adapter->flags & IS_VF)
732			return FW_T4VF_REGMAP_SIZE;
733		return T5_REGMAP_SIZE;
734	}
735
736	CH_ERR(adapter,
737		"Unsupported chip version %d\n", chip_version);
738	return 0;
739}
740
741/**
742 *	t4_get_regs - read chip registers into provided buffer
743 *	@adap: the adapter
744 *	@buf: register buffer
745 *	@buf_size: size (in bytes) of register buffer
746 *
747 *	If the provided register buffer isn't large enough for the chip's
748 *	full register range, the register dump will be truncated to the
749 *	register buffer's size.
750 */
751void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
752{
753	static const unsigned int t4_reg_ranges[] = {
754		0x1008, 0x1108,
755		0x1180, 0x1184,
756		0x1190, 0x1194,
757		0x11a0, 0x11a4,
758		0x11b0, 0x11b4,
759		0x11fc, 0x123c,
760		0x1300, 0x173c,
761		0x1800, 0x18fc,
762		0x3000, 0x30d8,
763		0x30e0, 0x30e4,
764		0x30ec, 0x5910,
765		0x5920, 0x5924,
766		0x5960, 0x5960,
767		0x5968, 0x5968,
768		0x5970, 0x5970,
769		0x5978, 0x5978,
770		0x5980, 0x5980,
771		0x5988, 0x5988,
772		0x5990, 0x5990,
773		0x5998, 0x5998,
774		0x59a0, 0x59d4,
775		0x5a00, 0x5ae0,
776		0x5ae8, 0x5ae8,
777		0x5af0, 0x5af0,
778		0x5af8, 0x5af8,
779		0x6000, 0x6098,
780		0x6100, 0x6150,
781		0x6200, 0x6208,
782		0x6240, 0x6248,
783		0x6280, 0x62b0,
784		0x62c0, 0x6338,
785		0x6370, 0x638c,
786		0x6400, 0x643c,
787		0x6500, 0x6524,
788		0x6a00, 0x6a04,
789		0x6a14, 0x6a38,
790		0x6a60, 0x6a70,
791		0x6a78, 0x6a78,
792		0x6b00, 0x6b0c,
793		0x6b1c, 0x6b84,
794		0x6bf0, 0x6bf8,
795		0x6c00, 0x6c0c,
796		0x6c1c, 0x6c84,
797		0x6cf0, 0x6cf8,
798		0x6d00, 0x6d0c,
799		0x6d1c, 0x6d84,
800		0x6df0, 0x6df8,
801		0x6e00, 0x6e0c,
802		0x6e1c, 0x6e84,
803		0x6ef0, 0x6ef8,
804		0x6f00, 0x6f0c,
805		0x6f1c, 0x6f84,
806		0x6ff0, 0x6ff8,
807		0x7000, 0x700c,
808		0x701c, 0x7084,
809		0x70f0, 0x70f8,
810		0x7100, 0x710c,
811		0x711c, 0x7184,
812		0x71f0, 0x71f8,
813		0x7200, 0x720c,
814		0x721c, 0x7284,
815		0x72f0, 0x72f8,
816		0x7300, 0x730c,
817		0x731c, 0x7384,
818		0x73f0, 0x73f8,
819		0x7400, 0x7450,
820		0x7500, 0x7530,
821		0x7600, 0x760c,
822		0x7614, 0x761c,
823		0x7680, 0x76cc,
824		0x7700, 0x7798,
825		0x77c0, 0x77fc,
826		0x7900, 0x79fc,
827		0x7b00, 0x7b58,
828		0x7b60, 0x7b84,
829		0x7b8c, 0x7c38,
830		0x7d00, 0x7d38,
831		0x7d40, 0x7d80,
832		0x7d8c, 0x7ddc,
833		0x7de4, 0x7e04,
834		0x7e10, 0x7e1c,
835		0x7e24, 0x7e38,
836		0x7e40, 0x7e44,
837		0x7e4c, 0x7e78,
838		0x7e80, 0x7ea4,
839		0x7eac, 0x7edc,
840		0x7ee8, 0x7efc,
841		0x8dc0, 0x8e04,
842		0x8e10, 0x8e1c,
843		0x8e30, 0x8e78,
844		0x8ea0, 0x8eb8,
845		0x8ec0, 0x8f6c,
846		0x8fc0, 0x9008,
847		0x9010, 0x9058,
848		0x9060, 0x9060,
849		0x9068, 0x9074,
850		0x90fc, 0x90fc,
851		0x9400, 0x9408,
852		0x9410, 0x9458,
853		0x9600, 0x9600,
854		0x9608, 0x9638,
855		0x9640, 0x96bc,
856		0x9800, 0x9808,
857		0x9820, 0x983c,
858		0x9850, 0x9864,
859		0x9c00, 0x9c6c,
860		0x9c80, 0x9cec,
861		0x9d00, 0x9d6c,
862		0x9d80, 0x9dec,
863		0x9e00, 0x9e6c,
864		0x9e80, 0x9eec,
865		0x9f00, 0x9f6c,
866		0x9f80, 0x9fec,
867		0xd004, 0xd004,
868		0xd010, 0xd03c,
869		0xdfc0, 0xdfe0,
870		0xe000, 0xea7c,
871		0xf000, 0x11190,
872		0x19040, 0x1906c,
873		0x19078, 0x19080,
874		0x1908c, 0x190e4,
875		0x190f0, 0x190f8,
876		0x19100, 0x19110,
877		0x19120, 0x19124,
878		0x19150, 0x19194,
879		0x1919c, 0x191b0,
880		0x191d0, 0x191e8,
881		0x19238, 0x1924c,
882		0x193f8, 0x1943c,
883		0x1944c, 0x19474,
884		0x19490, 0x194e0,
885		0x194f0, 0x194f8,
886		0x19800, 0x19c08,
887		0x19c10, 0x19c90,
888		0x19ca0, 0x19ce4,
889		0x19cf0, 0x19d40,
890		0x19d50, 0x19d94,
891		0x19da0, 0x19de8,
892		0x19df0, 0x19e40,
893		0x19e50, 0x19e90,
894		0x19ea0, 0x19f4c,
895		0x1a000, 0x1a004,
896		0x1a010, 0x1a06c,
897		0x1a0b0, 0x1a0e4,
898		0x1a0ec, 0x1a0f4,
899		0x1a100, 0x1a108,
900		0x1a114, 0x1a120,
901		0x1a128, 0x1a130,
902		0x1a138, 0x1a138,
903		0x1a190, 0x1a1c4,
904		0x1a1fc, 0x1a1fc,
905		0x1e040, 0x1e04c,
906		0x1e284, 0x1e28c,
907		0x1e2c0, 0x1e2c0,
908		0x1e2e0, 0x1e2e0,
909		0x1e300, 0x1e384,
910		0x1e3c0, 0x1e3c8,
911		0x1e440, 0x1e44c,
912		0x1e684, 0x1e68c,
913		0x1e6c0, 0x1e6c0,
914		0x1e6e0, 0x1e6e0,
915		0x1e700, 0x1e784,
916		0x1e7c0, 0x1e7c8,
917		0x1e840, 0x1e84c,
918		0x1ea84, 0x1ea8c,
919		0x1eac0, 0x1eac0,
920		0x1eae0, 0x1eae0,
921		0x1eb00, 0x1eb84,
922		0x1ebc0, 0x1ebc8,
923		0x1ec40, 0x1ec4c,
924		0x1ee84, 0x1ee8c,
925		0x1eec0, 0x1eec0,
926		0x1eee0, 0x1eee0,
927		0x1ef00, 0x1ef84,
928		0x1efc0, 0x1efc8,
929		0x1f040, 0x1f04c,
930		0x1f284, 0x1f28c,
931		0x1f2c0, 0x1f2c0,
932		0x1f2e0, 0x1f2e0,
933		0x1f300, 0x1f384,
934		0x1f3c0, 0x1f3c8,
935		0x1f440, 0x1f44c,
936		0x1f684, 0x1f68c,
937		0x1f6c0, 0x1f6c0,
938		0x1f6e0, 0x1f6e0,
939		0x1f700, 0x1f784,
940		0x1f7c0, 0x1f7c8,
941		0x1f840, 0x1f84c,
942		0x1fa84, 0x1fa8c,
943		0x1fac0, 0x1fac0,
944		0x1fae0, 0x1fae0,
945		0x1fb00, 0x1fb84,
946		0x1fbc0, 0x1fbc8,
947		0x1fc40, 0x1fc4c,
948		0x1fe84, 0x1fe8c,
949		0x1fec0, 0x1fec0,
950		0x1fee0, 0x1fee0,
951		0x1ff00, 0x1ff84,
952		0x1ffc0, 0x1ffc8,
953		0x20000, 0x2002c,
954		0x20100, 0x2013c,
955		0x20190, 0x201a0,
956		0x201a8, 0x201b8,
957		0x201c4, 0x201c8,
958		0x20200, 0x20318,
959		0x20400, 0x204b4,
960		0x204c0, 0x20528,
961		0x20540, 0x20614,
962		0x21000, 0x21040,
963		0x2104c, 0x21060,
964		0x210c0, 0x210ec,
965		0x21200, 0x21268,
966		0x21270, 0x21284,
967		0x212fc, 0x21388,
968		0x21400, 0x21404,
969		0x21500, 0x21500,
970		0x21510, 0x21518,
971		0x2152c, 0x21530,
972		0x2153c, 0x2153c,
973		0x21550, 0x21554,
974		0x21600, 0x21600,
975		0x21608, 0x2161c,
976		0x21624, 0x21628,
977		0x21630, 0x21634,
978		0x2163c, 0x2163c,
979		0x21700, 0x2171c,
980		0x21780, 0x2178c,
981		0x21800, 0x21818,
982		0x21820, 0x21828,
983		0x21830, 0x21848,
984		0x21850, 0x21854,
985		0x21860, 0x21868,
986		0x21870, 0x21870,
987		0x21878, 0x21898,
988		0x218a0, 0x218a8,
989		0x218b0, 0x218c8,
990		0x218d0, 0x218d4,
991		0x218e0, 0x218e8,
992		0x218f0, 0x218f0,
993		0x218f8, 0x21a18,
994		0x21a20, 0x21a28,
995		0x21a30, 0x21a48,
996		0x21a50, 0x21a54,
997		0x21a60, 0x21a68,
998		0x21a70, 0x21a70,
999		0x21a78, 0x21a98,
1000		0x21aa0, 0x21aa8,
1001		0x21ab0, 0x21ac8,
1002		0x21ad0, 0x21ad4,
1003		0x21ae0, 0x21ae8,
1004		0x21af0, 0x21af0,
1005		0x21af8, 0x21c18,
1006		0x21c20, 0x21c20,
1007		0x21c28, 0x21c30,
1008		0x21c38, 0x21c38,
1009		0x21c80, 0x21c98,
1010		0x21ca0, 0x21ca8,
1011		0x21cb0, 0x21cc8,
1012		0x21cd0, 0x21cd4,
1013		0x21ce0, 0x21ce8,
1014		0x21cf0, 0x21cf0,
1015		0x21cf8, 0x21d7c,
1016		0x21e00, 0x21e04,
1017		0x22000, 0x2202c,
1018		0x22100, 0x2213c,
1019		0x22190, 0x221a0,
1020		0x221a8, 0x221b8,
1021		0x221c4, 0x221c8,
1022		0x22200, 0x22318,
1023		0x22400, 0x224b4,
1024		0x224c0, 0x22528,
1025		0x22540, 0x22614,
1026		0x23000, 0x23040,
1027		0x2304c, 0x23060,
1028		0x230c0, 0x230ec,
1029		0x23200, 0x23268,
1030		0x23270, 0x23284,
1031		0x232fc, 0x23388,
1032		0x23400, 0x23404,
1033		0x23500, 0x23500,
1034		0x23510, 0x23518,
1035		0x2352c, 0x23530,
1036		0x2353c, 0x2353c,
1037		0x23550, 0x23554,
1038		0x23600, 0x23600,
1039		0x23608, 0x2361c,
1040		0x23624, 0x23628,
1041		0x23630, 0x23634,
1042		0x2363c, 0x2363c,
1043		0x23700, 0x2371c,
1044		0x23780, 0x2378c,
1045		0x23800, 0x23818,
1046		0x23820, 0x23828,
1047		0x23830, 0x23848,
1048		0x23850, 0x23854,
1049		0x23860, 0x23868,
1050		0x23870, 0x23870,
1051		0x23878, 0x23898,
1052		0x238a0, 0x238a8,
1053		0x238b0, 0x238c8,
1054		0x238d0, 0x238d4,
1055		0x238e0, 0x238e8,
1056		0x238f0, 0x238f0,
1057		0x238f8, 0x23a18,
1058		0x23a20, 0x23a28,
1059		0x23a30, 0x23a48,
1060		0x23a50, 0x23a54,
1061		0x23a60, 0x23a68,
1062		0x23a70, 0x23a70,
1063		0x23a78, 0x23a98,
1064		0x23aa0, 0x23aa8,
1065		0x23ab0, 0x23ac8,
1066		0x23ad0, 0x23ad4,
1067		0x23ae0, 0x23ae8,
1068		0x23af0, 0x23af0,
1069		0x23af8, 0x23c18,
1070		0x23c20, 0x23c20,
1071		0x23c28, 0x23c30,
1072		0x23c38, 0x23c38,
1073		0x23c80, 0x23c98,
1074		0x23ca0, 0x23ca8,
1075		0x23cb0, 0x23cc8,
1076		0x23cd0, 0x23cd4,
1077		0x23ce0, 0x23ce8,
1078		0x23cf0, 0x23cf0,
1079		0x23cf8, 0x23d7c,
1080		0x23e00, 0x23e04,
1081		0x24000, 0x2402c,
1082		0x24100, 0x2413c,
1083		0x24190, 0x241a0,
1084		0x241a8, 0x241b8,
1085		0x241c4, 0x241c8,
1086		0x24200, 0x24318,
1087		0x24400, 0x244b4,
1088		0x244c0, 0x24528,
1089		0x24540, 0x24614,
1090		0x25000, 0x25040,
1091		0x2504c, 0x25060,
1092		0x250c0, 0x250ec,
1093		0x25200, 0x25268,
1094		0x25270, 0x25284,
1095		0x252fc, 0x25388,
1096		0x25400, 0x25404,
1097		0x25500, 0x25500,
1098		0x25510, 0x25518,
1099		0x2552c, 0x25530,
1100		0x2553c, 0x2553c,
1101		0x25550, 0x25554,
1102		0x25600, 0x25600,
1103		0x25608, 0x2561c,
1104		0x25624, 0x25628,
1105		0x25630, 0x25634,
1106		0x2563c, 0x2563c,
1107		0x25700, 0x2571c,
1108		0x25780, 0x2578c,
1109		0x25800, 0x25818,
1110		0x25820, 0x25828,
1111		0x25830, 0x25848,
1112		0x25850, 0x25854,
1113		0x25860, 0x25868,
1114		0x25870, 0x25870,
1115		0x25878, 0x25898,
1116		0x258a0, 0x258a8,
1117		0x258b0, 0x258c8,
1118		0x258d0, 0x258d4,
1119		0x258e0, 0x258e8,
1120		0x258f0, 0x258f0,
1121		0x258f8, 0x25a18,
1122		0x25a20, 0x25a28,
1123		0x25a30, 0x25a48,
1124		0x25a50, 0x25a54,
1125		0x25a60, 0x25a68,
1126		0x25a70, 0x25a70,
1127		0x25a78, 0x25a98,
1128		0x25aa0, 0x25aa8,
1129		0x25ab0, 0x25ac8,
1130		0x25ad0, 0x25ad4,
1131		0x25ae0, 0x25ae8,
1132		0x25af0, 0x25af0,
1133		0x25af8, 0x25c18,
1134		0x25c20, 0x25c20,
1135		0x25c28, 0x25c30,
1136		0x25c38, 0x25c38,
1137		0x25c80, 0x25c98,
1138		0x25ca0, 0x25ca8,
1139		0x25cb0, 0x25cc8,
1140		0x25cd0, 0x25cd4,
1141		0x25ce0, 0x25ce8,
1142		0x25cf0, 0x25cf0,
1143		0x25cf8, 0x25d7c,
1144		0x25e00, 0x25e04,
1145		0x26000, 0x2602c,
1146		0x26100, 0x2613c,
1147		0x26190, 0x261a0,
1148		0x261a8, 0x261b8,
1149		0x261c4, 0x261c8,
1150		0x26200, 0x26318,
1151		0x26400, 0x264b4,
1152		0x264c0, 0x26528,
1153		0x26540, 0x26614,
1154		0x27000, 0x27040,
1155		0x2704c, 0x27060,
1156		0x270c0, 0x270ec,
1157		0x27200, 0x27268,
1158		0x27270, 0x27284,
1159		0x272fc, 0x27388,
1160		0x27400, 0x27404,
1161		0x27500, 0x27500,
1162		0x27510, 0x27518,
1163		0x2752c, 0x27530,
1164		0x2753c, 0x2753c,
1165		0x27550, 0x27554,
1166		0x27600, 0x27600,
1167		0x27608, 0x2761c,
1168		0x27624, 0x27628,
1169		0x27630, 0x27634,
1170		0x2763c, 0x2763c,
1171		0x27700, 0x2771c,
1172		0x27780, 0x2778c,
1173		0x27800, 0x27818,
1174		0x27820, 0x27828,
1175		0x27830, 0x27848,
1176		0x27850, 0x27854,
1177		0x27860, 0x27868,
1178		0x27870, 0x27870,
1179		0x27878, 0x27898,
1180		0x278a0, 0x278a8,
1181		0x278b0, 0x278c8,
1182		0x278d0, 0x278d4,
1183		0x278e0, 0x278e8,
1184		0x278f0, 0x278f0,
1185		0x278f8, 0x27a18,
1186		0x27a20, 0x27a28,
1187		0x27a30, 0x27a48,
1188		0x27a50, 0x27a54,
1189		0x27a60, 0x27a68,
1190		0x27a70, 0x27a70,
1191		0x27a78, 0x27a98,
1192		0x27aa0, 0x27aa8,
1193		0x27ab0, 0x27ac8,
1194		0x27ad0, 0x27ad4,
1195		0x27ae0, 0x27ae8,
1196		0x27af0, 0x27af0,
1197		0x27af8, 0x27c18,
1198		0x27c20, 0x27c20,
1199		0x27c28, 0x27c30,
1200		0x27c38, 0x27c38,
1201		0x27c80, 0x27c98,
1202		0x27ca0, 0x27ca8,
1203		0x27cb0, 0x27cc8,
1204		0x27cd0, 0x27cd4,
1205		0x27ce0, 0x27ce8,
1206		0x27cf0, 0x27cf0,
1207		0x27cf8, 0x27d7c,
1208		0x27e00, 0x27e04,
1209	};
1210
/*
 * Register address ranges readable through a T4 Virtual Function's BAR.
 * Entries come in pairs: (first address, last address) — presumably
 * consumed inclusively by the register-dump code; confirm against the
 * t4_get_regs() consumer, which is outside this view.  The final pair
 * covers the firmware mailbox data window.
 */
static const unsigned int t4vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
1222
/*
 * Register address ranges for T5 adapters.  Entries come in pairs:
 * (first address, last address) — presumably read inclusively by the
 * register-dump code (confirm against the t4_get_regs() consumer, which
 * is outside this view).  The exact values and their order are part of
 * the hardware contract; do not edit without the T5 register map.
 */
static const unsigned int t5_reg_ranges[] = {
	0x1008, 0x10c0,
	0x10cc, 0x10f8,
	0x1100, 0x1100,
	0x110c, 0x1148,
	0x1180, 0x1184,
	0x1190, 0x1194,
	0x11a0, 0x11a4,
	0x11b0, 0x11b4,
	0x11fc, 0x123c,
	0x1280, 0x173c,
	0x1800, 0x18fc,
	0x3000, 0x3028,
	0x3060, 0x30b0,
	0x30b8, 0x30d8,
	0x30e0, 0x30fc,
	0x3140, 0x357c,
	0x35a8, 0x35cc,
	0x35ec, 0x35ec,
	0x3600, 0x5624,
	0x56cc, 0x56ec,
	0x56f4, 0x5720,
	0x5728, 0x575c,
	0x580c, 0x5814,
	0x5890, 0x589c,
	0x58a4, 0x58ac,
	0x58b8, 0x58bc,
	0x5940, 0x59c8,
	0x59d0, 0x59dc,
	0x59fc, 0x5a18,
	0x5a60, 0x5a70,
	0x5a80, 0x5a9c,
	0x5b94, 0x5bfc,
	0x6000, 0x6020,
	0x6028, 0x6040,
	0x6058, 0x609c,
	0x60a8, 0x614c,
	0x7700, 0x7798,
	0x77c0, 0x78fc,
	0x7b00, 0x7b58,
	0x7b60, 0x7b84,
	0x7b8c, 0x7c54,
	0x7d00, 0x7d38,
	0x7d40, 0x7d80,
	0x7d8c, 0x7ddc,
	0x7de4, 0x7e04,
	0x7e10, 0x7e1c,
	0x7e24, 0x7e38,
	0x7e40, 0x7e44,
	0x7e4c, 0x7e78,
	0x7e80, 0x7edc,
	0x7ee8, 0x7efc,
	0x8dc0, 0x8de0,
	0x8df8, 0x8e04,
	0x8e10, 0x8e84,
	0x8ea0, 0x8f84,
	0x8fc0, 0x9058,
	0x9060, 0x9060,
	0x9068, 0x90f8,
	0x9400, 0x9408,
	0x9410, 0x9470,
	0x9600, 0x9600,
	0x9608, 0x9638,
	0x9640, 0x96f4,
	0x9800, 0x9808,
	0x9820, 0x983c,
	0x9850, 0x9864,
	0x9c00, 0x9c6c,
	0x9c80, 0x9cec,
	0x9d00, 0x9d6c,
	0x9d80, 0x9dec,
	0x9e00, 0x9e6c,
	0x9e80, 0x9eec,
	0x9f00, 0x9f6c,
	0x9f80, 0xa020,
	0xd004, 0xd004,
	0xd010, 0xd03c,
	0xdfc0, 0xdfe0,
	0xe000, 0x1106c,
	0x11074, 0x11088,
	0x1109c, 0x1117c,
	0x11190, 0x11204,
	0x19040, 0x1906c,
	0x19078, 0x19080,
	0x1908c, 0x190e8,
	0x190f0, 0x190f8,
	0x19100, 0x19110,
	0x19120, 0x19124,
	0x19150, 0x19194,
	0x1919c, 0x191b0,
	0x191d0, 0x191e8,
	0x19238, 0x19290,
	0x193f8, 0x19428,
	0x19430, 0x19444,
	0x1944c, 0x1946c,
	0x19474, 0x19474,
	0x19490, 0x194cc,
	0x194f0, 0x194f8,
	0x19c00, 0x19c08,
	0x19c10, 0x19c60,
	0x19c94, 0x19ce4,
	0x19cf0, 0x19d40,
	0x19d50, 0x19d94,
	0x19da0, 0x19de8,
	0x19df0, 0x19e10,
	0x19e50, 0x19e90,
	0x19ea0, 0x19f24,
	0x19f34, 0x19f34,
	0x19f40, 0x19f50,
	0x19f90, 0x19fb4,
	0x19fc4, 0x19fe4,
	0x1a000, 0x1a004,
	0x1a010, 0x1a06c,
	0x1a0b0, 0x1a0e4,
	0x1a0ec, 0x1a0f8,
	0x1a100, 0x1a108,
	0x1a114, 0x1a120,
	0x1a128, 0x1a130,
	0x1a138, 0x1a138,
	0x1a190, 0x1a1c4,
	0x1a1fc, 0x1a1fc,
	0x1e008, 0x1e00c,
	0x1e040, 0x1e044,
	0x1e04c, 0x1e04c,
	0x1e284, 0x1e290,
	0x1e2c0, 0x1e2c0,
	0x1e2e0, 0x1e2e0,
	0x1e300, 0x1e384,
	0x1e3c0, 0x1e3c8,
	0x1e408, 0x1e40c,
	0x1e440, 0x1e444,
	0x1e44c, 0x1e44c,
	0x1e684, 0x1e690,
	0x1e6c0, 0x1e6c0,
	0x1e6e0, 0x1e6e0,
	0x1e700, 0x1e784,
	0x1e7c0, 0x1e7c8,
	0x1e808, 0x1e80c,
	0x1e840, 0x1e844,
	0x1e84c, 0x1e84c,
	0x1ea84, 0x1ea90,
	0x1eac0, 0x1eac0,
	0x1eae0, 0x1eae0,
	0x1eb00, 0x1eb84,
	0x1ebc0, 0x1ebc8,
	0x1ec08, 0x1ec0c,
	0x1ec40, 0x1ec44,
	0x1ec4c, 0x1ec4c,
	0x1ee84, 0x1ee90,
	0x1eec0, 0x1eec0,
	0x1eee0, 0x1eee0,
	0x1ef00, 0x1ef84,
	0x1efc0, 0x1efc8,
	0x1f008, 0x1f00c,
	0x1f040, 0x1f044,
	0x1f04c, 0x1f04c,
	0x1f284, 0x1f290,
	0x1f2c0, 0x1f2c0,
	0x1f2e0, 0x1f2e0,
	0x1f300, 0x1f384,
	0x1f3c0, 0x1f3c8,
	0x1f408, 0x1f40c,
	0x1f440, 0x1f444,
	0x1f44c, 0x1f44c,
	0x1f684, 0x1f690,
	0x1f6c0, 0x1f6c0,
	0x1f6e0, 0x1f6e0,
	0x1f700, 0x1f784,
	0x1f7c0, 0x1f7c8,
	0x1f808, 0x1f80c,
	0x1f840, 0x1f844,
	0x1f84c, 0x1f84c,
	0x1fa84, 0x1fa90,
	0x1fac0, 0x1fac0,
	0x1fae0, 0x1fae0,
	0x1fb00, 0x1fb84,
	0x1fbc0, 0x1fbc8,
	0x1fc08, 0x1fc0c,
	0x1fc40, 0x1fc44,
	0x1fc4c, 0x1fc4c,
	0x1fe84, 0x1fe90,
	0x1fec0, 0x1fec0,
	0x1fee0, 0x1fee0,
	0x1ff00, 0x1ff84,
	0x1ffc0, 0x1ffc8,
	0x30000, 0x30030,
	0x30038, 0x30038,
	0x30040, 0x30040,
	0x30100, 0x30144,
	0x30190, 0x301a0,
	0x301a8, 0x301b8,
	0x301c4, 0x301c8,
	0x301d0, 0x301d0,
	0x30200, 0x30318,
	0x30400, 0x304b4,
	0x304c0, 0x3052c,
	0x30540, 0x3061c,
	0x30800, 0x30828,
	0x30834, 0x30834,
	0x308c0, 0x30908,
	0x30910, 0x309ac,
	0x30a00, 0x30a14,
	0x30a1c, 0x30a2c,
	0x30a44, 0x30a50,
	0x30a74, 0x30a74,
	0x30a7c, 0x30afc,
	0x30b08, 0x30c24,
	0x30d00, 0x30d00,
	0x30d08, 0x30d14,
	0x30d1c, 0x30d20,
	0x30d3c, 0x30d3c,
	0x30d48, 0x30d50,
	0x31200, 0x3120c,
	0x31220, 0x31220,
	0x31240, 0x31240,
	0x31600, 0x3160c,
	0x31a00, 0x31a1c,
	0x31e00, 0x31e20,
	0x31e38, 0x31e3c,
	0x31e80, 0x31e80,
	0x31e88, 0x31ea8,
	0x31eb0, 0x31eb4,
	0x31ec8, 0x31ed4,
	0x31fb8, 0x32004,
	0x32200, 0x32200,
	0x32208, 0x32240,
	0x32248, 0x32280,
	0x32288, 0x322c0,
	0x322c8, 0x322fc,
	0x32600, 0x32630,
	0x32a00, 0x32abc,
	0x32b00, 0x32b10,
	0x32b20, 0x32b30,
	0x32b40, 0x32b50,
	0x32b60, 0x32b70,
	0x33000, 0x33028,
	0x33030, 0x33048,
	0x33060, 0x33068,
	0x33070, 0x3309c,
	0x330f0, 0x33128,
	0x33130, 0x33148,
	0x33160, 0x33168,
	0x33170, 0x3319c,
	0x331f0, 0x33238,
	0x33240, 0x33240,
	0x33248, 0x33250,
	0x3325c, 0x33264,
	0x33270, 0x332b8,
	0x332c0, 0x332e4,
	0x332f8, 0x33338,
	0x33340, 0x33340,
	0x33348, 0x33350,
	0x3335c, 0x33364,
	0x33370, 0x333b8,
	0x333c0, 0x333e4,
	0x333f8, 0x33428,
	0x33430, 0x33448,
	0x33460, 0x33468,
	0x33470, 0x3349c,
	0x334f0, 0x33528,
	0x33530, 0x33548,
	0x33560, 0x33568,
	0x33570, 0x3359c,
	0x335f0, 0x33638,
	0x33640, 0x33640,
	0x33648, 0x33650,
	0x3365c, 0x33664,
	0x33670, 0x336b8,
	0x336c0, 0x336e4,
	0x336f8, 0x33738,
	0x33740, 0x33740,
	0x33748, 0x33750,
	0x3375c, 0x33764,
	0x33770, 0x337b8,
	0x337c0, 0x337e4,
	0x337f8, 0x337fc,
	0x33814, 0x33814,
	0x3382c, 0x3382c,
	0x33880, 0x3388c,
	0x338e8, 0x338ec,
	0x33900, 0x33928,
	0x33930, 0x33948,
	0x33960, 0x33968,
	0x33970, 0x3399c,
	0x339f0, 0x33a38,
	0x33a40, 0x33a40,
	0x33a48, 0x33a50,
	0x33a5c, 0x33a64,
	0x33a70, 0x33ab8,
	0x33ac0, 0x33ae4,
	0x33af8, 0x33b10,
	0x33b28, 0x33b28,
	0x33b3c, 0x33b50,
	0x33bf0, 0x33c10,
	0x33c28, 0x33c28,
	0x33c3c, 0x33c50,
	0x33cf0, 0x33cfc,
	0x34000, 0x34030,
	0x34038, 0x34038,
	0x34040, 0x34040,
	0x34100, 0x34144,
	0x34190, 0x341a0,
	0x341a8, 0x341b8,
	0x341c4, 0x341c8,
	0x341d0, 0x341d0,
	0x34200, 0x34318,
	0x34400, 0x344b4,
	0x344c0, 0x3452c,
	0x34540, 0x3461c,
	0x34800, 0x34828,
	0x34834, 0x34834,
	0x348c0, 0x34908,
	0x34910, 0x349ac,
	0x34a00, 0x34a14,
	0x34a1c, 0x34a2c,
	0x34a44, 0x34a50,
	0x34a74, 0x34a74,
	0x34a7c, 0x34afc,
	0x34b08, 0x34c24,
	0x34d00, 0x34d00,
	0x34d08, 0x34d14,
	0x34d1c, 0x34d20,
	0x34d3c, 0x34d3c,
	0x34d48, 0x34d50,
	0x35200, 0x3520c,
	0x35220, 0x35220,
	0x35240, 0x35240,
	0x35600, 0x3560c,
	0x35a00, 0x35a1c,
	0x35e00, 0x35e20,
	0x35e38, 0x35e3c,
	0x35e80, 0x35e80,
	0x35e88, 0x35ea8,
	0x35eb0, 0x35eb4,
	0x35ec8, 0x35ed4,
	0x35fb8, 0x36004,
	0x36200, 0x36200,
	0x36208, 0x36240,
	0x36248, 0x36280,
	0x36288, 0x362c0,
	0x362c8, 0x362fc,
	0x36600, 0x36630,
	0x36a00, 0x36abc,
	0x36b00, 0x36b10,
	0x36b20, 0x36b30,
	0x36b40, 0x36b50,
	0x36b60, 0x36b70,
	0x37000, 0x37028,
	0x37030, 0x37048,
	0x37060, 0x37068,
	0x37070, 0x3709c,
	0x370f0, 0x37128,
	0x37130, 0x37148,
	0x37160, 0x37168,
	0x37170, 0x3719c,
	0x371f0, 0x37238,
	0x37240, 0x37240,
	0x37248, 0x37250,
	0x3725c, 0x37264,
	0x37270, 0x372b8,
	0x372c0, 0x372e4,
	0x372f8, 0x37338,
	0x37340, 0x37340,
	0x37348, 0x37350,
	0x3735c, 0x37364,
	0x37370, 0x373b8,
	0x373c0, 0x373e4,
	0x373f8, 0x37428,
	0x37430, 0x37448,
	0x37460, 0x37468,
	0x37470, 0x3749c,
	0x374f0, 0x37528,
	0x37530, 0x37548,
	0x37560, 0x37568,
	0x37570, 0x3759c,
	0x375f0, 0x37638,
	0x37640, 0x37640,
	0x37648, 0x37650,
	0x3765c, 0x37664,
	0x37670, 0x376b8,
	0x376c0, 0x376e4,
	0x376f8, 0x37738,
	0x37740, 0x37740,
	0x37748, 0x37750,
	0x3775c, 0x37764,
	0x37770, 0x377b8,
	0x377c0, 0x377e4,
	0x377f8, 0x377fc,
	0x37814, 0x37814,
	0x3782c, 0x3782c,
	0x37880, 0x3788c,
	0x378e8, 0x378ec,
	0x37900, 0x37928,
	0x37930, 0x37948,
	0x37960, 0x37968,
	0x37970, 0x3799c,
	0x379f0, 0x37a38,
	0x37a40, 0x37a40,
	0x37a48, 0x37a50,
	0x37a5c, 0x37a64,
	0x37a70, 0x37ab8,
	0x37ac0, 0x37ae4,
	0x37af8, 0x37b10,
	0x37b28, 0x37b28,
	0x37b3c, 0x37b50,
	0x37bf0, 0x37c10,
	0x37c28, 0x37c28,
	0x37c3c, 0x37c50,
	0x37cf0, 0x37cfc,
	0x38000, 0x38030,
	0x38038, 0x38038,
	0x38040, 0x38040,
	0x38100, 0x38144,
	0x38190, 0x381a0,
	0x381a8, 0x381b8,
	0x381c4, 0x381c8,
	0x381d0, 0x381d0,
	0x38200, 0x38318,
	0x38400, 0x384b4,
	0x384c0, 0x3852c,
	0x38540, 0x3861c,
	0x38800, 0x38828,
	0x38834, 0x38834,
	0x388c0, 0x38908,
	0x38910, 0x389ac,
	0x38a00, 0x38a14,
	0x38a1c, 0x38a2c,
	0x38a44, 0x38a50,
	0x38a74, 0x38a74,
	0x38a7c, 0x38afc,
	0x38b08, 0x38c24,
	0x38d00, 0x38d00,
	0x38d08, 0x38d14,
	0x38d1c, 0x38d20,
	0x38d3c, 0x38d3c,
	0x38d48, 0x38d50,
	0x39200, 0x3920c,
	0x39220, 0x39220,
	0x39240, 0x39240,
	0x39600, 0x3960c,
	0x39a00, 0x39a1c,
	0x39e00, 0x39e20,
	0x39e38, 0x39e3c,
	0x39e80, 0x39e80,
	0x39e88, 0x39ea8,
	0x39eb0, 0x39eb4,
	0x39ec8, 0x39ed4,
	0x39fb8, 0x3a004,
	0x3a200, 0x3a200,
	0x3a208, 0x3a240,
	0x3a248, 0x3a280,
	0x3a288, 0x3a2c0,
	0x3a2c8, 0x3a2fc,
	0x3a600, 0x3a630,
	0x3aa00, 0x3aabc,
	0x3ab00, 0x3ab10,
	0x3ab20, 0x3ab30,
	0x3ab40, 0x3ab50,
	0x3ab60, 0x3ab70,
	0x3b000, 0x3b028,
	0x3b030, 0x3b048,
	0x3b060, 0x3b068,
	0x3b070, 0x3b09c,
	0x3b0f0, 0x3b128,
	0x3b130, 0x3b148,
	0x3b160, 0x3b168,
	0x3b170, 0x3b19c,
	0x3b1f0, 0x3b238,
	0x3b240, 0x3b240,
	0x3b248, 0x3b250,
	0x3b25c, 0x3b264,
	0x3b270, 0x3b2b8,
	0x3b2c0, 0x3b2e4,
	0x3b2f8, 0x3b338,
	0x3b340, 0x3b340,
	0x3b348, 0x3b350,
	0x3b35c, 0x3b364,
	0x3b370, 0x3b3b8,
	0x3b3c0, 0x3b3e4,
	0x3b3f8, 0x3b428,
	0x3b430, 0x3b448,
	0x3b460, 0x3b468,
	0x3b470, 0x3b49c,
	0x3b4f0, 0x3b528,
	0x3b530, 0x3b548,
	0x3b560, 0x3b568,
	0x3b570, 0x3b59c,
	0x3b5f0, 0x3b638,
	0x3b640, 0x3b640,
	0x3b648, 0x3b650,
	0x3b65c, 0x3b664,
	0x3b670, 0x3b6b8,
	0x3b6c0, 0x3b6e4,
	0x3b6f8, 0x3b738,
	0x3b740, 0x3b740,
	0x3b748, 0x3b750,
	0x3b75c, 0x3b764,
	0x3b770, 0x3b7b8,
	0x3b7c0, 0x3b7e4,
	0x3b7f8, 0x3b7fc,
	0x3b814, 0x3b814,
	0x3b82c, 0x3b82c,
	0x3b880, 0x3b88c,
	0x3b8e8, 0x3b8ec,
	0x3b900, 0x3b928,
	0x3b930, 0x3b948,
	0x3b960, 0x3b968,
	0x3b970, 0x3b99c,
	0x3b9f0, 0x3ba38,
	0x3ba40, 0x3ba40,
	0x3ba48, 0x3ba50,
	0x3ba5c, 0x3ba64,
	0x3ba70, 0x3bab8,
	0x3bac0, 0x3bae4,
	0x3baf8, 0x3bb10,
	0x3bb28, 0x3bb28,
	0x3bb3c, 0x3bb50,
	0x3bbf0, 0x3bc10,
	0x3bc28, 0x3bc28,
	0x3bc3c, 0x3bc50,
	0x3bcf0, 0x3bcfc,
	0x3c000, 0x3c030,
	0x3c038, 0x3c038,
	0x3c040, 0x3c040,
	0x3c100, 0x3c144,
	0x3c190, 0x3c1a0,
	0x3c1a8, 0x3c1b8,
	0x3c1c4, 0x3c1c8,
	0x3c1d0, 0x3c1d0,
	0x3c200, 0x3c318,
	0x3c400, 0x3c4b4,
	0x3c4c0, 0x3c52c,
	0x3c540, 0x3c61c,
	0x3c800, 0x3c828,
	0x3c834, 0x3c834,
	0x3c8c0, 0x3c908,
	0x3c910, 0x3c9ac,
	0x3ca00, 0x3ca14,
	0x3ca1c, 0x3ca2c,
	0x3ca44, 0x3ca50,
	0x3ca74, 0x3ca74,
	0x3ca7c, 0x3cafc,
	0x3cb08, 0x3cc24,
	0x3cd00, 0x3cd00,
	0x3cd08, 0x3cd14,
	0x3cd1c, 0x3cd20,
	0x3cd3c, 0x3cd3c,
	0x3cd48, 0x3cd50,
	0x3d200, 0x3d20c,
	0x3d220, 0x3d220,
	0x3d240, 0x3d240,
	0x3d600, 0x3d60c,
	0x3da00, 0x3da1c,
	0x3de00, 0x3de20,
	0x3de38, 0x3de3c,
	0x3de80, 0x3de80,
	0x3de88, 0x3dea8,
	0x3deb0, 0x3deb4,
	0x3dec8, 0x3ded4,
	0x3dfb8, 0x3e004,
	0x3e200, 0x3e200,
	0x3e208, 0x3e240,
	0x3e248, 0x3e280,
	0x3e288, 0x3e2c0,
	0x3e2c8, 0x3e2fc,
	0x3e600, 0x3e630,
	0x3ea00, 0x3eabc,
	0x3eb00, 0x3eb10,
	0x3eb20, 0x3eb30,
	0x3eb40, 0x3eb50,
	0x3eb60, 0x3eb70,
	0x3f000, 0x3f028,
	0x3f030, 0x3f048,
	0x3f060, 0x3f068,
	0x3f070, 0x3f09c,
	0x3f0f0, 0x3f128,
	0x3f130, 0x3f148,
	0x3f160, 0x3f168,
	0x3f170, 0x3f19c,
	0x3f1f0, 0x3f238,
	0x3f240, 0x3f240,
	0x3f248, 0x3f250,
	0x3f25c, 0x3f264,
	0x3f270, 0x3f2b8,
	0x3f2c0, 0x3f2e4,
	0x3f2f8, 0x3f338,
	0x3f340, 0x3f340,
	0x3f348, 0x3f350,
	0x3f35c, 0x3f364,
	0x3f370, 0x3f3b8,
	0x3f3c0, 0x3f3e4,
	0x3f3f8, 0x3f428,
	0x3f430, 0x3f448,
	0x3f460, 0x3f468,
	0x3f470, 0x3f49c,
	0x3f4f0, 0x3f528,
	0x3f530, 0x3f548,
	0x3f560, 0x3f568,
	0x3f570, 0x3f59c,
	0x3f5f0, 0x3f638,
	0x3f640, 0x3f640,
	0x3f648, 0x3f650,
	0x3f65c, 0x3f664,
	0x3f670, 0x3f6b8,
	0x3f6c0, 0x3f6e4,
	0x3f6f8, 0x3f738,
	0x3f740, 0x3f740,
	0x3f748, 0x3f750,
	0x3f75c, 0x3f764,
	0x3f770, 0x3f7b8,
	0x3f7c0, 0x3f7e4,
	0x3f7f8, 0x3f7fc,
	0x3f814, 0x3f814,
	0x3f82c, 0x3f82c,
	0x3f880, 0x3f88c,
	0x3f8e8, 0x3f8ec,
	0x3f900, 0x3f928,
	0x3f930, 0x3f948,
	0x3f960, 0x3f968,
	0x3f970, 0x3f99c,
	0x3f9f0, 0x3fa38,
	0x3fa40, 0x3fa40,
	0x3fa48, 0x3fa50,
	0x3fa5c, 0x3fa64,
	0x3fa70, 0x3fab8,
	0x3fac0, 0x3fae4,
	0x3faf8, 0x3fb10,
	0x3fb28, 0x3fb28,
	0x3fb3c, 0x3fb50,
	0x3fbf0, 0x3fc10,
	0x3fc28, 0x3fc28,
	0x3fc3c, 0x3fc50,
	0x3fcf0, 0x3fcfc,
	0x40000, 0x4000c,
	0x40040, 0x40050,
	0x40060, 0x40068,
	0x4007c, 0x4008c,
	0x40094, 0x400b0,
	0x400c0, 0x40144,
	0x40180, 0x4018c,
	0x40200, 0x40254,
	0x40260, 0x40264,
	0x40270, 0x40288,
	0x40290, 0x40298,
	0x402ac, 0x402c8,
	0x402d0, 0x402e0,
	0x402f0, 0x402f0,
	0x40300, 0x4033c,
	0x403f8, 0x403fc,
	0x41304, 0x413c4,
	0x41400, 0x4140c,
	0x41414, 0x4141c,
	0x41480, 0x414d0,
	0x44000, 0x44054,
	0x4405c, 0x44078,
	0x440c0, 0x44174,
	0x44180, 0x441ac,
	0x441b4, 0x441b8,
	0x441c0, 0x44254,
	0x4425c, 0x44278,
	0x442c0, 0x44374,
	0x44380, 0x443ac,
	0x443b4, 0x443b8,
	0x443c0, 0x44454,
	0x4445c, 0x44478,
	0x444c0, 0x44574,
	0x44580, 0x445ac,
	0x445b4, 0x445b8,
	0x445c0, 0x44654,
	0x4465c, 0x44678,
	0x446c0, 0x44774,
	0x44780, 0x447ac,
	0x447b4, 0x447b8,
	0x447c0, 0x44854,
	0x4485c, 0x44878,
	0x448c0, 0x44974,
	0x44980, 0x449ac,
	0x449b4, 0x449b8,
	0x449c0, 0x449fc,
	0x45000, 0x45004,
	0x45010, 0x45030,
	0x45040, 0x45060,
	0x45068, 0x45068,
	0x45080, 0x45084,
	0x450a0, 0x450b0,
	0x45200, 0x45204,
	0x45210, 0x45230,
	0x45240, 0x45260,
	0x45268, 0x45268,
	0x45280, 0x45284,
	0x452a0, 0x452b0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x48000, 0x4800c,
	0x48040, 0x48050,
	0x48060, 0x48068,
	0x4807c, 0x4808c,
	0x48094, 0x480b0,
	0x480c0, 0x48144,
	0x48180, 0x4818c,
	0x48200, 0x48254,
	0x48260, 0x48264,
	0x48270, 0x48288,
	0x48290, 0x48298,
	0x482ac, 0x482c8,
	0x482d0, 0x482e0,
	0x482f0, 0x482f0,
	0x48300, 0x4833c,
	0x483f8, 0x483fc,
	0x49304, 0x493c4,
	0x49400, 0x4940c,
	0x49414, 0x4941c,
	0x49480, 0x494d0,
	0x4c000, 0x4c054,
	0x4c05c, 0x4c078,
	0x4c0c0, 0x4c174,
	0x4c180, 0x4c1ac,
	0x4c1b4, 0x4c1b8,
	0x4c1c0, 0x4c254,
	0x4c25c, 0x4c278,
	0x4c2c0, 0x4c374,
	0x4c380, 0x4c3ac,
	0x4c3b4, 0x4c3b8,
	0x4c3c0, 0x4c454,
	0x4c45c, 0x4c478,
	0x4c4c0, 0x4c574,
	0x4c580, 0x4c5ac,
	0x4c5b4, 0x4c5b8,
	0x4c5c0, 0x4c654,
	0x4c65c, 0x4c678,
	0x4c6c0, 0x4c774,
	0x4c780, 0x4c7ac,
	0x4c7b4, 0x4c7b8,
	0x4c7c0, 0x4c854,
	0x4c85c, 0x4c878,
	0x4c8c0, 0x4c974,
	0x4c980, 0x4c9ac,
	0x4c9b4, 0x4c9b8,
	0x4c9c0, 0x4c9fc,
	0x4d000, 0x4d004,
	0x4d010, 0x4d030,
	0x4d040, 0x4d060,
	0x4d068, 0x4d068,
	0x4d080, 0x4d084,
	0x4d0a0, 0x4d0b0,
	0x4d200, 0x4d204,
	0x4d210, 0x4d230,
	0x4d240, 0x4d260,
	0x4d268, 0x4d268,
	0x4d280, 0x4d284,
	0x4d2a0, 0x4d2b0,
	0x4e0c0, 0x4e0e4,
	0x4f000, 0x4f03c,
	0x4f044, 0x4f08c,
	0x4f200, 0x4f250,
	0x4f400, 0x4f408,
	0x4f414, 0x4f420,
	0x4f600, 0x4f618,
	0x4f800, 0x4f814,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50c00, 0x50c00,
	0x51000, 0x5101c,
	0x51300, 0x51308,
};
1997
/*
 * Register address ranges readable through a T5 Virtual Function's BAR,
 * as (first address, last address) pairs — presumably consumed
 * inclusively by the register-dump code; confirm against the
 * t4_get_regs() consumer, which is outside this view.  Unlike the T4 VF
 * table, the PL range here extends through A_PL_VF_REVISION.  The final
 * pair covers the firmware mailbox data window (same base as T4 VFs).
 */
static const unsigned int t5vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
2009
2010	static const unsigned int t6_reg_ranges[] = {
2011		0x1008, 0x101c,
2012		0x1024, 0x10a8,
2013		0x10b4, 0x10f8,
2014		0x1100, 0x1114,
2015		0x111c, 0x112c,
2016		0x1138, 0x113c,
2017		0x1144, 0x114c,
2018		0x1180, 0x1184,
2019		0x1190, 0x1194,
2020		0x11a0, 0x11a4,
2021		0x11b0, 0x11b4,
2022		0x11fc, 0x1274,
2023		0x1280, 0x133c,
2024		0x1800, 0x18fc,
2025		0x3000, 0x302c,
2026		0x3060, 0x30b0,
2027		0x30b8, 0x30d8,
2028		0x30e0, 0x30fc,
2029		0x3140, 0x357c,
2030		0x35a8, 0x35cc,
2031		0x35ec, 0x35ec,
2032		0x3600, 0x5624,
2033		0x56cc, 0x56ec,
2034		0x56f4, 0x5720,
2035		0x5728, 0x575c,
2036		0x580c, 0x5814,
2037		0x5890, 0x589c,
2038		0x58a4, 0x58ac,
2039		0x58b8, 0x58bc,
2040		0x5940, 0x595c,
2041		0x5980, 0x598c,
2042		0x59b0, 0x59c8,
2043		0x59d0, 0x59dc,
2044		0x59fc, 0x5a18,
2045		0x5a60, 0x5a6c,
2046		0x5a80, 0x5a8c,
2047		0x5a94, 0x5a9c,
2048		0x5b94, 0x5bfc,
2049		0x5c10, 0x5e48,
2050		0x5e50, 0x5e94,
2051		0x5ea0, 0x5eb0,
2052		0x5ec0, 0x5ec0,
2053		0x5ec8, 0x5ed0,
2054		0x5ee0, 0x5ee0,
2055		0x5ef0, 0x5ef0,
2056		0x5f00, 0x5f00,
2057		0x6000, 0x6020,
2058		0x6028, 0x6040,
2059		0x6058, 0x609c,
2060		0x60a8, 0x619c,
2061		0x7700, 0x7798,
2062		0x77c0, 0x7880,
2063		0x78cc, 0x78fc,
2064		0x7b00, 0x7b58,
2065		0x7b60, 0x7b84,
2066		0x7b8c, 0x7c54,
2067		0x7d00, 0x7d38,
2068		0x7d40, 0x7d84,
2069		0x7d8c, 0x7ddc,
2070		0x7de4, 0x7e04,
2071		0x7e10, 0x7e1c,
2072		0x7e24, 0x7e38,
2073		0x7e40, 0x7e44,
2074		0x7e4c, 0x7e78,
2075		0x7e80, 0x7edc,
2076		0x7ee8, 0x7efc,
2077		0x8dc0, 0x8de4,
2078		0x8df8, 0x8e04,
2079		0x8e10, 0x8e84,
2080		0x8ea0, 0x8f88,
2081		0x8fb8, 0x9058,
2082		0x9060, 0x9060,
2083		0x9068, 0x90f8,
2084		0x9100, 0x9124,
2085		0x9400, 0x9470,
2086		0x9600, 0x9600,
2087		0x9608, 0x9638,
2088		0x9640, 0x9704,
2089		0x9710, 0x971c,
2090		0x9800, 0x9808,
2091		0x9820, 0x983c,
2092		0x9850, 0x9864,
2093		0x9c00, 0x9c6c,
2094		0x9c80, 0x9cec,
2095		0x9d00, 0x9d6c,
2096		0x9d80, 0x9dec,
2097		0x9e00, 0x9e6c,
2098		0x9e80, 0x9eec,
2099		0x9f00, 0x9f6c,
2100		0x9f80, 0xa020,
2101		0xd004, 0xd03c,
2102		0xd100, 0xd118,
2103		0xd200, 0xd214,
2104		0xd220, 0xd234,
2105		0xd240, 0xd254,
2106		0xd260, 0xd274,
2107		0xd280, 0xd294,
2108		0xd2a0, 0xd2b4,
2109		0xd2c0, 0xd2d4,
2110		0xd2e0, 0xd2f4,
2111		0xd300, 0xd31c,
2112		0xdfc0, 0xdfe0,
2113		0xe000, 0xf008,
2114		0xf010, 0xf018,
2115		0xf020, 0xf028,
2116		0x11000, 0x11014,
2117		0x11048, 0x1106c,
2118		0x11074, 0x11088,
2119		0x11098, 0x11120,
2120		0x1112c, 0x1117c,
2121		0x11190, 0x112e0,
2122		0x11300, 0x1130c,
2123		0x12000, 0x1206c,
2124		0x19040, 0x1906c,
2125		0x19078, 0x19080,
2126		0x1908c, 0x190e8,
2127		0x190f0, 0x190f8,
2128		0x19100, 0x19110,
2129		0x19120, 0x19124,
2130		0x19150, 0x19194,
2131		0x1919c, 0x191b0,
2132		0x191d0, 0x191e8,
2133		0x19238, 0x19290,
2134		0x192a4, 0x192b0,
2135		0x192bc, 0x192bc,
2136		0x19348, 0x1934c,
2137		0x193f8, 0x19418,
2138		0x19420, 0x19428,
2139		0x19430, 0x19444,
2140		0x1944c, 0x1946c,
2141		0x19474, 0x19474,
2142		0x19490, 0x194cc,
2143		0x194f0, 0x194f8,
2144		0x19c00, 0x19c48,
2145		0x19c50, 0x19c80,
2146		0x19c94, 0x19c98,
2147		0x19ca0, 0x19cbc,
2148		0x19ce4, 0x19ce4,
2149		0x19cf0, 0x19cf8,
2150		0x19d00, 0x19d28,
2151		0x19d50, 0x19d78,
2152		0x19d94, 0x19d98,
2153		0x19da0, 0x19dc8,
2154		0x19df0, 0x19e10,
2155		0x19e50, 0x19e6c,
2156		0x19ea0, 0x19ebc,
2157		0x19ec4, 0x19ef4,
2158		0x19f04, 0x19f2c,
2159		0x19f34, 0x19f34,
2160		0x19f40, 0x19f50,
2161		0x19f90, 0x19fac,
2162		0x19fc4, 0x19fc8,
2163		0x19fd0, 0x19fe4,
2164		0x1a000, 0x1a004,
2165		0x1a010, 0x1a06c,
2166		0x1a0b0, 0x1a0e4,
2167		0x1a0ec, 0x1a0f8,
2168		0x1a100, 0x1a108,
2169		0x1a114, 0x1a120,
2170		0x1a128, 0x1a130,
2171		0x1a138, 0x1a138,
2172		0x1a190, 0x1a1c4,
2173		0x1a1fc, 0x1a1fc,
2174		0x1e008, 0x1e00c,
2175		0x1e040, 0x1e044,
2176		0x1e04c, 0x1e04c,
2177		0x1e284, 0x1e290,
2178		0x1e2c0, 0x1e2c0,
2179		0x1e2e0, 0x1e2e0,
2180		0x1e300, 0x1e384,
2181		0x1e3c0, 0x1e3c8,
2182		0x1e408, 0x1e40c,
2183		0x1e440, 0x1e444,
2184		0x1e44c, 0x1e44c,
2185		0x1e684, 0x1e690,
2186		0x1e6c0, 0x1e6c0,
2187		0x1e6e0, 0x1e6e0,
2188		0x1e700, 0x1e784,
2189		0x1e7c0, 0x1e7c8,
2190		0x1e808, 0x1e80c,
2191		0x1e840, 0x1e844,
2192		0x1e84c, 0x1e84c,
2193		0x1ea84, 0x1ea90,
2194		0x1eac0, 0x1eac0,
2195		0x1eae0, 0x1eae0,
2196		0x1eb00, 0x1eb84,
2197		0x1ebc0, 0x1ebc8,
2198		0x1ec08, 0x1ec0c,
2199		0x1ec40, 0x1ec44,
2200		0x1ec4c, 0x1ec4c,
2201		0x1ee84, 0x1ee90,
2202		0x1eec0, 0x1eec0,
2203		0x1eee0, 0x1eee0,
2204		0x1ef00, 0x1ef84,
2205		0x1efc0, 0x1efc8,
2206		0x1f008, 0x1f00c,
2207		0x1f040, 0x1f044,
2208		0x1f04c, 0x1f04c,
2209		0x1f284, 0x1f290,
2210		0x1f2c0, 0x1f2c0,
2211		0x1f2e0, 0x1f2e0,
2212		0x1f300, 0x1f384,
2213		0x1f3c0, 0x1f3c8,
2214		0x1f408, 0x1f40c,
2215		0x1f440, 0x1f444,
2216		0x1f44c, 0x1f44c,
2217		0x1f684, 0x1f690,
2218		0x1f6c0, 0x1f6c0,
2219		0x1f6e0, 0x1f6e0,
2220		0x1f700, 0x1f784,
2221		0x1f7c0, 0x1f7c8,
2222		0x1f808, 0x1f80c,
2223		0x1f840, 0x1f844,
2224		0x1f84c, 0x1f84c,
2225		0x1fa84, 0x1fa90,
2226		0x1fac0, 0x1fac0,
2227		0x1fae0, 0x1fae0,
2228		0x1fb00, 0x1fb84,
2229		0x1fbc0, 0x1fbc8,
2230		0x1fc08, 0x1fc0c,
2231		0x1fc40, 0x1fc44,
2232		0x1fc4c, 0x1fc4c,
2233		0x1fe84, 0x1fe90,
2234		0x1fec0, 0x1fec0,
2235		0x1fee0, 0x1fee0,
2236		0x1ff00, 0x1ff84,
2237		0x1ffc0, 0x1ffc8,
2238		0x30000, 0x30030,
2239		0x30038, 0x30038,
2240		0x30040, 0x30040,
2241		0x30048, 0x30048,
2242		0x30050, 0x30050,
2243		0x3005c, 0x30060,
2244		0x30068, 0x30068,
2245		0x30070, 0x30070,
2246		0x30100, 0x30168,
2247		0x30190, 0x301a0,
2248		0x301a8, 0x301b8,
2249		0x301c4, 0x301c8,
2250		0x301d0, 0x301d0,
2251		0x30200, 0x30320,
2252		0x30400, 0x304b4,
2253		0x304c0, 0x3052c,
2254		0x30540, 0x3061c,
2255		0x30800, 0x308a0,
2256		0x308c0, 0x30908,
2257		0x30910, 0x309b8,
2258		0x30a00, 0x30a04,
2259		0x30a0c, 0x30a14,
2260		0x30a1c, 0x30a2c,
2261		0x30a44, 0x30a50,
2262		0x30a74, 0x30a74,
2263		0x30a7c, 0x30afc,
2264		0x30b08, 0x30c24,
2265		0x30d00, 0x30d14,
2266		0x30d1c, 0x30d3c,
2267		0x30d44, 0x30d4c,
2268		0x30d54, 0x30d74,
2269		0x30d7c, 0x30d7c,
2270		0x30de0, 0x30de0,
2271		0x30e00, 0x30ed4,
2272		0x30f00, 0x30fa4,
2273		0x30fc0, 0x30fc4,
2274		0x31000, 0x31004,
2275		0x31080, 0x310fc,
2276		0x31208, 0x31220,
2277		0x3123c, 0x31254,
2278		0x31300, 0x31300,
2279		0x31308, 0x3131c,
2280		0x31338, 0x3133c,
2281		0x31380, 0x31380,
2282		0x31388, 0x313a8,
2283		0x313b4, 0x313b4,
2284		0x31400, 0x31420,
2285		0x31438, 0x3143c,
2286		0x31480, 0x31480,
2287		0x314a8, 0x314a8,
2288		0x314b0, 0x314b4,
2289		0x314c8, 0x314d4,
2290		0x31a40, 0x31a4c,
2291		0x31af0, 0x31b20,
2292		0x31b38, 0x31b3c,
2293		0x31b80, 0x31b80,
2294		0x31ba8, 0x31ba8,
2295		0x31bb0, 0x31bb4,
2296		0x31bc8, 0x31bd4,
2297		0x32140, 0x3218c,
2298		0x321f0, 0x321f4,
2299		0x32200, 0x32200,
2300		0x32218, 0x32218,
2301		0x32400, 0x32400,
2302		0x32408, 0x3241c,
2303		0x32618, 0x32620,
2304		0x32664, 0x32664,
2305		0x326a8, 0x326a8,
2306		0x326ec, 0x326ec,
2307		0x32a00, 0x32abc,
2308		0x32b00, 0x32b38,
2309		0x32b40, 0x32b58,
2310		0x32b60, 0x32b78,
2311		0x32c00, 0x32c00,
2312		0x32c08, 0x32c3c,
2313		0x32e00, 0x32e2c,
2314		0x32f00, 0x32f2c,
2315		0x33000, 0x3302c,
2316		0x33034, 0x33050,
2317		0x33058, 0x33058,
2318		0x33060, 0x3308c,
2319		0x3309c, 0x330ac,
2320		0x330c0, 0x330c0,
2321		0x330c8, 0x330d0,
2322		0x330d8, 0x330e0,
2323		0x330ec, 0x3312c,
2324		0x33134, 0x33150,
2325		0x33158, 0x33158,
2326		0x33160, 0x3318c,
2327		0x3319c, 0x331ac,
2328		0x331c0, 0x331c0,
2329		0x331c8, 0x331d0,
2330		0x331d8, 0x331e0,
2331		0x331ec, 0x33290,
2332		0x33298, 0x332c4,
2333		0x332e4, 0x33390,
2334		0x33398, 0x333c4,
2335		0x333e4, 0x3342c,
2336		0x33434, 0x33450,
2337		0x33458, 0x33458,
2338		0x33460, 0x3348c,
2339		0x3349c, 0x334ac,
2340		0x334c0, 0x334c0,
2341		0x334c8, 0x334d0,
2342		0x334d8, 0x334e0,
2343		0x334ec, 0x3352c,
2344		0x33534, 0x33550,
2345		0x33558, 0x33558,
2346		0x33560, 0x3358c,
2347		0x3359c, 0x335ac,
2348		0x335c0, 0x335c0,
2349		0x335c8, 0x335d0,
2350		0x335d8, 0x335e0,
2351		0x335ec, 0x33690,
2352		0x33698, 0x336c4,
2353		0x336e4, 0x33790,
2354		0x33798, 0x337c4,
2355		0x337e4, 0x337fc,
2356		0x33814, 0x33814,
2357		0x33854, 0x33868,
2358		0x33880, 0x3388c,
2359		0x338c0, 0x338d0,
2360		0x338e8, 0x338ec,
2361		0x33900, 0x3392c,
2362		0x33934, 0x33950,
2363		0x33958, 0x33958,
2364		0x33960, 0x3398c,
2365		0x3399c, 0x339ac,
2366		0x339c0, 0x339c0,
2367		0x339c8, 0x339d0,
2368		0x339d8, 0x339e0,
2369		0x339ec, 0x33a90,
2370		0x33a98, 0x33ac4,
2371		0x33ae4, 0x33b10,
2372		0x33b24, 0x33b28,
2373		0x33b38, 0x33b50,
2374		0x33bf0, 0x33c10,
2375		0x33c24, 0x33c28,
2376		0x33c38, 0x33c50,
2377		0x33cf0, 0x33cfc,
2378		0x34000, 0x34030,
2379		0x34038, 0x34038,
2380		0x34040, 0x34040,
2381		0x34048, 0x34048,
2382		0x34050, 0x34050,
2383		0x3405c, 0x34060,
2384		0x34068, 0x34068,
2385		0x34070, 0x34070,
2386		0x34100, 0x34168,
2387		0x34190, 0x341a0,
2388		0x341a8, 0x341b8,
2389		0x341c4, 0x341c8,
2390		0x341d0, 0x341d0,
2391		0x34200, 0x34320,
2392		0x34400, 0x344b4,
2393		0x344c0, 0x3452c,
2394		0x34540, 0x3461c,
2395		0x34800, 0x348a0,
2396		0x348c0, 0x34908,
2397		0x34910, 0x349b8,
2398		0x34a00, 0x34a04,
2399		0x34a0c, 0x34a14,
2400		0x34a1c, 0x34a2c,
2401		0x34a44, 0x34a50,
2402		0x34a74, 0x34a74,
2403		0x34a7c, 0x34afc,
2404		0x34b08, 0x34c24,
2405		0x34d00, 0x34d14,
2406		0x34d1c, 0x34d3c,
2407		0x34d44, 0x34d4c,
2408		0x34d54, 0x34d74,
2409		0x34d7c, 0x34d7c,
2410		0x34de0, 0x34de0,
2411		0x34e00, 0x34ed4,
2412		0x34f00, 0x34fa4,
2413		0x34fc0, 0x34fc4,
2414		0x35000, 0x35004,
2415		0x35080, 0x350fc,
2416		0x35208, 0x35220,
2417		0x3523c, 0x35254,
2418		0x35300, 0x35300,
2419		0x35308, 0x3531c,
2420		0x35338, 0x3533c,
2421		0x35380, 0x35380,
2422		0x35388, 0x353a8,
2423		0x353b4, 0x353b4,
2424		0x35400, 0x35420,
2425		0x35438, 0x3543c,
2426		0x35480, 0x35480,
2427		0x354a8, 0x354a8,
2428		0x354b0, 0x354b4,
2429		0x354c8, 0x354d4,
2430		0x35a40, 0x35a4c,
2431		0x35af0, 0x35b20,
2432		0x35b38, 0x35b3c,
2433		0x35b80, 0x35b80,
2434		0x35ba8, 0x35ba8,
2435		0x35bb0, 0x35bb4,
2436		0x35bc8, 0x35bd4,
2437		0x36140, 0x3618c,
2438		0x361f0, 0x361f4,
2439		0x36200, 0x36200,
2440		0x36218, 0x36218,
2441		0x36400, 0x36400,
2442		0x36408, 0x3641c,
2443		0x36618, 0x36620,
2444		0x36664, 0x36664,
2445		0x366a8, 0x366a8,
2446		0x366ec, 0x366ec,
2447		0x36a00, 0x36abc,
2448		0x36b00, 0x36b38,
2449		0x36b40, 0x36b58,
2450		0x36b60, 0x36b78,
2451		0x36c00, 0x36c00,
2452		0x36c08, 0x36c3c,
2453		0x36e00, 0x36e2c,
2454		0x36f00, 0x36f2c,
2455		0x37000, 0x3702c,
2456		0x37034, 0x37050,
2457		0x37058, 0x37058,
2458		0x37060, 0x3708c,
2459		0x3709c, 0x370ac,
2460		0x370c0, 0x370c0,
2461		0x370c8, 0x370d0,
2462		0x370d8, 0x370e0,
2463		0x370ec, 0x3712c,
2464		0x37134, 0x37150,
2465		0x37158, 0x37158,
2466		0x37160, 0x3718c,
2467		0x3719c, 0x371ac,
2468		0x371c0, 0x371c0,
2469		0x371c8, 0x371d0,
2470		0x371d8, 0x371e0,
2471		0x371ec, 0x37290,
2472		0x37298, 0x372c4,
2473		0x372e4, 0x37390,
2474		0x37398, 0x373c4,
2475		0x373e4, 0x3742c,
2476		0x37434, 0x37450,
2477		0x37458, 0x37458,
2478		0x37460, 0x3748c,
2479		0x3749c, 0x374ac,
2480		0x374c0, 0x374c0,
2481		0x374c8, 0x374d0,
2482		0x374d8, 0x374e0,
2483		0x374ec, 0x3752c,
2484		0x37534, 0x37550,
2485		0x37558, 0x37558,
2486		0x37560, 0x3758c,
2487		0x3759c, 0x375ac,
2488		0x375c0, 0x375c0,
2489		0x375c8, 0x375d0,
2490		0x375d8, 0x375e0,
2491		0x375ec, 0x37690,
2492		0x37698, 0x376c4,
2493		0x376e4, 0x37790,
2494		0x37798, 0x377c4,
2495		0x377e4, 0x377fc,
2496		0x37814, 0x37814,
2497		0x37854, 0x37868,
2498		0x37880, 0x3788c,
2499		0x378c0, 0x378d0,
2500		0x378e8, 0x378ec,
2501		0x37900, 0x3792c,
2502		0x37934, 0x37950,
2503		0x37958, 0x37958,
2504		0x37960, 0x3798c,
2505		0x3799c, 0x379ac,
2506		0x379c0, 0x379c0,
2507		0x379c8, 0x379d0,
2508		0x379d8, 0x379e0,
2509		0x379ec, 0x37a90,
2510		0x37a98, 0x37ac4,
2511		0x37ae4, 0x37b10,
2512		0x37b24, 0x37b28,
2513		0x37b38, 0x37b50,
2514		0x37bf0, 0x37c10,
2515		0x37c24, 0x37c28,
2516		0x37c38, 0x37c50,
2517		0x37cf0, 0x37cfc,
2518		0x40040, 0x40040,
2519		0x40080, 0x40084,
2520		0x40100, 0x40100,
2521		0x40140, 0x401bc,
2522		0x40200, 0x40214,
2523		0x40228, 0x40228,
2524		0x40240, 0x40258,
2525		0x40280, 0x40280,
2526		0x40304, 0x40304,
2527		0x40330, 0x4033c,
2528		0x41304, 0x413c8,
2529		0x413d0, 0x413dc,
2530		0x413f0, 0x413f0,
2531		0x41400, 0x4140c,
2532		0x41414, 0x4141c,
2533		0x41480, 0x414d0,
2534		0x44000, 0x4407c,
2535		0x440c0, 0x441ac,
2536		0x441b4, 0x4427c,
2537		0x442c0, 0x443ac,
2538		0x443b4, 0x4447c,
2539		0x444c0, 0x445ac,
2540		0x445b4, 0x4467c,
2541		0x446c0, 0x447ac,
2542		0x447b4, 0x4487c,
2543		0x448c0, 0x449ac,
2544		0x449b4, 0x44a7c,
2545		0x44ac0, 0x44bac,
2546		0x44bb4, 0x44c7c,
2547		0x44cc0, 0x44dac,
2548		0x44db4, 0x44e7c,
2549		0x44ec0, 0x44fac,
2550		0x44fb4, 0x4507c,
2551		0x450c0, 0x451ac,
2552		0x451b4, 0x451fc,
2553		0x45800, 0x45804,
2554		0x45810, 0x45830,
2555		0x45840, 0x45860,
2556		0x45868, 0x45868,
2557		0x45880, 0x45884,
2558		0x458a0, 0x458b0,
2559		0x45a00, 0x45a04,
2560		0x45a10, 0x45a30,
2561		0x45a40, 0x45a60,
2562		0x45a68, 0x45a68,
2563		0x45a80, 0x45a84,
2564		0x45aa0, 0x45ab0,
2565		0x460c0, 0x460e4,
2566		0x47000, 0x4703c,
2567		0x47044, 0x4708c,
2568		0x47200, 0x47250,
2569		0x47400, 0x47408,
2570		0x47414, 0x47420,
2571		0x47600, 0x47618,
2572		0x47800, 0x47814,
2573		0x47820, 0x4782c,
2574		0x50000, 0x50084,
2575		0x50090, 0x500cc,
2576		0x50300, 0x50384,
2577		0x50400, 0x50400,
2578		0x50800, 0x50884,
2579		0x50890, 0x508cc,
2580		0x50b00, 0x50b84,
2581		0x50c00, 0x50c00,
2582		0x51000, 0x51020,
2583		0x51028, 0x510b0,
2584		0x51300, 0x51324,
2585	};
2586
/*
 * Register ranges captured for T6 Virtual Functions: the VF views of the
 * SGE, MPS, PL and CIM register windows plus the firmware mailbox data
 * registers.  Entries are consumed in pairs as inclusive [start, end]
 * address ranges.
 */
static const unsigned int t6vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	FW_T6VF_MBDATA_BASE_ADDR,
	FW_T6VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
2598
2599	u32 *buf_end = (u32 *)(buf + buf_size);
2600	const unsigned int *reg_ranges;
2601	int reg_ranges_size, range;
2602	unsigned int chip_version = chip_id(adap);
2603
2604	/*
2605	 * Select the right set of register ranges to dump depending on the
2606	 * adapter chip type.
2607	 */
2608	switch (chip_version) {
2609	case CHELSIO_T4:
2610		if (adap->flags & IS_VF) {
2611			reg_ranges = t4vf_reg_ranges;
2612			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2613		} else {
2614			reg_ranges = t4_reg_ranges;
2615			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2616		}
2617		break;
2618
2619	case CHELSIO_T5:
2620		if (adap->flags & IS_VF) {
2621			reg_ranges = t5vf_reg_ranges;
2622			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2623		} else {
2624			reg_ranges = t5_reg_ranges;
2625			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2626		}
2627		break;
2628
2629	case CHELSIO_T6:
2630		if (adap->flags & IS_VF) {
2631			reg_ranges = t6vf_reg_ranges;
2632			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2633		} else {
2634			reg_ranges = t6_reg_ranges;
2635			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2636		}
2637		break;
2638
2639	default:
2640		CH_ERR(adap,
2641			"Unsupported chip version %d\n", chip_version);
2642		return;
2643	}
2644
2645	/*
2646	 * Clear the register buffer and insert the appropriate register
2647	 * values selected by the above register ranges.
2648	 */
2649	memset(buf, 0, buf_size);
2650	for (range = 0; range < reg_ranges_size; range += 2) {
2651		unsigned int reg = reg_ranges[range];
2652		unsigned int last_reg = reg_ranges[range + 1];
2653		u32 *bufp = (u32 *)(buf + reg);
2654
2655		/*
2656		 * Iterate across the register range filling in the register
2657		 * buffer but don't write past the end of the register buffer.
2658		 */
2659		while (reg <= last_reg && bufp < buf_end) {
2660			*bufp++ = t4_read_reg(adap, reg);
2661			reg += sizeof(u32);
2662		}
2663	}
2664}
2665
2666/*
2667 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
2668 * VPD-R sections.
2669 */
struct t4_vpd_hdr {
	u8  id_tag;		/* first byte; CHELSIO_VPD_UNIQUE_ID (0x82) on valid VPDs */
	u8  id_len[2];		/* length of the ID string */
	u8  id_data[ID_LEN];	/* the ID string itself */
	u8  vpdr_tag;		/* VPD-R section tag */
	u8  vpdr_len[2];	/* VPD-R section length, little-endian */
};
2677
2678/*
2679 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2680 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc		/* virtual addr of the EEPROM status word */
#define VPD_BASE		0x400		/* usual EEPROM offset of the VPD */
#define VPD_BASE_OLD		0		/* VPD offset on early cards */
#define VPD_LEN			1024		/* number of VPD bytes we read */
#define VPD_INFO_FLD_HDR_SIZE	3		/* keyword (2 bytes) + length (1 byte) */
#define CHELSIO_VPD_UNIQUE_ID	0x82		/* PCI-SIG ID tag opening a valid VPD */
2690
2691/*
2692 * Small utility function to wait till any outstanding VPD Access is complete.
2693 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2694 * VPD Access in flight.  This allows us to handle the problem of having a
2695 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
2697 */
2698static int t4_seeprom_wait(struct adapter *adapter)
2699{
2700	unsigned int base = adapter->params.pci.vpd_cap_addr;
2701	int max_poll;
2702
2703	/*
2704	 * If no VPD Access is in flight, we can just return success right
2705	 * away.
2706	 */
2707	if (!adapter->vpd_busy)
2708		return 0;
2709
2710	/*
2711	 * Poll the VPD Capability Address/Flag register waiting for it
2712	 * to indicate that the operation is complete.
2713	 */
2714	max_poll = EEPROM_MAX_POLL;
2715	do {
2716		u16 val;
2717
2718		udelay(EEPROM_DELAY);
2719		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2720
2721		/*
2722		 * If the operation is complete, mark the VPD as no longer
2723		 * busy and return success.
2724		 */
2725		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2726			adapter->vpd_busy = 0;
2727			return 0;
2728		}
2729	} while (--max_poll);
2730
2731	/*
2732	 * Failure!  Note that we leave the VPD Busy status set in order to
2733	 * avoid pushing a new VPD Access request into the VPD Capability till
2734	 * the current operation eventually succeeds.  It's a bug to issue a
2735	 * new request when an existing request is in flight and will result
2736	 * in corrupt hardware state.
2737	 */
2738	return -ETIMEDOUT;
2739}
2740
2741/**
2742 *	t4_seeprom_read - read a serial EEPROM location
2743 *	@adapter: adapter to read
2744 *	@addr: EEPROM virtual address
2745 *	@data: where to store the read data
2746 *
2747 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
2748 *	VPD capability.  Note that this function must be called with a virtual
2749 *	address.
2750 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;	/* read is done when flag F goes to 1 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
2795
2796/**
2797 *	t4_seeprom_write - write a serial EEPROM location
2798 *	@adapter: adapter to write
2799 *	@addr: virtual EEPROM address
2800 *	@data: value to write
2801 *
2802 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
2803 *	VPD capability.  Note that this function must be called with a virtual
2804 *	address.
2805 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note
	 * the error and return it to our caller.  Note that we do not reset
	 * the VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;	/* write is done when flag F drops to 0 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete. If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
2863
2864/**
2865 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
2866 *	@phys_addr: the physical EEPROM address
2867 *	@fn: the PCI function number
2868 *	@sz: size of function-specific area
2869 *
2870 *	Translate a physical EEPROM address to virtual.  The first 1K is
2871 *	accessed through virtual addresses starting at 31K, the rest is
2872 *	accessed through virtual addresses starting at 0.
2873 *
2874 *	The mapping is as follows:
2875 *	[0..1K) -> [31K..32K)
2876 *	[1K..1K+A) -> [ES-A..ES)
2877 *	[1K+A..ES) -> [0..ES-A-1K)
2878 *
2879 *	where A = @fn * @sz, and ES = EEPROM size.
2880 */
2881int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2882{
2883	fn *= sz;
2884	if (phys_addr < 1024)
2885		return phys_addr + (31 << 10);
2886	if (phys_addr < 1024 + fn)
2887		return EEPROMSIZE - fn + phys_addr - 1024;
2888	if (phys_addr < EEPROMSIZE)
2889		return phys_addr - 1024 - fn;
2890	return -EINVAL;
2891}
2892
2893/**
2894 *	t4_seeprom_wp - enable/disable EEPROM write protection
2895 *	@adapter: the adapter
2896 *	@enable: whether to enable or disable write protection
2897 *
2898 *	Enables or disables write protection on the serial EEPROM.
2899 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	/* 0xc in the EEPROM status word turns protection on; 0 clears it. */
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
2904
2905/**
2906 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
2907 *	@v: Pointer to buffered vpd data structure
2908 *	@kw: The keyword to search for
2909 *
2910 *	Returns the value of the information field keyword or
2911 *	-ENOENT otherwise.
2912 */
2913static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2914{
2915	int i;
2916	unsigned int offset , len;
2917	const u8 *buf = (const u8 *)v;
2918	const u8 *vpdr_len = &v->vpdr_len[0];
2919	offset = sizeof(struct t4_vpd_hdr);
2920	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2921
2922	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2923		return -ENOENT;
2924	}
2925
2926	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2927		if(memcmp(buf + i , kw , 2) == 0){
2928			i += VPD_INFO_FLD_HDR_SIZE;
2929			return i;
2930		}
2931
2932		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2933	}
2934
2935	return -ENOENT;
2936}
2937
2938
2939/**
2940 *	get_vpd_params - read VPD parameters from VPD EEPROM
2941 *	@adapter: adapter to read
2942 *	@p: where to store the parameters
2943 *	@vpd: caller provided temporary space to read the VPD into
2944 *
2945 *	Reads card parameters stored in VPD EEPROM.
2946 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
    u8 *vpd)
{
	int i, ret, addr;
	int ec, sn, pn, na;	/* offsets of keyword values within vpd[] */
	u8 csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the whole VPD_LEN-byte VPD into the caller's buffer, 4B at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
 	v = (const struct t4_vpd_hdr *)vpd;

/*
 * Locate keyword @name in the VPD-R area and leave the offset of its value
 * in @var; fail the whole routine if the keyword is missing.
 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	/* Sum all bytes up to and including the RV value; must come to zero. */
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy out and trim the fixed-size ID and EC strings ... */
	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/*
	 * ... and the SN/PN/NA fields, each bounded by both its VPD field
	 * length byte (at offset 2 of the field header) and the destination
	 * buffer size.
	 */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}
3019
3020/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (standard SPI serial-flash command set) */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3033
3034/**
3035 *	sf1_read - read data from the serial flash
3036 *	@adapter: the adapter
3037 *	@byte_cnt: number of bytes to read
3038 *	@cont: whether another operation will be chained
3039 *	@lock: whether to lock SF for PL access only
3040 *	@valp: where to store the read data
3041 *
3042 *	Reads up to 4 bytes of data from the serial flash.  The location of
3043 *	the read needs to be specified prior to calling this by issuing the
3044 *	appropriate commands to the serial flash.
3045 */
3046static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3047		    int lock, u32 *valp)
3048{
3049	int ret;
3050
3051	if (!byte_cnt || byte_cnt > 4)
3052		return -EINVAL;
3053	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3054		return -EBUSY;
3055	t4_write_reg(adapter, A_SF_OP,
3056		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3057	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3058	if (!ret)
3059		*valp = t4_read_reg(adapter, A_SF_DATA);
3060	return ret;
3061}
3062
3063/**
3064 *	sf1_write - write data to the serial flash
3065 *	@adapter: the adapter
3066 *	@byte_cnt: number of bytes to write
3067 *	@cont: whether another operation will be chained
3068 *	@lock: whether to lock SF for PL access only
3069 *	@val: value to write
3070 *
3071 *	Writes up to 4 bytes of data to the serial flash.  The location of
3072 *	the write needs to be specified prior to calling this by issuing the
3073 *	appropriate commands to the serial flash.
3074 */
3075static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3076		     int lock, u32 val)
3077{
3078	if (!byte_cnt || byte_cnt > 4)
3079		return -EINVAL;
3080	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3081		return -EBUSY;
3082	t4_write_reg(adapter, A_SF_DATA, val);
3083	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3084		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3085	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3086}
3087
3088/**
3089 *	flash_wait_op - wait for a flash operation to complete
3090 *	@adapter: the adapter
3091 *	@attempts: max number of polls of the status register
3092 *	@delay: delay between polls in ms
3093 *
3094 *	Wait for a flash operation to complete by polling the status register.
3095 */
3096static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3097{
3098	int ret;
3099	u32 status;
3100
3101	while (1) {
3102		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3103		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3104			return ret;
3105		if (!(status & 1))
3106			return 0;
3107		if (--attempts == 0)
3108			return -EAGAIN;
3109		if (delay)
3110			msleep(delay);
3111	}
3112}
3113
3114/**
3115 *	t4_read_flash - read words from serial flash
3116 *	@adapter: the adapter
3117 *	@addr: the start address for the read
3118 *	@nwords: how many 32-bit words to read
3119 *	@data: where to store the read data
3120 *	@byte_oriented: whether to store data as bytes or as words
3121 *
3122 *	Read the specified number of 32-bit words from the serial flash.
3123 *	If @byte_oriented is set the read data is stored as a byte array
3124 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3125 *	natural endianness.
3126 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must be 4-byte aligned and stay within the flash part. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Byte-swap the address and OR the fast-read opcode into the low byte. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained (cont=1) until the last word. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3152
3153/**
3154 *	t4_write_flash - write up to a page of data to the serial flash
3155 *	@adapter: the adapter
3156 *	@addr: the start address to write
3157 *	@n: length of data to write in bytes
3158 *	@data: the data to write
3159 *	@byte_oriented: whether to store data as bytes or as words
3160 *
3161 *	Writes up to a page of data (256 bytes) to the serial flash starting
3162 *	at the given address.  All the data must be written to the same page.
3163 *	If @byte_oriented is set the write data is stored as byte stream
3164 *	(i.e. matches what on disk), otherwise in big-endian.
3165 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit in the flash and stay within one 256-byte page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Byte-swap the address and OR the Program Page opcode into the low byte. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out up to 4 bytes at a time, MSB first. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the payload above; rewind for the compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
3218
3219/**
3220 *	t4_get_fw_version - read the firmware version
3221 *	@adapter: the adapter
3222 *	@vers: where to place the version
3223 *
3224 *	Reads the FW version from flash.
3225 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	/* The version word lives in the fw_hdr at the start of the FW region. */
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}
3232
3233/**
3234 *	t4_get_bs_version - read the firmware bootstrap version
3235 *	@adapter: the adapter
3236 *	@vers: where to place the version
3237 *
3238 *	Reads the FW Bootstrap version from flash.
3239 */
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	/* Same fw_hdr layout as the main firmware, in the bootstrap region. */
	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}
3246
3247/**
3248 *	t4_get_tp_version - read the TP microcode version
3249 *	@adapter: the adapter
3250 *	@vers: where to place the version
3251 *
3252 *	Reads the TP microcode version from flash.
3253 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	/* The TP microcode version is carried in the firmware's header. */
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
3260
3261/**
3262 *	t4_get_exprom_version - return the Expansion ROM version (if any)
3263 *	@adapter: the adapter
3264 *	@vers: where to place the version
3265 *
3266 *	Reads the Expansion ROM header from FLASH and returns the version
3267 *	number (if present) through the @vers return value pointer.  We return
3268 *	this in the Firmware Version Format since it's convenient.  Return
3269 *	0 on success, -ENOENT if no Expansion ROM is present.
3270 */
3271int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3272{
3273	struct exprom_header {
3274		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3275		unsigned char hdr_ver[4];	/* Expansion ROM version */
3276	} *hdr;
3277	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3278					   sizeof(u32))];
3279	int ret;
3280
3281	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3282			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3283			    0);
3284	if (ret)
3285		return ret;
3286
3287	hdr = (struct exprom_header *)exprom_header_buf;
3288	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3289		return -ENOENT;
3290
3291	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3292		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3293		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3294		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3295	return 0;
3296}
3297
3298/**
3299 *	t4_get_scfg_version - return the Serial Configuration version
3300 *	@adapter: the adapter
3301 *	@vers: where to place the version
3302 *
3303 *	Reads the Serial Configuration Version via the Firmware interface
3304 *	(thus this can only be called once we're ready to issue Firmware
3305 *	commands).  The format of the Serial Configuration version is
3306 *	adapter specific.  Returns 0 on success, an error on failure.
3307 *
3308 *	Note that early versions of the Firmware didn't include the ability
3309 *	to retrieve the Serial Configuration version, so we zero-out the
3310 *	return-value parameter in that case to avoid leaving it with
3311 *	garbage in it.
3312 *
3313 *	Also note that the Firmware will return its cached copy of the Serial
3314 *	Initialization Revision ID, not the actual Revision ID as written in
3315 *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3316 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3317 *	it's best to defer calling this routine till after a FW_RESET_CMD has
3318 *	been issued if the Host Driver will be performing a full adapter
3319 *	initialization.
3320 */
3321int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3322{
3323	u32 scfgrev_param;
3324	int ret;
3325
3326	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3327			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3328	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3329			      1, &scfgrev_param, vers);
3330	if (ret)
3331		*vers = 0;
3332	return ret;
3333}
3334
3335/**
3336 *	t4_get_vpd_version - return the VPD version
3337 *	@adapter: the adapter
3338 *	@vers: where to place the version
3339 *
3340 *	Reads the VPD via the Firmware interface (thus this can only be called
3341 *	once we're ready to issue Firmware commands).  The format of the
3342 *	VPD version is adapter specific.  Returns 0 on success, an error on
3343 *	failure.
3344 *
3345 *	Note that early versions of the Firmware didn't include the ability
3346 *	to retrieve the VPD version, so we zero-out the return-value parameter
3347 *	in that case to avoid leaving it with garbage in it.
3348 *
3349 *	Also note that the Firmware will return its cached copy of the VPD
3350 *	Revision ID, not the actual Revision ID as written in the Serial
3351 *	EEPROM.  This is only an issue if a new VPD has been written and the
3352 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3353 *	to defer calling this routine till after a FW_RESET_CMD has been issued
3354 *	if the Host Driver will be performing a full adapter initialization.
3355 */
3356int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3357{
3358	u32 vpdrev_param;
3359	int ret;
3360
3361	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3362			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3363	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3364			      1, &vpdrev_param, vers);
3365	if (ret)
3366		*vers = 0;
3367	return ret;
3368}
3369
3370/**
3371 *	t4_get_version_info - extract various chip/firmware version information
3372 *	@adapter: the adapter
3373 *
3374 *	Reads various chip/firmware version numbers and stores them into the
3375 *	adapter Adapter Parameters structure.  If any of the efforts fails
3376 *	the first failure will be returned, but all of the version numbers
3377 *	will be read.
3378 */
3379int t4_get_version_info(struct adapter *adapter)
3380{
3381	int ret = 0;
3382
3383	#define FIRST_RET(__getvinfo) \
3384	do { \
3385		int __ret = __getvinfo; \
3386		if (__ret && !ret) \
3387			ret = __ret; \
3388	} while (0)
3389
3390	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3391	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3392	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3393	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3394	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3395	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3396
3397	#undef FIRST_RET
3398
3399	return ret;
3400}
3401
3402/**
3403 *	t4_flash_erase_sectors - erase a range of flash sectors
3404 *	@adapter: the adapter
3405 *	@start: the first sector to erase
3406 *	@end: the last sector to erase
3407 *
3408 *	Erases the sectors in the given inclusive range.
3409 */
3410int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3411{
3412	int ret = 0;
3413
3414	if (end >= adapter->params.sf_nsec)
3415		return -EINVAL;
3416
3417	while (start <= end) {
3418		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3419		    (ret = sf1_write(adapter, 4, 0, 1,
3420				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3421		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3422			CH_ERR(adapter,
3423				"erase of flash sector %d failed, error %d\n",
3424				start, ret);
3425			break;
3426		}
3427		start++;
3428	}
3429	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3430	return ret;
3431}
3432
3433/**
3434 *	t4_flash_cfg_addr - return the address of the flash configuration file
3435 *	@adapter: the adapter
3436 *
3437 *	Return the address within the flash where the Firmware Configuration
3438 *	File is stored, or an error if the device FLASH is too small to contain
3439 *	a Firmware Configuration File.
3440 */
3441int t4_flash_cfg_addr(struct adapter *adapter)
3442{
3443	/*
3444	 * If the device FLASH isn't large enough to hold a Firmware
3445	 * Configuration File, return an error.
3446	 */
3447	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3448		return -ENOSPC;
3449
3450	return FLASH_CFG_START;
3451}
3452
3453/*
3454 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
3455 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3456 * and emit an error message for mismatched firmware to save our caller the
3457 * effort ...
3458 */
3459static int t4_fw_matches_chip(struct adapter *adap,
3460			      const struct fw_hdr *hdr)
3461{
3462	/*
3463	 * The expression below will return FALSE for any unsupported adapter
3464	 * which will keep us "honest" in the future ...
3465	 */
3466	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3467	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3468	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3469		return 1;
3470
3471	CH_ERR(adap,
3472		"FW image (%d) is not suitable for this adapter (%d)\n",
3473		hdr->chip, chip_id(adap));
3474	return 0;
3475}
3476
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image must be a whole number of 512-byte units, its size must match
 *	the size recorded in its header, its 32-bit-word sum must be
 *	0xffffffff, and it must be built for this chip.  Returns 0 on
 *	success or a negative errno on failure.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/*
	 * Bootstrap images go to the dedicated bootstrap flash region;
	 * everything else goes to the main firmware region.
	 */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
 		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The sum of all 32-bit words of a valid image is 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally, replace the bad version with the real one. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	return ret;
}
3573
3574/**
3575 *	t4_fwcache - firmware cache operation
3576 *	@adap: the adapter
3577 *	@op  : the operation (flush or flush and invalidate)
3578 */
3579int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3580{
3581	struct fw_params_cmd c;
3582
3583	memset(&c, 0, sizeof(c));
3584	c.op_to_vfn =
3585	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3586			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3587				V_FW_PARAMS_CMD_PFN(adap->pf) |
3588				V_FW_PARAMS_CMD_VFN(0));
3589	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3590	c.param[0].mnem =
3591	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3592			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3593	c.param[0].val = (__force __be32)op;
3594
3595	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3596}
3597
/*
 * Read out the CIM PIF logic analyzer request and response buffers into
 * @pif_req and @pif_rsp.  If non-NULL, @pif_req_wrptr and @pif_rsp_wrptr
 * receive the current request/response write pointers.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* If the LA is enabled, turn it off while we read it out. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entries to read, then read them. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Advance past two entries per group and wrap via the masks. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
3631
3632void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3633{
3634	u32 cfg;
3635	int i, j, idx;
3636
3637	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
3638	if (cfg & F_LADBGEN)
3639		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
3640
3641	for (i = 0; i < CIM_MALA_SIZE; i++) {
3642		for (j = 0; j < 5; j++) {
3643			idx = 8 * i + j;
3644			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
3645				     V_PILADBGRDPTR(idx));
3646			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
3647			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
3648		}
3649	}
3650	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
3651}
3652
3653void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3654{
3655	unsigned int i, j;
3656
3657	for (i = 0; i < 8; i++) {
3658		u32 *p = la_buf + i;
3659
3660		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
3661		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
3662		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
3663		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3664			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
3665	}
3666}
3667
/* Port capabilities we may advertise: every supported speed plus autoneg. */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
		     FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
		     FW_PORT_CAP_ANEG)
3672
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	/* Translate the requested pause settings into FW capability bits. */
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* No autoneg capability: request all supported modes. */
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Autoneg explicitly disabled: request the exact speed. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		/* Autoneg on: advertise and let negotiation pick the rest. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
3718
3719/**
3720 *	t4_restart_aneg - restart autonegotiation
3721 *	@adap: the adapter
3722 *	@mbox: mbox to use for the FW command
3723 *	@port: the port id
3724 *
3725 *	Restarts autonegotiation for the selected port.
3726 */
3727int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3728{
3729	struct fw_port_cmd c;
3730
3731	memset(&c, 0, sizeof(c));
3732	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3733				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3734				     V_FW_PORT_CMD_PORTID(port));
3735	c.action_to_len16 =
3736		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3737			    FW_LEN16(c));
3738	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3739	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3740}
3741
/* Platform-specific handler invoked when an interrupt condition matches. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt handler (see
 * t4_handle_intr_status()).  A table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
3751
3752/**
3753 *	t4_handle_intr_status - table driven interrupt handler
3754 *	@adapter: the adapter that generated the interrupt
3755 *	@reg: the interrupt status register to process
3756 *	@acts: table of interrupt actions
3757 *
3758 *	A table driven interrupt handler that applies a set of masks to an
3759 *	interrupt status word and performs the corresponding actions if the
3760 *	interrupts described by the mask have occurred.  The actions include
3761 *	optionally emitting a warning or alert message.  The table is terminated
3762 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
3763 *	conditions.
3764 */
3765static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3766				 const struct intr_info *acts)
3767{
3768	int fatal = 0;
3769	unsigned int mask = 0;
3770	unsigned int status = t4_read_reg(adapter, reg);
3771
3772	for ( ; acts->mask; ++acts) {
3773		if (!(status & acts->mask))
3774			continue;
3775		if (acts->fatal) {
3776			fatal++;
3777			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3778				  status & acts->mask);
3779		} else if (acts->msg)
3780			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3781				 status & acts->mask);
3782		if (acts->int_handler)
3783			acts->int_handler(adapter);
3784		mask |= acts->mask;
3785	}
3786	status &= mask;
3787	if (status)	/* clear processed interrupts */
3788		t4_write_reg(adapter, reg, status);
3789	return fatal;
3790}
3791
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4 only: UTL system bus agent status. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4 only: UTL PCI Express port status. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 layout of PCIE_INT_CAUSE. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 layout of PCIE_INT_CAUSE. */
	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 has extra UTL status registers; T5+ only has PCIE_INT_CAUSE. */
	if (is_t4(adapter))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
			t4_handle_intr_status(adapter,
					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					pcie_port_intr_info) +
			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					      pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
3910
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	/* Both TP conditions are marked fatal in the table above. */
	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
3925
/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	/* Causes common to all chip generations. */
	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* Causes that only exist on T4/T5. */
	static const struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * T6-only causes.  For now, treat these interrupts as fatal so that
	 * we disable SGE and get better debug info.
	 */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
		  "SGE PCIe error for a DBP thread", -1, 1 },
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Parity errors live in INT_CAUSE1/2; report and clear them. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
				(unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report and clear any per-queue error recorded by the SGE. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4007
/* Aggregate masks for CIM outbound/inbound queue parity errors. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4012
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	/* uP access errors reported via CIM_HOST_UPACC_INT_CAUSE. */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	/* Report a firmware error flagged in PCIE_FW before CIM causes. */
	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4071
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	/* Every ULP RX condition above is fatal. */
	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
4087
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	/* PBL out-of-bounds conditions are non-fatal; parity errors are. */
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}
4109
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	/* Every PM TX condition above is fatal. */
	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
4132
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	/* Every PM RX condition above is fatal. */
	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}
4152
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	/* Every CPL switch condition above is fatal. */
	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}
4171
/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = chip_id(adap);
	/* T4/T5 layout of LE_DB_INT_CAUSE. */
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 moved/renamed several of these cause bits. */
	static const struct intr_info t6_le_intr_info[] = {
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}
4201
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Walk each of the MPS sub-module cause registers in turn. */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause register and flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}
4271
/* Interrupt causes handled for the EDC/MC memory controllers. */
#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory controller
 * (MEM_EDC0, MEM_EDC1, MEM_MC/MEM_MC0, or MEM_MC1).
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Names indexed by @idx; longest is "MC/MC0" (6 chars + NUL = 7). */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the cause and ECC-status registers for this controller. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			  name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		/* Snapshot the correctable-error count before resetting it. */
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);

		/*
		 * Writing all-ones to the CECNT field resets the running
		 * count -- NOTE(review): presumed write-to-clear semantics;
		 * confirm against the chip's register documentation.
		 */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);	/* ack the handled causes */
	/* Parity and uncorrectable-ECC errors are fatal; correctable are not. */
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
4322
/*
 * MA interrupt handler.  Any MA interrupt is treated as fatal:
 * t4_fatal_err() is called unconditionally after logging/acking.
 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 reports additional parity state in a second register. */
		if (is_t5(adapter))
			CH_ALERT(adapter,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adapter,
					      A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		/* The wrap address field is stored shifted right by 4 bits. */
		CH_ALERT(adapter, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  G_MEM_WRAP_CLIENT_NUM(v),
			  G_MEM_WRAP_ADDRESS(v) << 4);
	}
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);	/* ack */
	t4_fatal_err(adapter);
}
4350
/*
 * SMB interrupt handler.  All listed causes are parity errors and fatal.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}
4366
/*
 * NC-SI interrupt handler.  All listed causes are parity errors and fatal.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
4383
/*
 * XGMAC interrupt handler for the given port.  Only the Tx/Rx FIFO
 * parity-error causes are handled here; both are fatal.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	/* T4 and T5+ place the per-port MAC cause register differently. */
	if (is_t4(adap))
		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	/* Ignore any causes other than the two FIFO parity errors. */
	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
	if (!v)
		return;

	if (v & F_TXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & F_RXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, int_cause_reg, v);	/* ack the handled bits */
	t4_fatal_err(adap);
}
4411
/*
 * PL interrupt handler.  All listed causes are fatal.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	/* T5 and later have no VFID_MAP parity cause. */
	static const struct intr_info t5_pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
				  is_t4(adap) ?
				  pl_intr_info : t5_pl_intr_info))
		t4_fatal_err(adap);
}
4433
/* PF-specific interrupt causes enabled via A_PL_PF_INT_ENABLE. */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no enabled global cause was asserted, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;	/* not one of the causes we handle */
	/* Dispatch to the per-module handler for each asserted cause bit. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second memory controller. */
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}
4502
/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module,	here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* T6 moved the SOURCEPF field within PL_WHOAMI. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-specific SGE error causes enabled on top of the common set. */
	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global causes to this PF in the top-level interrupt map. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
4538
/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* T6 moved the SOURCEPF field within PL_WHOAMI. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	/* Stop routing global causes to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}
4557
/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	/* Per-module cause/status registers cleared by writing all-ones. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* The MC cause register moved between T4 and T5+. */
	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
				A_MC_P_INT_CAUSE, 0xffffffff);

	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	/* Finally clear the top-level concentrator and flush the write. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}
4603
4604/**
4605 *	hash_mac_addr - return the hash value of a MAC address
4606 *	@addr: the 48-bit Ethernet MAC address
4607 *
4608 *	Hashes a MAC address according to the hash function used by HW inexact
4609 *	(hash) address matching.
4610 */
4611static int hash_mac_addr(const u8 *addr)
4612{
4613	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4614	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4615	a ^= b;
4616	a ^= (a >> 12);
4617	a ^= (a >> 6);
4618	return a & 0x3f;
4619}
4620
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 *
 *	Returns 0 on success or a mailbox/firmware error code.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;		/* next queue ID to consume */
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for @rspq */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;	/* wrap around @rspq */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
4716
4717/**
4718 *	t4_config_glbl_rss - configure the global RSS mode
4719 *	@adapter: the adapter
4720 *	@mbox: mbox to use for the FW command
4721 *	@mode: global RSS mode
4722 *	@flags: mode-specific flags
4723 *
4724 *	Sets the global RSS mode.
4725 */
4726int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4727		       unsigned int flags)
4728{
4729	struct fw_rss_glb_config_cmd c;
4730
4731	memset(&c, 0, sizeof(c));
4732	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4733				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4734	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4735	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4736		c.u.manual.mode_pkd =
4737			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4738	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4739		c.u.basicvirtual.mode_keymode =
4740			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4741		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4742	} else
4743		return -EINVAL;
4744	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4745}
4746
4747/**
4748 *	t4_config_vi_rss - configure per VI RSS settings
4749 *	@adapter: the adapter
4750 *	@mbox: mbox to use for the FW command
4751 *	@viid: the VI id
4752 *	@flags: RSS flags
4753 *	@defq: id of the default RSS queue for the VI.
4754 *	@skeyidx: RSS secret key table index for non-global mode
4755 *	@skey: RSS vf_scramble key for VI.
4756 *
4757 *	Configures VI-specific RSS properties.
4758 */
4759int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4760		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
4761		     unsigned int skey)
4762{
4763	struct fw_rss_vi_config_cmd c;
4764
4765	memset(&c, 0, sizeof(c));
4766	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4767				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4768				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4769	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4770	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4771					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4772	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4773					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4774	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4775
4776	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4777}
4778
/*
 * Read an RSS table row.  Writes the row selector to the lookup-table
 * register, then polls for F_LKPTBLROWVLD and returns the row in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
4786
4787/**
4788 *	t4_read_rss - read the contents of the RSS mapping table
4789 *	@adapter: the adapter
4790 *	@map: holds the contents of the RSS mapping table
4791 *
4792 *	Reads the contents of the RSS hash->queue mapping table.
4793 */
4794int t4_read_rss(struct adapter *adapter, u16 *map)
4795{
4796	u32 val;
4797	int i, ret;
4798
4799	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4800		ret = rd_rss_row(adapter, i, &val);
4801		if (ret)
4802			return ret;
4803		*map++ = G_LKPTBLQUEUE0(val);
4804		*map++ = G_LKPTBLQUEUE1(val);
4805	}
4806	return 0;
4807}
4808
/**
 *	t4_fw_tp_pio_rw - Access TP PIO through LDST
 *	@adap: the adapter
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *
 *	Access TP PIO registers through LDST firmware commands, one register
 *	per mailbox command.  Note: mailbox failures are silently ignored;
 *	on a failed read the corresponding vals[i] is left unchanged.
 */
void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
		     unsigned int start_index, unsigned int rw)
{
	int ret, i;
	int cmd = FW_LDST_ADDRSPC_TP_PIO;
	struct fw_ldst_cmd c;

	for (i = 0 ; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						     F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
		if (ret == 0) {
			if (rw)
				vals[i] = be32_to_cpu(c.u.addrval.val);
		}
	}
}
4844
/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *
 *	Reads the global 320-bit RSS key, via firmware LDST when enabled,
 *	otherwise through direct indirect-register access.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
	else
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
				 A_TP_RSS_SECRET_KEY0);
}
4860
/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Stage the key itself, via firmware LDST or direct indirect access. */
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
	else
		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
				  A_TP_RSS_SECRET_KEY0);

	/* Commit the staged key into the selected key-table slot. */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}
4901
/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there, via firmware LDST when enabled, otherwise
 *	through direct indirect-register access.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, valp, 1,
				A_TP_RSS_PF0_CONFIG + index, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 valp, 1, A_TP_RSS_PF0_CONFIG + index);
}
4921
/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value, via firmware LDST when enabled, otherwise through
 *	direct indirect-register access.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &val, 1,
				A_TP_RSS_PF0_CONFIG + index, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &val, 1, A_TP_RSS_PF0_CONFIG + index);
}
4941
/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt, mask, data;

	/* T6 moved the VF write-address field within TP_RSS_CONFIG_VRT. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
		 data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
		t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
	} else {
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfl, 1, A_TP_RSS_VFL_CONFIG);
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfh, 1, A_TP_RSS_VFH_CONFIG);
	}
}
4985
4986/**
4987 *	t4_write_rss_vf_config - write VF RSS Configuration Table
4988 *
4989 *	@adapter: the adapter
4990 *	@index: the entry in the VF RSS table to write
4991 *	@vfl: the VFL to store
4992 *	@vfh: the VFH to store
4993 *
4994 *	Writes the VF RSS Configuration Table at the specified index with the
4995 *	specified (VFL, VFH) values.
4996 */
4997void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
4998			    u32 vfl, u32 vfh)
4999{
5000	u32 vrt, mask, data;
5001
5002	if (chip_id(adapter) <= CHELSIO_T5) {
5003		mask = V_VFWRADDR(M_VFWRADDR);
5004		data = V_VFWRADDR(index);
5005	} else {
5006		mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5007		data = V_T6_VFWRADDR(index);
5008	}
5009
5010	/*
5011	 * Load up VFL/VFH with the values to be written ...
5012	 */
5013	if (t4_use_ldst(adapter)) {
5014		t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
5015		t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
5016	} else {
5017		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5018				  &vfl, 1, A_TP_RSS_VFL_CONFIG);
5019		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5020				  &vfh, 1, A_TP_RSS_VFH_CONFIG);
5021	}
5022
5023	/*
5024	 * Write the VFL/VFH into the VF Table at index'th location.
5025	 */
5026	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5027	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5028	vrt |= data | F_VFRDEN;
5029	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5030}
5031
/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Map register and returns its value, via firmware
 *	LDST when enabled, otherwise through direct indirect-register access.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter)
{
	u32 pfmap;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmap, 1, A_TP_RSS_PF_MAP);
	return pfmap;
}
5049
/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *
 *	Writes the specified value to the PF RSS Map register, via firmware
 *	LDST when enabled, otherwise through direct indirect-register access.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &pfmap, 1, A_TP_RSS_PF_MAP);
}
5065
/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Mask register and returns its value, via firmware
 *	LDST when enabled, otherwise through direct indirect-register access.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
	u32 pfmask;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmask, 1, A_TP_RSS_PF_MSK);
	return pfmask;
}
5083
/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *
 *	Writes the specified value to the PF RSS Mask register, via firmware
 *	LDST when enabled, otherwise through direct indirect-register access.
 */
void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &pfmask, 1, A_TP_RSS_PF_MSK);
}
5099
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* Scratch buffer covering the whole OUT_RST..RXT_SEG_LO MIB range. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

	/* Index of a MIB word in val[] relative to the start of the block. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
	/* Assemble a 64-bit counter from its _HI/_LO MIB word pair. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* Same layout, read starting at the IPv6 counter block. */
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
5138
/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's error counters.  Each array below
 *	receives one counter per channel.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);

	/*
	 * Two consecutive MIB words starting at OFD_ARP_DROP, stored into
	 * ofld_no_neigh and the field following it -- assumes those two
	 * fields are adjacent in struct tp_err_stats.
	 */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
}
5170
/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's proxy counters, one per channel.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
			 nchan, A_TP_MIB_TNL_LPBK_0);
}
5185
/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's CPL request/response counters, one per
 *	channel.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
			 nchan, A_TP_MIB_CPL_IN_REQ_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
			 nchan, A_TP_MIB_CPL_OUT_RSP_0);
}
5202
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's RDMA counters.  Reads two consecutive MIB
 *	words starting at RQE_DFR_PKT into rqe_dfr_pkt and the field
 *	following it -- assumes those fields are adjacent in
 *	struct tp_rdma_stats.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
{
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
			 2, A_TP_MIB_RQE_DFR_PKT);
}
5215
/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st)
{
	u32 val[2];

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
			 1, A_TP_MIB_FCOE_DDP_0 + idx);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
			 1, A_TP_MIB_FCOE_DROP_0 + idx);
	/* The 64-bit byte counter occupies a HI/LO word pair per port. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
5237
5238/**
5239 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5240 *	@adap: the adapter
5241 *	@st: holds the counter values
5242 *
5243 *	Returns the values of TP's counters for non-TCP directly-placed packets.
5244 */
5245void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
5246{
5247	u32 val[4];
5248
5249	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
5250			 A_TP_MIB_USM_PKTS);
5251	st->frames = val[0];
5252	st->drops = val[1];
5253	st->octets = ((u64)val[2] << 32) | val[3];
5254}
5255
5256/**
5257 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
5258 *	@adap: the adapter
5259 *	@mtus: where to store the MTU values
5260 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
5261 *
5262 *	Reads the HW path MTU table.
5263 */
5264void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5265{
5266	u32 v;
5267	int i;
5268
5269	for (i = 0; i < NMTUS; ++i) {
5270		t4_write_reg(adap, A_TP_MTU_TABLE,
5271			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
5272		v = t4_read_reg(adap, A_TP_MTU_TABLE);
5273		mtus[i] = G_MTUVALUE(v);
5274		if (mtu_log)
5275			mtu_log[i] = G_MTUWIDTH(v);
5276	}
5277}
5278
5279/**
5280 *	t4_read_cong_tbl - reads the congestion control table
5281 *	@adap: the adapter
5282 *	@incr: where to store the alpha values
5283 *
5284 *	Reads the additive increments programmed into the HW congestion
5285 *	control table.
5286 */
5287void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5288{
5289	unsigned int mtu, w;
5290
5291	for (mtu = 0; mtu < NMTUS; ++mtu)
5292		for (w = 0; w < NCCTRL_WIN; ++w) {
5293			t4_write_reg(adap, A_TP_CCTRL_TABLE,
5294				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
5295			incr[mtu][w] = (u16)t4_read_reg(adap,
5296						A_TP_CCTRL_TABLE) & 0x1fff;
5297		}
5298}
5299
5300/**
5301 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5302 *	@adap: the adapter
5303 *	@addr: the indirect TP register address
5304 *	@mask: specifies the field within the register to modify
5305 *	@val: new value for the field
5306 *
5307 *	Sets a field of an indirect TP register to the given value.
5308 */
5309void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5310			    unsigned int mask, unsigned int val)
5311{
5312	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5313	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5314	t4_write_reg(adap, A_TP_PIO_DATA, val);
5315}
5316
5317/**
5318 *	init_cong_ctrl - initialize congestion control parameters
5319 *	@a: the alpha values for congestion control
5320 *	@b: the beta values for congestion control
5321 *
5322 *	Initialize the congestion control parameters.
5323 */
5324static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5325{
5326	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5327	a[9] = 2;
5328	a[10] = 3;
5329	a[11] = 4;
5330	a[12] = 5;
5331	a[13] = 6;
5332	a[14] = 7;
5333	a[15] = 8;
5334	a[16] = 9;
5335	a[17] = 10;
5336	a[18] = 14;
5337	a[19] = 17;
5338	a[20] = 21;
5339	a[21] = 25;
5340	a[22] = 30;
5341	a[23] = 35;
5342	a[24] = 45;
5343	a[25] = 60;
5344	a[26] = 80;
5345	a[27] = 100;
5346	a[28] = 200;
5347	a[29] = 300;
5348	a[30] = 400;
5349	a[31] = 500;
5350
5351	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5352	b[9] = b[10] = 1;
5353	b[11] = b[12] = 2;
5354	b[13] = b[14] = b[15] = b[16] = 3;
5355	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5356	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5357	b[28] = b[29] = 6;
5358	b[30] = b[31] = 7;
5359}
5360
5361/* The minimum additive increment value for the congestion control table */
5362#define CC_MIN_INCR 2U
5363
5364/**
5365 *	t4_load_mtus - write the MTU and congestion control HW tables
5366 *	@adap: the adapter
5367 *	@mtus: the values for the MTU table
5368 *	@alpha: the values for the congestion control alpha parameter
5369 *	@beta: the values for the congestion control beta parameter
5370 *
5371 *	Write the HW MTU table with the supplied MTUs and the high-speed
5372 *	congestion control table with the supplied alpha, beta, and MTUs.
5373 *	We write the two tables together because the additive increments
5374 *	depend on the MTUs.
5375 */
5376void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5377		  const unsigned short *alpha, const unsigned short *beta)
5378{
5379	static const unsigned int avg_pkts[NCCTRL_WIN] = {
5380		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5381		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5382		28672, 40960, 57344, 81920, 114688, 163840, 229376
5383	};
5384
5385	unsigned int i, w;
5386
5387	for (i = 0; i < NMTUS; ++i) {
5388		unsigned int mtu = mtus[i];
5389		unsigned int log2 = fls(mtu);
5390
5391		if (!(mtu & ((1 << log2) >> 2)))     /* round */
5392			log2--;
5393		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
5394			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
5395
5396		for (w = 0; w < NCCTRL_WIN; ++w) {
5397			unsigned int inc;
5398
5399			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5400				  CC_MIN_INCR);
5401
5402			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
5403				     (w << 16) | (beta[w] << 13) | inc);
5404		}
5405	}
5406}
5407
5408/**
5409 *	t4_set_pace_tbl - set the pace table
5410 *	@adap: the adapter
5411 *	@pace_vals: the pace values in microseconds
5412 *	@start: index of the first entry in the HW pace table to set
5413 *	@n: how many entries to set
5414 *
5415 *	Sets (a subset of the) HW pace table.
5416 */
5417int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5418		     unsigned int start, unsigned int n)
5419{
5420	unsigned int vals[NTX_SCHED], i;
5421	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5422
5423	if (n > NTX_SCHED)
5424	    return -ERANGE;
5425
5426	/* convert values from us to dack ticks, rounding to closest value */
5427	for (i = 0; i < n; i++, pace_vals++) {
5428		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5429		if (vals[i] > 0x7ff)
5430			return -ERANGE;
5431		if (*pace_vals && vals[i] == 0)
5432			return -ERANGE;
5433	}
5434	for (i = 0; i < n; i++, start++)
5435		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5436	return 0;
5437}
5438
5439/**
5440 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5441 *	@adap: the adapter
5442 *	@kbps: target rate in Kbps
5443 *	@sched: the scheduler index
5444 *
5445 *	Configure a Tx HW scheduler for the target rate.
5446 */
5447int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5448{
5449	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5450	unsigned int clk = adap->params.vpd.cclk * 1000;
5451	unsigned int selected_cpt = 0, selected_bpt = 0;
5452
5453	if (kbps > 0) {
5454		kbps *= 125;     /* -> bytes */
5455		for (cpt = 1; cpt <= 255; cpt++) {
5456			tps = clk / cpt;
5457			bpt = (kbps + tps / 2) / tps;
5458			if (bpt > 0 && bpt <= 255) {
5459				v = bpt * tps;
5460				delta = v >= kbps ? v - kbps : kbps - v;
5461				if (delta < mindelta) {
5462					mindelta = delta;
5463					selected_cpt = cpt;
5464					selected_bpt = bpt;
5465				}
5466			} else if (selected_cpt)
5467				break;
5468		}
5469		if (!selected_cpt)
5470			return -EINVAL;
5471	}
5472	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5473		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5474	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5475	if (sched & 1)
5476		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5477	else
5478		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5479	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5480	return 0;
5481}
5482
5483/**
5484 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5485 *	@adap: the adapter
5486 *	@sched: the scheduler index
5487 *	@ipg: the interpacket delay in tenths of nanoseconds
5488 *
5489 *	Set the interpacket delay for a HW packet rate scheduler.
5490 */
5491int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5492{
5493	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5494
5495	/* convert ipg to nearest number of core clocks */
5496	ipg *= core_ticks_per_usec(adap);
5497	ipg = (ipg + 5000) / 10000;
5498	if (ipg > M_TXTIMERSEPQ0)
5499		return -EINVAL;
5500
5501	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5502	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5503	if (sched & 1)
5504		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5505	else
5506		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5507	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5508	t4_read_reg(adap, A_TP_TM_PIO_DATA);
5509	return 0;
5510}
5511
5512/*
5513 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5514 * clocks.  The formula is
5515 *
5516 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5517 *
5518 * which is equivalent to
5519 *
5520 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5521 */
5522static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5523{
5524	u64 v = bytes256 * adap->params.vpd.cclk;
5525
5526	return v * 62 + v / 2;
5527}
5528
5529/**
5530 *	t4_get_chan_txrate - get the current per channel Tx rates
5531 *	@adap: the adapter
5532 *	@nic_rate: rates for NIC traffic
5533 *	@ofld_rate: rates for offloaded traffic
5534 *
5535 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
5536 *	for each channel.
5537 */
5538void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5539{
5540	u32 v;
5541
5542	v = t4_read_reg(adap, A_TP_TX_TRATE);
5543	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5544	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5545	if (adap->chip_params->nchan > 2) {
5546		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5547		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5548	}
5549
5550	v = t4_read_reg(adap, A_TP_TX_ORATE);
5551	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5552	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5553	if (adap->chip_params->nchan > 2) {
5554		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5555		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5556	}
5557}
5558
5559/**
5560 *	t4_set_trace_filter - configure one of the tracing filters
5561 *	@adap: the adapter
5562 *	@tp: the desired trace filter parameters
5563 *	@idx: which filter to configure
5564 *	@enable: whether to enable or disable the filter
5565 *
5566 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
5567 *	it indicates that the filter is already written in the register and it
5568 *	just needs to be enabled or disabled.
5569 */
5570int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5571    int idx, int enable)
5572{
5573	int i, ofst = idx * 4;
5574	u32 data_reg, mask_reg, cfg;
5575	u32 multitrc = F_TRCMULTIFILTER;
5576	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5577
5578	if (idx < 0 || idx >= NTRACE)
5579		return -EINVAL;
5580
5581	if (tp == NULL || !enable) {
5582		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5583		    enable ? en : 0);
5584		return 0;
5585	}
5586
5587	/*
5588	 * TODO - After T4 data book is updated, specify the exact
5589	 * section below.
5590	 *
5591	 * See T4 data book - MPS section for a complete description
5592	 * of the below if..else handling of A_MPS_TRC_CFG register
5593	 * value.
5594	 */
5595	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5596	if (cfg & F_TRCMULTIFILTER) {
5597		/*
5598		 * If multiple tracers are enabled, then maximum
5599		 * capture size is 2.5KB (FIFO size of a single channel)
5600		 * minus 2 flits for CPL_TRACE_PKT header.
5601		 */
5602		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5603			return -EINVAL;
5604	} else {
5605		/*
5606		 * If multiple tracers are disabled, to avoid deadlocks
5607		 * maximum packet capture size of 9600 bytes is recommended.
5608		 * Also in this mode, only trace0 can be enabled and running.
5609		 */
5610		multitrc = 0;
5611		if (tp->snap_len > 9600 || idx)
5612			return -EINVAL;
5613	}
5614
5615	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5616	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5617	    tp->min_len > M_TFMINPKTSIZE)
5618		return -EINVAL;
5619
5620	/* stop the tracer we'll be changing */
5621	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5622
5623	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5624	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5625	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5626
5627	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5628		t4_write_reg(adap, data_reg, tp->data[i]);
5629		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5630	}
5631	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5632		     V_TFCAPTUREMAX(tp->snap_len) |
5633		     V_TFMINPKTSIZE(tp->min_len));
5634	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5635		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5636		     (is_t4(adap) ?
5637		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5638		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5639
5640	return 0;
5641}
5642
5643/**
5644 *	t4_get_trace_filter - query one of the tracing filters
5645 *	@adap: the adapter
5646 *	@tp: the current trace filter parameters
5647 *	@idx: which trace filter to query
5648 *	@enabled: non-zero if the filter is enabled
5649 *
5650 *	Returns the current settings of one of the HW tracing filters.
5651 */
5652void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5653			 int *enabled)
5654{
5655	u32 ctla, ctlb;
5656	int i, ofst = idx * 4;
5657	u32 data_reg, mask_reg;
5658
5659	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5660	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5661
5662	if (is_t4(adap)) {
5663		*enabled = !!(ctla & F_TFEN);
5664		tp->port =  G_TFPORT(ctla);
5665		tp->invert = !!(ctla & F_TFINVERTMATCH);
5666	} else {
5667		*enabled = !!(ctla & F_T5_TFEN);
5668		tp->port = G_T5_TFPORT(ctla);
5669		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5670	}
5671	tp->snap_len = G_TFCAPTUREMAX(ctlb);
5672	tp->min_len = G_TFMINPKTSIZE(ctlb);
5673	tp->skip_ofst = G_TFOFFSET(ctla);
5674	tp->skip_len = G_TFLENGTH(ctla);
5675
5676	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5677	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5678	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5679
5680	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5681		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5682		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5683	}
5684}
5685
5686/**
5687 *	t4_pmtx_get_stats - returns the HW stats from PMTX
5688 *	@adap: the adapter
5689 *	@cnt: where to store the count statistics
5690 *	@cycles: where to store the cycle statistics
5691 *
5692 *	Returns performance statistics from PMTX.
5693 */
5694void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5695{
5696	int i;
5697	u32 data[2];
5698
5699	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5700		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5701		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5702		if (is_t4(adap))
5703			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5704		else {
5705			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5706					 A_PM_TX_DBG_DATA, data, 2,
5707					 A_PM_TX_DBG_STAT_MSB);
5708			cycles[i] = (((u64)data[0] << 32) | data[1]);
5709		}
5710	}
5711}
5712
5713/**
5714 *	t4_pmrx_get_stats - returns the HW stats from PMRX
5715 *	@adap: the adapter
5716 *	@cnt: where to store the count statistics
5717 *	@cycles: where to store the cycle statistics
5718 *
5719 *	Returns performance statistics from PMRX.
5720 */
5721void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5722{
5723	int i;
5724	u32 data[2];
5725
5726	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5727		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5728		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5729		if (is_t4(adap)) {
5730			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5731		} else {
5732			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5733					 A_PM_RX_DBG_DATA, data, 2,
5734					 A_PM_RX_DBG_STAT_MSB);
5735			cycles[i] = (((u64)data[0] << 32) | data[1]);
5736		}
5737	}
5738}
5739
5740/**
5741 *	t4_get_mps_bg_map - return the buffer groups associated with a port
5742 *	@adap: the adapter
5743 *	@idx: the port index
5744 *
5745 *	Returns a bitmap indicating which MPS buffer groups are associated
5746 *	with the given port.  Bit i is set if buffer group i is used by the
5747 *	port.
5748 */
5749static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5750{
5751	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5752
5753	if (n == 0)
5754		return idx == 0 ? 0xf : 0;
5755	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5756		return idx < 2 ? (3 << (2 * idx)) : 0;
5757	return 1 << idx;
5758}
5759
5760/**
5761 *      t4_get_port_type_description - return Port Type string description
5762 *      @port_type: firmware Port Type enumeration
5763 */
5764const char *t4_get_port_type_description(enum fw_port_type port_type)
5765{
5766	static const char *const port_type_description[] = {
5767		"Fiber_XFI",
5768		"Fiber_XAUI",
5769		"BT_SGMII",
5770		"BT_XFI",
5771		"BT_XAUI",
5772		"KX4",
5773		"CX4",
5774		"KX",
5775		"KR",
5776		"SFP",
5777		"BP_AP",
5778		"BP4_AP",
5779		"QSFP_10G",
5780		"QSA",
5781		"QSFP",
5782		"BP40_BA",
5783		"KR4_100G",
5784		"CR4_QSFP",
5785		"CR_QSFP",
5786		"CR_SFP28",
5787		"SFP28",
5788		"KR_SFP28",
5789		"CR2_QSFP",
5790	};
5791
5792	if (port_type < ARRAY_SIZE(port_type_description))
5793		return port_type_description[port_type];
5794	return "UNKNOWN";
5795}
5796
5797/**
5798 *      t4_get_port_stats_offset - collect port stats relative to a previous
5799 *				   snapshot
5800 *      @adap: The adapter
5801 *      @idx: The port
5802 *      @stats: Current stats to fill
5803 *      @offset: Previous stats snapshot
5804 */
5805void t4_get_port_stats_offset(struct adapter *adap, int idx,
5806		struct port_stats *stats,
5807		struct port_stats *offset)
5808{
5809	u64 *s, *o;
5810	int i;
5811
5812	t4_get_port_stats(adap, idx, stats);
5813	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5814			i < (sizeof(struct port_stats)/sizeof(u64)) ;
5815			i++, s++, o++)
5816		*s -= *o;
5817}
5818
5819/**
5820 *	t4_get_port_stats - collect port statistics
5821 *	@adap: the adapter
5822 *	@idx: the port index
5823 *	@p: the stats structure to fill
5824 *
5825 *	Collect statistics related to the given port from HW.
5826 */
5827void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5828{
5829	u32 bgmap = t4_get_mps_bg_map(adap, idx);
5830	u32 stat_ctl;
5831
5832#define GET_STAT(name) \
5833	t4_read_reg64(adap, \
5834	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5835	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5836#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5837
5838	stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5839
5840	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
5841	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
5842	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
5843	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
5844	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
5845	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
5846	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
5847	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
5848	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
5849	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
5850	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
5851	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
5852	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
5853	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
5854	p->tx_drop		= GET_STAT(TX_PORT_DROP);
5855	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
5856	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
5857	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
5858	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
5859	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
5860	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
5861	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
5862	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
5863
5864	if (stat_ctl & F_COUNTPAUSESTATTX) {
5865		p->tx_frames -= p->tx_pause;
5866		p->tx_octets -= p->tx_pause * 64;
5867		p->tx_mcast_frames -= p->tx_pause;
5868	}
5869
5870	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
5871	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
5872	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
5873	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
5874	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
5875	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
5876	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
5877	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
5878	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
5879	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
5880	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
5881	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
5882	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
5883	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
5884	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
5885	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
5886	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
5887	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
5888	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
5889	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
5890	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
5891	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
5892	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
5893	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
5894	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
5895	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
5896	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
5897
5898	if (stat_ctl & F_COUNTPAUSESTATRX) {
5899		p->rx_frames -= p->rx_pause;
5900		p->rx_octets -= p->rx_pause * 64;
5901		p->rx_mcast_frames -= p->rx_pause;
5902	}
5903
5904	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5905	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5906	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5907	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5908	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5909	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5910	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5911	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5912
5913#undef GET_STAT
5914#undef GET_STAT_COM
5915}
5916
5917/**
5918 *	t4_get_lb_stats - collect loopback port statistics
5919 *	@adap: the adapter
5920 *	@idx: the loopback port index
5921 *	@p: the stats structure to fill
5922 *
5923 *	Return HW statistics for the given loopback port.
5924 */
5925void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
5926{
5927	u32 bgmap = t4_get_mps_bg_map(adap, idx);
5928
5929#define GET_STAT(name) \
5930	t4_read_reg64(adap, \
5931	(is_t4(adap) ? \
5932	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
5933	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
5934#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5935
5936	p->octets	= GET_STAT(BYTES);
5937	p->frames	= GET_STAT(FRAMES);
5938	p->bcast_frames	= GET_STAT(BCAST);
5939	p->mcast_frames	= GET_STAT(MCAST);
5940	p->ucast_frames	= GET_STAT(UCAST);
5941	p->error_frames	= GET_STAT(ERROR);
5942
5943	p->frames_64		= GET_STAT(64B);
5944	p->frames_65_127	= GET_STAT(65B_127B);
5945	p->frames_128_255	= GET_STAT(128B_255B);
5946	p->frames_256_511	= GET_STAT(256B_511B);
5947	p->frames_512_1023	= GET_STAT(512B_1023B);
5948	p->frames_1024_1518	= GET_STAT(1024B_1518B);
5949	p->frames_1519_max	= GET_STAT(1519B_MAX);
5950	p->drop			= GET_STAT(DROP_FRAMES);
5951
5952	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5953	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5954	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5955	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5956	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5957	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5958	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5959	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
5960
5961#undef GET_STAT
5962#undef GET_STAT_COM
5963}
5964
5965/**
5966 *	t4_wol_magic_enable - enable/disable magic packet WoL
5967 *	@adap: the adapter
5968 *	@port: the physical port index
5969 *	@addr: MAC address expected in magic packets, %NULL to disable
5970 *
5971 *	Enables/disables magic packet wake-on-LAN for the selected port.
5972 */
5973void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
5974			 const u8 *addr)
5975{
5976	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
5977
5978	if (is_t4(adap)) {
5979		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
5980		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
5981		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
5982	} else {
5983		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
5984		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
5985		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
5986	}
5987
5988	if (addr) {
5989		t4_write_reg(adap, mag_id_reg_l,
5990			     (addr[2] << 24) | (addr[3] << 16) |
5991			     (addr[4] << 8) | addr[5]);
5992		t4_write_reg(adap, mag_id_reg_h,
5993			     (addr[0] << 8) | addr[1]);
5994	}
5995	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
5996			 V_MAGICEN(addr != NULL));
5997}
5998
5999/**
6000 *	t4_wol_pat_enable - enable/disable pattern-based WoL
6001 *	@adap: the adapter
6002 *	@port: the physical port index
6003 *	@map: bitmap of which HW pattern filters to set
6004 *	@mask0: byte mask for bytes 0-63 of a packet
6005 *	@mask1: byte mask for bytes 64-127 of a packet
6006 *	@crc: Ethernet CRC for selected bytes
6007 *	@enable: enable/disable switch
6008 *
6009 *	Sets the pattern filters indicated in @map to mask out the bytes
6010 *	specified in @mask0/@mask1 in received packets and compare the CRC of
6011 *	the resulting packet against @crc.  If @enable is %true pattern-based
6012 *	WoL is enabled, otherwise disabled.
6013 */
6014int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6015		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
6016{
6017	int i;
6018	u32 port_cfg_reg;
6019
6020	if (is_t4(adap))
6021		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6022	else
6023		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6024
6025	if (!enable) {
6026		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6027		return 0;
6028	}
6029	if (map > 0xff)
6030		return -EINVAL;
6031
6032#define EPIO_REG(name) \
6033	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6034	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6035
6036	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6037	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6038	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6039
6040	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6041		if (!(map & 1))
6042			continue;
6043
6044		/* write byte masks */
6045		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6046		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6047		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
6048		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6049			return -ETIMEDOUT;
6050
6051		/* write CRC */
6052		t4_write_reg(adap, EPIO_REG(DATA0), crc);
6053		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6054		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
6055		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6056			return -ETIMEDOUT;
6057	}
6058#undef EPIO_REG
6059
6060	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6061	return 0;
6062}
6063
6064/*     t4_mk_filtdelwr - create a delete filter WR
6065 *     @ftid: the filter ID
6066 *     @wr: the filter work request to populate
6067 *     @qid: ingress queue to receive the delete notification
6068 *
6069 *     Creates a filter work request to delete the supplied filter.  If @qid is
6070 *     negative the delete notification is suppressed.
6071 */
6072void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6073{
6074	memset(wr, 0, sizeof(*wr));
6075	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6076	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6077	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6078				    V_FW_FILTER_WR_NOREPLY(qid < 0));
6079	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6080	if (qid >= 0)
6081		wr->rx_chan_rx_rpl_iq =
6082				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6083}
6084
/*
 * INIT_CMD - fill in the common header of a FW command.
 * @var: the command structure (not a pointer)
 * @cmd: command name suffix, expands to FW_<cmd>_CMD
 * @rd_wr: READ or WRITE, expands to F_FW_CMD_<rd_wr>
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6091
6092int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6093			  u32 addr, u32 val)
6094{
6095	u32 ldst_addrspace;
6096	struct fw_ldst_cmd c;
6097
6098	memset(&c, 0, sizeof(c));
6099	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6100	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6101					F_FW_CMD_REQUEST |
6102					F_FW_CMD_WRITE |
6103					ldst_addrspace);
6104	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6105	c.u.addrval.addr = cpu_to_be32(addr);
6106	c.u.addrval.val = cpu_to_be32(val);
6107
6108	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6109}
6110
6111/**
6112 *	t4_mdio_rd - read a PHY register through MDIO
6113 *	@adap: the adapter
6114 *	@mbox: mailbox to use for the FW command
6115 *	@phy_addr: the PHY address
6116 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6117 *	@reg: the register to read
6118 *	@valp: where to store the value
6119 *
6120 *	Issues a FW command through the given mailbox to read a PHY register.
6121 */
6122int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6123	       unsigned int mmd, unsigned int reg, unsigned int *valp)
6124{
6125	int ret;
6126	u32 ldst_addrspace;
6127	struct fw_ldst_cmd c;
6128
6129	memset(&c, 0, sizeof(c));
6130	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6131	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6132					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6133					ldst_addrspace);
6134	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6135	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6136					 V_FW_LDST_CMD_MMD(mmd));
6137	c.u.mdio.raddr = cpu_to_be16(reg);
6138
6139	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6140	if (ret == 0)
6141		*valp = be16_to_cpu(c.u.mdio.rval);
6142	return ret;
6143}
6144
6145/**
6146 *	t4_mdio_wr - write a PHY register through MDIO
6147 *	@adap: the adapter
6148 *	@mbox: mailbox to use for the FW command
6149 *	@phy_addr: the PHY address
6150 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6151 *	@reg: the register to write
6152 *	@valp: value to write
6153 *
6154 *	Issues a FW command through the given mailbox to write a PHY register.
6155 */
6156int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6157	       unsigned int mmd, unsigned int reg, unsigned int val)
6158{
6159	u32 ldst_addrspace;
6160	struct fw_ldst_cmd c;
6161
6162	memset(&c, 0, sizeof(c));
6163	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6164	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6165					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6166					ldst_addrspace);
6167	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6168	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6169					 V_FW_LDST_CMD_MMD(mmd));
6170	c.u.mdio.raddr = cpu_to_be16(reg);
6171	c.u.mdio.rval = cpu_to_be16(val);
6172
6173	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6174}
6175
6176/**
6177 *
6178 *	t4_sge_decode_idma_state - decode the idma state
6179 *	@adap: the adapter
6180 *	@state: the state idma is stuck in
6181 */
6182void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6183{
6184	static const char * const t4_decode[] = {
6185		"IDMA_IDLE",
6186		"IDMA_PUSH_MORE_CPL_FIFO",
6187		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6188		"Not used",
6189		"IDMA_PHYSADDR_SEND_PCIEHDR",
6190		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6191		"IDMA_PHYSADDR_SEND_PAYLOAD",
6192		"IDMA_SEND_FIFO_TO_IMSG",
6193		"IDMA_FL_REQ_DATA_FL_PREP",
6194		"IDMA_FL_REQ_DATA_FL",
6195		"IDMA_FL_DROP",
6196		"IDMA_FL_H_REQ_HEADER_FL",
6197		"IDMA_FL_H_SEND_PCIEHDR",
6198		"IDMA_FL_H_PUSH_CPL_FIFO",
6199		"IDMA_FL_H_SEND_CPL",
6200		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6201		"IDMA_FL_H_SEND_IP_HDR",
6202		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6203		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6204		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6205		"IDMA_FL_D_SEND_PCIEHDR",
6206		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6207		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6208		"IDMA_FL_SEND_PCIEHDR",
6209		"IDMA_FL_PUSH_CPL_FIFO",
6210		"IDMA_FL_SEND_CPL",
6211		"IDMA_FL_SEND_PAYLOAD_FIRST",
6212		"IDMA_FL_SEND_PAYLOAD",
6213		"IDMA_FL_REQ_NEXT_DATA_FL",
6214		"IDMA_FL_SEND_NEXT_PCIEHDR",
6215		"IDMA_FL_SEND_PADDING",
6216		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6217		"IDMA_FL_SEND_FIFO_TO_IMSG",
6218		"IDMA_FL_REQ_DATAFL_DONE",
6219		"IDMA_FL_REQ_HEADERFL_DONE",
6220	};
6221	static const char * const t5_decode[] = {
6222		"IDMA_IDLE",
6223		"IDMA_ALMOST_IDLE",
6224		"IDMA_PUSH_MORE_CPL_FIFO",
6225		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6226		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6227		"IDMA_PHYSADDR_SEND_PCIEHDR",
6228		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6229		"IDMA_PHYSADDR_SEND_PAYLOAD",
6230		"IDMA_SEND_FIFO_TO_IMSG",
6231		"IDMA_FL_REQ_DATA_FL",
6232		"IDMA_FL_DROP",
6233		"IDMA_FL_DROP_SEND_INC",
6234		"IDMA_FL_H_REQ_HEADER_FL",
6235		"IDMA_FL_H_SEND_PCIEHDR",
6236		"IDMA_FL_H_PUSH_CPL_FIFO",
6237		"IDMA_FL_H_SEND_CPL",
6238		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6239		"IDMA_FL_H_SEND_IP_HDR",
6240		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6241		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6242		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6243		"IDMA_FL_D_SEND_PCIEHDR",
6244		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6245		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6246		"IDMA_FL_SEND_PCIEHDR",
6247		"IDMA_FL_PUSH_CPL_FIFO",
6248		"IDMA_FL_SEND_CPL",
6249		"IDMA_FL_SEND_PAYLOAD_FIRST",
6250		"IDMA_FL_SEND_PAYLOAD",
6251		"IDMA_FL_REQ_NEXT_DATA_FL",
6252		"IDMA_FL_SEND_NEXT_PCIEHDR",
6253		"IDMA_FL_SEND_PADDING",
6254		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6255	};
6256	static const char * const t6_decode[] = {
6257		"IDMA_IDLE",
6258		"IDMA_PUSH_MORE_CPL_FIFO",
6259		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6260		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6261		"IDMA_PHYSADDR_SEND_PCIEHDR",
6262		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6263		"IDMA_PHYSADDR_SEND_PAYLOAD",
6264		"IDMA_FL_REQ_DATA_FL",
6265		"IDMA_FL_DROP",
6266		"IDMA_FL_DROP_SEND_INC",
6267		"IDMA_FL_H_REQ_HEADER_FL",
6268		"IDMA_FL_H_SEND_PCIEHDR",
6269		"IDMA_FL_H_PUSH_CPL_FIFO",
6270		"IDMA_FL_H_SEND_CPL",
6271		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6272		"IDMA_FL_H_SEND_IP_HDR",
6273		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6274		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6275		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6276		"IDMA_FL_D_SEND_PCIEHDR",
6277		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6278		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6279		"IDMA_FL_SEND_PCIEHDR",
6280		"IDMA_FL_PUSH_CPL_FIFO",
6281		"IDMA_FL_SEND_CPL",
6282		"IDMA_FL_SEND_PAYLOAD_FIRST",
6283		"IDMA_FL_SEND_PAYLOAD",
6284		"IDMA_FL_REQ_NEXT_DATA_FL",
6285		"IDMA_FL_SEND_NEXT_PCIEHDR",
6286		"IDMA_FL_SEND_PADDING",
6287		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6288	};
6289	static const u32 sge_regs[] = {
6290		A_SGE_DEBUG_DATA_LOW_INDEX_2,
6291		A_SGE_DEBUG_DATA_LOW_INDEX_3,
6292		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6293	};
6294	const char * const *sge_idma_decode;
6295	int sge_idma_decode_nstates;
6296	int i;
6297	unsigned int chip_version = chip_id(adapter);
6298
6299	/* Select the right set of decode strings to dump depending on the
6300	 * adapter chip type.
6301	 */
6302	switch (chip_version) {
6303	case CHELSIO_T4:
6304		sge_idma_decode = (const char * const *)t4_decode;
6305		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6306		break;
6307
6308	case CHELSIO_T5:
6309		sge_idma_decode = (const char * const *)t5_decode;
6310		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6311		break;
6312
6313	case CHELSIO_T6:
6314		sge_idma_decode = (const char * const *)t6_decode;
6315		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6316		break;
6317
6318	default:
6319		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
6320		return;
6321	}
6322
6323	if (state < sge_idma_decode_nstates)
6324		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6325	else
6326		CH_WARN(adapter, "idma state %d unknown\n", state);
6327
6328	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6329		CH_WARN(adapter, "SGE register %#x value %#x\n",
6330			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6331}
6332
6333/**
6334 *      t4_sge_ctxt_flush - flush the SGE context cache
6335 *      @adap: the adapter
6336 *      @mbox: mailbox to use for the FW command
6337 *
6338 *      Issues a FW command through the given mailbox to flush the
6339 *      SGE context cache.
6340 */
6341int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6342{
6343	int ret;
6344	u32 ldst_addrspace;
6345	struct fw_ldst_cmd c;
6346
6347	memset(&c, 0, sizeof(c));
6348	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6349	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6350					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6351					ldst_addrspace);
6352	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6353	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6354
6355	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6356	return ret;
6357}
6358
6359/**
6360 *      t4_fw_hello - establish communication with FW
6361 *      @adap: the adapter
6362 *      @mbox: mailbox to use for the FW command
6363 *      @evt_mbox: mailbox to receive async FW events
6364 *      @master: specifies the caller's willingness to be the device master
6365 *	@state: returns the current device state (if non-NULL)
6366 *
6367 *	Issues a command to establish communication with FW.  Returns either
6368 *	an error (negative integer) or the mailbox of the Master PF.
6369 */
6370int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6371		enum dev_master master, enum dev_state *state)
6372{
6373	int ret;
6374	struct fw_hello_cmd c;
6375	u32 v;
6376	unsigned int master_mbox;
6377	int retries = FW_CMD_HELLO_RETRIES;
6378
6379retry:
6380	memset(&c, 0, sizeof(c));
6381	INIT_CMD(c, HELLO, WRITE);
6382	c.err_to_clearinit = cpu_to_be32(
6383		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
6384		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
6385		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
6386					mbox : M_FW_HELLO_CMD_MBMASTER) |
6387		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
6388		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
6389		F_FW_HELLO_CMD_CLEARINIT);
6390
6391	/*
6392	 * Issue the HELLO command to the firmware.  If it's not successful
6393	 * but indicates that we got a "busy" or "timeout" condition, retry
6394	 * the HELLO until we exhaust our retry limit.  If we do exceed our
6395	 * retry limit, check to see if the firmware left us any error
6396	 * information and report that if so ...
6397	 */
6398	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6399	if (ret != FW_SUCCESS) {
6400		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6401			goto retry;
6402		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
6403			t4_report_fw_error(adap);
6404		return ret;
6405	}
6406
6407	v = be32_to_cpu(c.err_to_clearinit);
6408	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
6409	if (state) {
6410		if (v & F_FW_HELLO_CMD_ERR)
6411			*state = DEV_STATE_ERR;
6412		else if (v & F_FW_HELLO_CMD_INIT)
6413			*state = DEV_STATE_INIT;
6414		else
6415			*state = DEV_STATE_UNINIT;
6416	}
6417
6418	/*
6419	 * If we're not the Master PF then we need to wait around for the
6420	 * Master PF Driver to finish setting up the adapter.
6421	 *
6422	 * Note that we also do this wait if we're a non-Master-capable PF and
6423	 * there is no current Master PF; a Master PF may show up momentarily
6424	 * and we wouldn't want to fail pointlessly.  (This can happen when an
6425	 * OS loads lots of different drivers rapidly at the same time).  In
6426	 * this case, the Master PF returned by the firmware will be
6427	 * M_PCIE_FW_MASTER so the test below will work ...
6428	 */
6429	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
6430	    master_mbox != mbox) {
6431		int waiting = FW_CMD_HELLO_TIMEOUT;
6432
6433		/*
6434		 * Wait for the firmware to either indicate an error or
6435		 * initialized state.  If we see either of these we bail out
6436		 * and report the issue to the caller.  If we exhaust the
6437		 * "hello timeout" and we haven't exhausted our retries, try
6438		 * again.  Otherwise bail with a timeout error.
6439		 */
6440		for (;;) {
6441			u32 pcie_fw;
6442
6443			msleep(50);
6444			waiting -= 50;
6445
6446			/*
6447			 * If neither Error nor Initialialized are indicated
6448			 * by the firmware keep waiting till we exhaust our
6449			 * timeout ... and then retry if we haven't exhausted
6450			 * our retries ...
6451			 */
6452			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
6453			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
6454				if (waiting <= 0) {
6455					if (retries-- > 0)
6456						goto retry;
6457
6458					return -ETIMEDOUT;
6459				}
6460				continue;
6461			}
6462
6463			/*
6464			 * We either have an Error or Initialized condition
6465			 * report errors preferentially.
6466			 */
6467			if (state) {
6468				if (pcie_fw & F_PCIE_FW_ERR)
6469					*state = DEV_STATE_ERR;
6470				else if (pcie_fw & F_PCIE_FW_INIT)
6471					*state = DEV_STATE_INIT;
6472			}
6473
6474			/*
6475			 * If we arrived before a Master PF was selected and
6476			 * there's not a valid Master PF, grab its identity
6477			 * for our caller.
6478			 */
6479			if (master_mbox == M_PCIE_FW_MASTER &&
6480			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
6481				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
6482			break;
6483		}
6484	}
6485
6486	return master_mbox;
6487}
6488
6489/**
6490 *	t4_fw_bye - end communication with FW
6491 *	@adap: the adapter
6492 *	@mbox: mailbox to use for the FW command
6493 *
6494 *	Issues a command to terminate communication with FW.
6495 */
6496int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6497{
6498	struct fw_bye_cmd c;
6499
6500	memset(&c, 0, sizeof(c));
6501	INIT_CMD(c, BYE, WRITE);
6502	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6503}
6504
6505/**
6506 *	t4_fw_reset - issue a reset to FW
6507 *	@adap: the adapter
6508 *	@mbox: mailbox to use for the FW command
6509 *	@reset: specifies the type of reset to perform
6510 *
6511 *	Issues a reset command of the specified type to FW.
6512 */
6513int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6514{
6515	struct fw_reset_cmd c;
6516
6517	memset(&c, 0, sizeof(c));
6518	INIT_CMD(c, RESET, WRITE);
6519	c.val = cpu_to_be32(reset);
6520	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6521}
6522
6523/**
6524 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6525 *	@adap: the adapter
6526 *	@mbox: mailbox to use for the FW RESET command (if desired)
6527 *	@force: force uP into RESET even if FW RESET command fails
6528 *
6529 *	Issues a RESET command to firmware (if desired) with a HALT indication
6530 *	and then puts the microprocessor into RESET state.  The RESET command
6531 *	will only be issued if a legitimate mailbox is provided (mbox <=
6532 *	M_PCIE_FW_MASTER).
6533 *
6534 *	This is generally used in order for the host to safely manipulate the
6535 *	adapter without fear of conflicting with whatever the firmware might
6536 *	be doing.  The only way out of this state is to RESTART the firmware
6537 *	...
6538 */
6539int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6540{
6541	int ret = 0;
6542
6543	/*
6544	 * If a legitimate mailbox is provided, issue a RESET command
6545	 * with a HALT indication.
6546	 */
6547	if (mbox <= M_PCIE_FW_MASTER) {
6548		struct fw_reset_cmd c;
6549
6550		memset(&c, 0, sizeof(c));
6551		INIT_CMD(c, RESET, WRITE);
6552		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6553		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6554		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6555	}
6556
6557	/*
6558	 * Normally we won't complete the operation if the firmware RESET
6559	 * command fails but if our caller insists we'll go ahead and put the
6560	 * uP into RESET.  This can be useful if the firmware is hung or even
6561	 * missing ...  We'll have to take the risk of putting the uP into
6562	 * RESET without the cooperation of firmware in that case.
6563	 *
6564	 * We also force the firmware's HALT flag to be on in case we bypassed
6565	 * the firmware RESET command above or we're dealing with old firmware
6566	 * which doesn't have the HALT capability.  This will serve as a flag
6567	 * for the incoming firmware to know that it's coming out of a HALT
6568	 * rather than a RESET ... if it's new enough to understand that ...
6569	 */
6570	if (ret == 0 || force) {
6571		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6572		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6573				 F_PCIE_FW_HALT);
6574	}
6575
6576	/*
6577	 * And we always return the result of the firmware RESET command
6578	 * even when we force the uP into RESET ...
6579	 */
6580	return ret;
6581}
6582
6583/**
6584 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
6585 *	@adap: the adapter
6586 *	@reset: if we want to do a RESET to restart things
6587 *
6588 *	Restart firmware previously halted by t4_fw_halt().  On successful
6589 *	return the previous PF Master remains as the new PF Master and there
6590 *	is no need to issue a new HELLO command, etc.
6591 *
6592 *	We do this in two ways:
6593 *
6594 *	 1. If we're dealing with newer firmware we'll simply want to take
6595 *	    the chip's microprocessor out of RESET.  This will cause the
6596 *	    firmware to start up from its start vector.  And then we'll loop
6597 *	    until the firmware indicates it's started again (PCIE_FW.HALT
6598 *	    reset to 0) or we timeout.
6599 *
6600 *	 2. If we're dealing with older firmware then we'll need to RESET
6601 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
6602 *	    flag and automatically RESET itself on startup.
6603 */
6604int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6605{
6606	if (reset) {
6607		/*
6608		 * Since we're directing the RESET instead of the firmware
6609		 * doing it automatically, we need to clear the PCIE_FW.HALT
6610		 * bit.
6611		 */
6612		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6613
6614		/*
6615		 * If we've been given a valid mailbox, first try to get the
6616		 * firmware to do the RESET.  If that works, great and we can
6617		 * return success.  Otherwise, if we haven't been given a
6618		 * valid mailbox or the RESET command failed, fall back to
6619		 * hitting the chip with a hammer.
6620		 */
6621		if (mbox <= M_PCIE_FW_MASTER) {
6622			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6623			msleep(100);
6624			if (t4_fw_reset(adap, mbox,
6625					F_PIORST | F_PIORSTMODE) == 0)
6626				return 0;
6627		}
6628
6629		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6630		msleep(2000);
6631	} else {
6632		int ms;
6633
6634		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6635		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6636			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6637				return FW_SUCCESS;
6638			msleep(100);
6639			ms += 100;
6640		}
6641		return -ETIMEDOUT;
6642	}
6643	return 0;
6644}
6645
6646/**
6647 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6648 *	@adap: the adapter
6649 *	@mbox: mailbox to use for the FW RESET command (if desired)
6650 *	@fw_data: the firmware image to write
6651 *	@size: image size
6652 *	@force: force upgrade even if firmware doesn't cooperate
6653 *
6654 *	Perform all of the steps necessary for upgrading an adapter's
6655 *	firmware image.  Normally this requires the cooperation of the
6656 *	existing firmware in order to halt all existing activities
6657 *	but if an invalid mailbox token is passed in we skip that step
6658 *	(though we'll still put the adapter microprocessor into RESET in
6659 *	that case).
6660 *
6661 *	On successful return the new firmware will have been loaded and
6662 *	the adapter will have been fully RESET losing all previous setup
6663 *	state.  On unsuccessful return the adapter may be completely hosed ...
6664 *	positive errno indicates that the adapter is ~probably~ intact, a
6665 *	negative errno indicates that things are looking bad ...
6666 */
6667int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6668		  const u8 *fw_data, unsigned int size, int force)
6669{
6670	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6671	unsigned int bootstrap =
6672	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6673	int reset, ret;
6674
6675	if (!t4_fw_matches_chip(adap, fw_hdr))
6676		return -EINVAL;
6677
6678	if (!bootstrap) {
6679		ret = t4_fw_halt(adap, mbox, force);
6680		if (ret < 0 && !force)
6681			return ret;
6682	}
6683
6684	ret = t4_load_fw(adap, fw_data, size);
6685	if (ret < 0 || bootstrap)
6686		return ret;
6687
6688	/*
6689	 * Older versions of the firmware don't understand the new
6690	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6691	 * restart.  So for newly loaded older firmware we'll have to do the
6692	 * RESET for it so it starts up on a clean slate.  We can tell if
6693	 * the newly loaded firmware will handle this right by checking
6694	 * its header flags to see if it advertises the capability.
6695	 */
6696	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6697	return t4_fw_restart(adap, mbox, reset);
6698}
6699
6700/**
6701 *	t4_fw_initialize - ask FW to initialize the device
6702 *	@adap: the adapter
6703 *	@mbox: mailbox to use for the FW command
6704 *
6705 *	Issues a command to FW to partially initialize the device.  This
6706 *	performs initialization that generally doesn't depend on user input.
6707 */
6708int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6709{
6710	struct fw_initialize_cmd c;
6711
6712	memset(&c, 0, sizeof(c));
6713	INIT_CMD(c, INITIALIZE, WRITE);
6714	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6715}
6716
6717/**
6718 *	t4_query_params_rw - query FW or device parameters
6719 *	@adap: the adapter
6720 *	@mbox: mailbox to use for the FW command
6721 *	@pf: the PF
6722 *	@vf: the VF
6723 *	@nparams: the number of parameters
6724 *	@params: the parameter names
6725 *	@val: the parameter values
6726 *	@rw: Write and read flag
6727 *
6728 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
6729 *	queried at once.
6730 */
6731int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6732		       unsigned int vf, unsigned int nparams, const u32 *params,
6733		       u32 *val, int rw)
6734{
6735	int i, ret;
6736	struct fw_params_cmd c;
6737	__be32 *p = &c.param[0].mnem;
6738
6739	if (nparams > 7)
6740		return -EINVAL;
6741
6742	memset(&c, 0, sizeof(c));
6743	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6744				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
6745				  V_FW_PARAMS_CMD_PFN(pf) |
6746				  V_FW_PARAMS_CMD_VFN(vf));
6747	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6748
6749	for (i = 0; i < nparams; i++) {
6750		*p++ = cpu_to_be32(*params++);
6751		if (rw)
6752			*p = cpu_to_be32(*(val + i));
6753		p++;
6754	}
6755
6756	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6757	if (ret == 0)
6758		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6759			*val++ = be32_to_cpu(*p);
6760	return ret;
6761}
6762
6763int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6764		    unsigned int vf, unsigned int nparams, const u32 *params,
6765		    u32 *val)
6766{
6767	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6768}
6769
6770/**
6771 *      t4_set_params_timeout - sets FW or device parameters
6772 *      @adap: the adapter
6773 *      @mbox: mailbox to use for the FW command
6774 *      @pf: the PF
6775 *      @vf: the VF
6776 *      @nparams: the number of parameters
6777 *      @params: the parameter names
6778 *      @val: the parameter values
6779 *      @timeout: the timeout time
6780 *
6781 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
6782 *      specified at once.
6783 */
6784int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6785			  unsigned int pf, unsigned int vf,
6786			  unsigned int nparams, const u32 *params,
6787			  const u32 *val, int timeout)
6788{
6789	struct fw_params_cmd c;
6790	__be32 *p = &c.param[0].mnem;
6791
6792	if (nparams > 7)
6793		return -EINVAL;
6794
6795	memset(&c, 0, sizeof(c));
6796	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6797				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6798				  V_FW_PARAMS_CMD_PFN(pf) |
6799				  V_FW_PARAMS_CMD_VFN(vf));
6800	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6801
6802	while (nparams--) {
6803		*p++ = cpu_to_be32(*params++);
6804		*p++ = cpu_to_be32(*val++);
6805	}
6806
6807	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6808}
6809
6810/**
6811 *	t4_set_params - sets FW or device parameters
6812 *	@adap: the adapter
6813 *	@mbox: mailbox to use for the FW command
6814 *	@pf: the PF
6815 *	@vf: the VF
6816 *	@nparams: the number of parameters
6817 *	@params: the parameter names
6818 *	@val: the parameter values
6819 *
6820 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
6821 *	specified at once.
6822 */
6823int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6824		  unsigned int vf, unsigned int nparams, const u32 *params,
6825		  const u32 *val)
6826{
6827	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6828				     FW_CMD_MAX_TIMEOUT);
6829}
6830
6831/**
6832 *	t4_cfg_pfvf - configure PF/VF resource limits
6833 *	@adap: the adapter
6834 *	@mbox: mailbox to use for the FW command
6835 *	@pf: the PF being configured
6836 *	@vf: the VF being configured
6837 *	@txq: the max number of egress queues
6838 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
6839 *	@rxqi: the max number of interrupt-capable ingress queues
6840 *	@rxq: the max number of interruptless ingress queues
6841 *	@tc: the PCI traffic class
6842 *	@vi: the max number of virtual interfaces
6843 *	@cmask: the channel access rights mask for the PF/VF
6844 *	@pmask: the port access rights mask for the PF/VF
6845 *	@nexact: the maximum number of exact MPS filters
6846 *	@rcaps: read capabilities
6847 *	@wxcaps: write/execute capabilities
6848 *
6849 *	Configures resource limits and capabilities for a physical or virtual
6850 *	function.
6851 */
6852int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6853		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6854		unsigned int rxqi, unsigned int rxq, unsigned int tc,
6855		unsigned int vi, unsigned int cmask, unsigned int pmask,
6856		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6857{
6858	struct fw_pfvf_cmd c;
6859
6860	memset(&c, 0, sizeof(c));
6861	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
6862				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
6863				  V_FW_PFVF_CMD_VFN(vf));
6864	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6865	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
6866				     V_FW_PFVF_CMD_NIQ(rxq));
6867	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
6868				    V_FW_PFVF_CMD_PMASK(pmask) |
6869				    V_FW_PFVF_CMD_NEQ(txq));
6870	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
6871				      V_FW_PFVF_CMD_NVI(vi) |
6872				      V_FW_PFVF_CMD_NEXACTF(nexact));
6873	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
6874				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
6875				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
6876	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6877}
6878
6879/**
6880 *	t4_alloc_vi_func - allocate a virtual interface
6881 *	@adap: the adapter
6882 *	@mbox: mailbox to use for the FW command
6883 *	@port: physical port associated with the VI
6884 *	@pf: the PF owning the VI
6885 *	@vf: the VF owning the VI
6886 *	@nmac: number of MAC addresses needed (1 to 5)
6887 *	@mac: the MAC addresses of the VI
6888 *	@rss_size: size of RSS table slice associated with this VI
6889 *	@portfunc: which Port Application Function MAC Address is desired
6890 *	@idstype: Intrusion Detection Type
6891 *
6892 *	Allocates a virtual interface for the given physical port.  If @mac is
6893 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
6894 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
6895 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
6896 *	stored consecutively so the space needed is @nmac * 6 bytes.
6897 *	Returns a negative error number or the non-negative VI id.
6898 */
6899int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
6900		     unsigned int port, unsigned int pf, unsigned int vf,
6901		     unsigned int nmac, u8 *mac, u16 *rss_size,
6902		     unsigned int portfunc, unsigned int idstype)
6903{
6904	int ret;
6905	struct fw_vi_cmd c;
6906
6907	memset(&c, 0, sizeof(c));
6908	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
6909				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
6910				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
6911	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
6912	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
6913				     V_FW_VI_CMD_FUNC(portfunc));
6914	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
6915	c.nmac = nmac - 1;
6916	if(!rss_size)
6917		c.norss_rsssize = F_FW_VI_CMD_NORSS;
6918
6919	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6920	if (ret)
6921		return ret;
6922
6923	if (mac) {
6924		memcpy(mac, c.mac, sizeof(c.mac));
6925		switch (nmac) {
6926		case 5:
6927			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6928		case 4:
6929			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6930		case 3:
6931			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6932		case 2:
6933			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
6934		}
6935	}
6936	if (rss_size)
6937		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
6938	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
6939}
6940
6941/**
6942 *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
6943 *      @adap: the adapter
6944 *      @mbox: mailbox to use for the FW command
6945 *      @port: physical port associated with the VI
6946 *      @pf: the PF owning the VI
6947 *      @vf: the VF owning the VI
6948 *      @nmac: number of MAC addresses needed (1 to 5)
6949 *      @mac: the MAC addresses of the VI
6950 *      @rss_size: size of RSS table slice associated with this VI
6951 *
6952 *	backwards compatible and convieniance routine to allocate a Virtual
6953 *	Interface with a Ethernet Port Application Function and Intrustion
6954 *	Detection System disabled.
6955 */
6956int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6957		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6958		u16 *rss_size)
6959{
6960	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
6961				FW_VI_FUNC_ETH, 0);
6962}
6963
6964/**
6965 * 	t4_free_vi - free a virtual interface
6966 * 	@adap: the adapter
6967 * 	@mbox: mailbox to use for the FW command
6968 * 	@pf: the PF owning the VI
6969 * 	@vf: the VF owning the VI
6970 * 	@viid: virtual interface identifiler
6971 *
6972 * 	Free a previously allocated virtual interface.
6973 */
6974int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6975	       unsigned int vf, unsigned int viid)
6976{
6977	struct fw_vi_cmd c;
6978
6979	memset(&c, 0, sizeof(c));
6980	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
6981				  F_FW_CMD_REQUEST |
6982				  F_FW_CMD_EXEC |
6983				  V_FW_VI_CMD_PFN(pf) |
6984				  V_FW_VI_CMD_VFN(vf));
6985	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
6986	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
6987
6988	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6989}
6990
6991/**
6992 *	t4_set_rxmode - set Rx properties of a virtual interface
6993 *	@adap: the adapter
6994 *	@mbox: mailbox to use for the FW command
6995 *	@viid: the VI id
6996 *	@mtu: the new MTU or -1
6997 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6998 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6999 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7000 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7001 *	@sleep_ok: if true we may sleep while awaiting command completion
7002 *
7003 *	Sets Rx properties of a virtual interface.
7004 */
7005int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7006		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
7007		  bool sleep_ok)
7008{
7009	struct fw_vi_rxmode_cmd c;
7010
7011	/* convert to FW values */
7012	if (mtu < 0)
7013		mtu = M_FW_VI_RXMODE_CMD_MTU;
7014	if (promisc < 0)
7015		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7016	if (all_multi < 0)
7017		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7018	if (bcast < 0)
7019		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7020	if (vlanex < 0)
7021		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7022
7023	memset(&c, 0, sizeof(c));
7024	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7025				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7026				   V_FW_VI_RXMODE_CMD_VIID(viid));
7027	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7028	c.mtu_to_vlanexen =
7029		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7030			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7031			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7032			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7033			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7034	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7035}
7036
7037/**
7038 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7039 *	@adap: the adapter
7040 *	@mbox: mailbox to use for the FW command
7041 *	@viid: the VI id
7042 *	@free: if true any existing filters for this VI id are first removed
7043 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
7044 *	@addr: the MAC address(es)
7045 *	@idx: where to store the index of each allocated filter
7046 *	@hash: pointer to hash address filter bitmap
7047 *	@sleep_ok: call is allowed to sleep
7048 *
7049 *	Allocates an exact-match filter for each of the supplied addresses and
7050 *	sets it to the corresponding address.  If @idx is not %NULL it should
7051 *	have at least @naddr entries, each of which will be set to the index of
7052 *	the filter allocated for the corresponding MAC address.  If a filter
7053 *	could not be allocated for an address its index is set to 0xffff.
7054 *	If @hash is not %NULL addresses that fail to allocate an exact filter
7055 *	are hashed and update the hash filter bitmap pointed at by @hash.
7056 *
7057 *	Returns a negative error number or the number of filters allocated.
7058 */
7059int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7060		      unsigned int viid, bool free, unsigned int naddr,
7061		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7062{
7063	int offset, ret = 0;
7064	struct fw_vi_mac_cmd c;
7065	unsigned int nfilters = 0;
7066	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7067	unsigned int rem = naddr;
7068
7069	if (naddr > max_naddr)
7070		return -EINVAL;
7071
7072	for (offset = 0; offset < naddr ; /**/) {
7073		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7074					 ? rem
7075					 : ARRAY_SIZE(c.u.exact));
7076		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7077						     u.exact[fw_naddr]), 16);
7078		struct fw_vi_mac_exact *p;
7079		int i;
7080
7081		memset(&c, 0, sizeof(c));
7082		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7083					   F_FW_CMD_REQUEST |
7084					   F_FW_CMD_WRITE |
7085					   V_FW_CMD_EXEC(free) |
7086					   V_FW_VI_MAC_CMD_VIID(viid));
7087		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7088						  V_FW_CMD_LEN16(len16));
7089
7090		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7091			p->valid_to_idx =
7092				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7093					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7094			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7095		}
7096
7097		/*
7098		 * It's okay if we run out of space in our MAC address arena.
7099		 * Some of the addresses we submit may get stored so we need
7100		 * to run through the reply to see what the results were ...
7101		 */
7102		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7103		if (ret && ret != -FW_ENOMEM)
7104			break;
7105
7106		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7107			u16 index = G_FW_VI_MAC_CMD_IDX(
7108						be16_to_cpu(p->valid_to_idx));
7109
7110			if (idx)
7111				idx[offset+i] = (index >=  max_naddr
7112						 ? 0xffff
7113						 : index);
7114			if (index < max_naddr)
7115				nfilters++;
7116			else if (hash)
7117				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
7118		}
7119
7120		free = false;
7121		offset += fw_naddr;
7122		rem -= fw_naddr;
7123	}
7124
7125	if (ret == 0 || ret == -FW_ENOMEM)
7126		ret = nfilters;
7127	return ret;
7128}
7129
7130/**
7131 *	t4_change_mac - modifies the exact-match filter for a MAC address
7132 *	@adap: the adapter
7133 *	@mbox: mailbox to use for the FW command
7134 *	@viid: the VI id
7135 *	@idx: index of existing filter for old value of MAC address, or -1
7136 *	@addr: the new MAC address value
7137 *	@persist: whether a new MAC allocation should be persistent
7138 *	@add_smt: if true also add the address to the HW SMT
7139 *
7140 *	Modifies an exact-match filter and sets it to the new MAC address if
7141 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
7142 *	latter case the address is added persistently if @persist is %true.
7143 *
7144 *	Note that in general it is not possible to modify the value of a given
7145 *	filter so the generic way to modify an address filter is to free the one
7146 *	being used by the old address value and allocate a new filter for the
7147 *	new address value.
7148 *
7149 *	Returns a negative error number or the index of the filter with the new
7150 *	MAC value.  Note that this index may differ from @idx.
7151 */
7152int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7153		  int idx, const u8 *addr, bool persist, bool add_smt)
7154{
7155	int ret, mode;
7156	struct fw_vi_mac_cmd c;
7157	struct fw_vi_mac_exact *p = c.u.exact;
7158	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7159
7160	if (idx < 0)		/* new allocation */
7161		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7162	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7163
7164	memset(&c, 0, sizeof(c));
7165	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7166				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7167				   V_FW_VI_MAC_CMD_VIID(viid));
7168	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7169	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7170				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7171				      V_FW_VI_MAC_CMD_IDX(idx));
7172	memcpy(p->macaddr, addr, sizeof(p->macaddr));
7173
7174	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7175	if (ret == 0) {
7176		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7177		if (ret >= max_mac_addr)
7178			ret = -ENOMEM;
7179	}
7180	return ret;
7181}
7182
7183/**
7184 *	t4_set_addr_hash - program the MAC inexact-match hash filter
7185 *	@adap: the adapter
7186 *	@mbox: mailbox to use for the FW command
7187 *	@viid: the VI id
7188 *	@ucast: whether the hash filter should also match unicast addresses
7189 *	@vec: the value to be written to the hash filter
7190 *	@sleep_ok: call is allowed to sleep
7191 *
7192 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
7193 */
7194int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7195		     bool ucast, u64 vec, bool sleep_ok)
7196{
7197	struct fw_vi_mac_cmd c;
7198	u32 val;
7199
7200	memset(&c, 0, sizeof(c));
7201	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7202				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7203				   V_FW_VI_ENABLE_CMD_VIID(viid));
7204	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7205	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7206	c.freemacs_to_len16 = cpu_to_be32(val);
7207	c.u.hash.hashvec = cpu_to_be64(vec);
7208	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7209}
7210
7211/**
7212 *      t4_enable_vi_params - enable/disable a virtual interface
7213 *      @adap: the adapter
7214 *      @mbox: mailbox to use for the FW command
7215 *      @viid: the VI id
7216 *      @rx_en: 1=enable Rx, 0=disable Rx
7217 *      @tx_en: 1=enable Tx, 0=disable Tx
7218 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
7219 *
7220 *      Enables/disables a virtual interface.  Note that setting DCB Enable
7221 *      only makes sense when enabling a Virtual Interface ...
7222 */
7223int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7224			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7225{
7226	struct fw_vi_enable_cmd c;
7227
7228	memset(&c, 0, sizeof(c));
7229	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7230				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7231				   V_FW_VI_ENABLE_CMD_VIID(viid));
7232	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7233				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7234				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7235				     FW_LEN16(c));
7236	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7237}
7238
7239/**
7240 *	t4_enable_vi - enable/disable a virtual interface
7241 *	@adap: the adapter
7242 *	@mbox: mailbox to use for the FW command
7243 *	@viid: the VI id
7244 *	@rx_en: 1=enable Rx, 0=disable Rx
7245 *	@tx_en: 1=enable Tx, 0=disable Tx
7246 *
7247 *	Enables/disables a virtual interface.  Note that setting DCB Enable
7248 *	only makes sense when enabling a Virtual Interface ...
7249 */
7250int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7251		 bool rx_en, bool tx_en)
7252{
7253	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7254}
7255
7256/**
7257 *	t4_identify_port - identify a VI's port by blinking its LED
7258 *	@adap: the adapter
7259 *	@mbox: mailbox to use for the FW command
7260 *	@viid: the VI id
7261 *	@nblinks: how many times to blink LED at 2.5 Hz
7262 *
7263 *	Identifies a VI's port by blinking its LED.
7264 */
7265int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7266		     unsigned int nblinks)
7267{
7268	struct fw_vi_enable_cmd c;
7269
7270	memset(&c, 0, sizeof(c));
7271	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7272				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7273				   V_FW_VI_ENABLE_CMD_VIID(viid));
7274	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7275	c.blinkdur = cpu_to_be16(nblinks);
7276	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7277}
7278
7279/**
7280 *	t4_iq_stop - stop an ingress queue and its FLs
7281 *	@adap: the adapter
7282 *	@mbox: mailbox to use for the FW command
7283 *	@pf: the PF owning the queues
7284 *	@vf: the VF owning the queues
7285 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7286 *	@iqid: ingress queue id
7287 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7288 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7289 *
7290 *	Stops an ingress queue and its associated FLs, if any.  This causes
7291 *	any current or future data/messages destined for these queues to be
7292 *	tossed.
7293 */
7294int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7295	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7296	       unsigned int fl0id, unsigned int fl1id)
7297{
7298	struct fw_iq_cmd c;
7299
7300	memset(&c, 0, sizeof(c));
7301	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7302				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7303				  V_FW_IQ_CMD_VFN(vf));
7304	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7305	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7306	c.iqid = cpu_to_be16(iqid);
7307	c.fl0id = cpu_to_be16(fl0id);
7308	c.fl1id = cpu_to_be16(fl1id);
7309	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7310}
7311
7312/**
7313 *	t4_iq_free - free an ingress queue and its FLs
7314 *	@adap: the adapter
7315 *	@mbox: mailbox to use for the FW command
7316 *	@pf: the PF owning the queues
7317 *	@vf: the VF owning the queues
7318 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7319 *	@iqid: ingress queue id
7320 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7321 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7322 *
7323 *	Frees an ingress queue and its associated FLs, if any.
7324 */
7325int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7326	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7327	       unsigned int fl0id, unsigned int fl1id)
7328{
7329	struct fw_iq_cmd c;
7330
7331	memset(&c, 0, sizeof(c));
7332	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7333				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7334				  V_FW_IQ_CMD_VFN(vf));
7335	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7336	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7337	c.iqid = cpu_to_be16(iqid);
7338	c.fl0id = cpu_to_be16(fl0id);
7339	c.fl1id = cpu_to_be16(fl1id);
7340	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7341}
7342
7343/**
7344 *	t4_eth_eq_free - free an Ethernet egress queue
7345 *	@adap: the adapter
7346 *	@mbox: mailbox to use for the FW command
7347 *	@pf: the PF owning the queue
7348 *	@vf: the VF owning the queue
7349 *	@eqid: egress queue id
7350 *
7351 *	Frees an Ethernet egress queue.
7352 */
7353int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7354		   unsigned int vf, unsigned int eqid)
7355{
7356	struct fw_eq_eth_cmd c;
7357
7358	memset(&c, 0, sizeof(c));
7359	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7360				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7361				  V_FW_EQ_ETH_CMD_PFN(pf) |
7362				  V_FW_EQ_ETH_CMD_VFN(vf));
7363	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7364	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7365	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7366}
7367
7368/**
7369 *	t4_ctrl_eq_free - free a control egress queue
7370 *	@adap: the adapter
7371 *	@mbox: mailbox to use for the FW command
7372 *	@pf: the PF owning the queue
7373 *	@vf: the VF owning the queue
7374 *	@eqid: egress queue id
7375 *
7376 *	Frees a control egress queue.
7377 */
7378int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7379		    unsigned int vf, unsigned int eqid)
7380{
7381	struct fw_eq_ctrl_cmd c;
7382
7383	memset(&c, 0, sizeof(c));
7384	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7385				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7386				  V_FW_EQ_CTRL_CMD_PFN(pf) |
7387				  V_FW_EQ_CTRL_CMD_VFN(vf));
7388	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7389	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7390	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7391}
7392
7393/**
7394 *	t4_ofld_eq_free - free an offload egress queue
7395 *	@adap: the adapter
7396 *	@mbox: mailbox to use for the FW command
7397 *	@pf: the PF owning the queue
7398 *	@vf: the VF owning the queue
7399 *	@eqid: egress queue id
7400 *
 *	Frees an offload egress queue.
7402 */
7403int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7404		    unsigned int vf, unsigned int eqid)
7405{
7406	struct fw_eq_ofld_cmd c;
7407
7408	memset(&c, 0, sizeof(c));
7409	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7410				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7411				  V_FW_EQ_OFLD_CMD_PFN(pf) |
7412				  V_FW_EQ_OFLD_CMD_VFN(vf));
7413	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7414	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7415	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7416}
7417
7418/**
7419 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
7420 *	@link_down_rc: Link Down Reason Code
7421 *
7422 *	Returns a string representation of the Link Down Reason Code.
7423 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code. */
	static const char *const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};
	const size_t nreasons = sizeof(reason) / sizeof(reason[0]);

	/* Codes beyond the table are reported as invalid. */
	return (link_down_rc < nreasons ? reason[link_down_rc] :
	    "Bad Reason Code");
}
7442
7443/**
7444 *	t4_handle_fw_rpl - process a FW reply message
7445 *	@adap: the adapter
7446 *	@rpl: start of the FW message
7447 *
7448 *	Processes a FW message, such as link state change messages.
7449 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	/* The opcode is the first byte of every firmware message. */
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
			G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		/* Decode pause settings and link speed from the status word. */
		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
			speed = 25000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = 40000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
			speed = 100000;

		/*
		 * Find the port whose tx channel matches the message.
		 * NOTE(review): if no port matches, pi is left pointing at
		 * the last port (or NULL on an adapter with no ports) --
		 * assumes the firmware only reports channels of real ports;
		 * TODO confirm.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		/* Notify the OS layer about transceiver module changes. */
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			int reason;

			/* Capture the Link Down Reason Code on a down transition. */
			if (!link_ok && lc->link_ok)
				reason = G_FW_PORT_CMD_LINKDNRC(stat);
			else
				reason = -1;

			/* Update cached link state, then notify the OS layer. */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, i, link_ok, reason);
		}
	} else {
		/* Only port-info replies are handled here. */
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
7516
7517/**
7518 *	get_pci_mode - determine a card's PCI mode
7519 *	@adapter: the adapter
7520 *	@p: where to store the PCI settings
7521 *
7522 *	Determines a card's PCI mode and associated parameters, such as speed
7523 *	and width.
7524 */
7525static void get_pci_mode(struct adapter *adapter,
7526				   struct pci_params *p)
7527{
7528	u16 val;
7529	u32 pcie_cap;
7530
7531	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7532	if (pcie_cap) {
7533		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7534		p->speed = val & PCI_EXP_LNKSTA_CLS;
7535		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7536	}
7537}
7538
7539/**
7540 *	init_link_config - initialize a link's SW state
7541 *	@lc: structure holding the link state
7542 *	@caps: link capabilities
7543 *
7544 *	Initializes the SW state maintained for each link, including the link's
7545 *	capabilities and default speed/flow-control/autonegotiation settings.
7546 */
7547static void init_link_config(struct link_config *lc, unsigned int caps)
7548{
7549	lc->supported = caps;
7550	lc->requested_speed = 0;
7551	lc->speed = 0;
7552	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7553	if (lc->supported & FW_PORT_CAP_ANEG) {
7554		lc->advertising = lc->supported & ADVERT_MASK;
7555		lc->autoneg = AUTONEG_ENABLE;
7556		lc->requested_fc |= PAUSE_AUTONEG;
7557	} else {
7558		lc->advertising = 0;
7559		lc->autoneg = AUTONEG_DISABLE;
7560	}
7561}
7562
/* Describes one supported serial flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* JEDEC ID bytes read via SF_RD_ID */
	u32 size_mb;			/* total size in bytes, despite the
					 * name (the one table entry stores
					 * 4 << 20) */
};
7567
7568int t4_get_flash_params(struct adapter *adapter)
7569{
7570	/*
7571	 * Table for non-Numonix supported flash parts.  Numonix parts are left
7572	 * to the preexisting well-tested code.  All flash parts have 64KB
7573	 * sectors.
7574	 */
7575	static struct flash_desc supported_flash[] = {
7576		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
7577	};
7578
7579	int ret;
7580	u32 info = 0;
7581
7582	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
7583	if (!ret)
7584		ret = sf1_read(adapter, 3, 0, 1, &info);
7585	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
7586	if (ret < 0)
7587		return ret;
7588
7589	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7590		if (supported_flash[ret].vendor_and_model_id == info) {
7591			adapter->params.sf_size = supported_flash[ret].size_mb;
7592			adapter->params.sf_nsec =
7593				adapter->params.sf_size / SF_SEC_SIZE;
7594			return 0;
7595		}
7596
7597	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
7598		return -EINVAL;
7599	info >>= 16;				/* log2 of size */
7600	if (info >= 0x14 && info < 0x18)
7601		adapter->params.sf_nsec = 1 << (info - 16);
7602	else if (info == 0x18)
7603		adapter->params.sf_nsec = 64;
7604	else
7605		return -EINVAL;
7606	adapter->params.sf_size = 1 << info;
7607
7608	/*
7609	 * We should ~probably~ reject adapters with FLASHes which are too
7610	 * small but we have some legacy FPGAs with small FLASHes that we'd
7611	 * still like to use.  So instead we emit a scary message ...
7612	 */
7613	if (adapter->params.sf_size < FLASH_MIN_SIZE)
7614		CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
7615			adapter->params.sf_size, FLASH_MIN_SIZE);
7616
7617	return 0;
7618}
7619
7620static void set_pcie_completion_timeout(struct adapter *adapter,
7621						  u8 range)
7622{
7623	u16 val;
7624	u32 pcie_cap;
7625
7626	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7627	if (pcie_cap) {
7628		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7629		val &= 0xfff0;
7630		val |= range ;
7631		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7632	}
7633}
7634
7635const struct chip_params *t4_get_chip_params(int chipid)
7636{
7637	static const struct chip_params chip_params[] = {
7638		{
7639			/* T4 */
7640			.nchan = NCHAN,
7641			.pm_stats_cnt = PM_NSTATS,
7642			.cng_ch_bits_log = 2,
7643			.nsched_cls = 15,
7644			.cim_num_obq = CIM_NUM_OBQ,
7645			.mps_rplc_size = 128,
7646			.vfcount = 128,
7647			.sge_fl_db = F_DBPRIO,
7648			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7649		},
7650		{
7651			/* T5 */
7652			.nchan = NCHAN,
7653			.pm_stats_cnt = PM_NSTATS,
7654			.cng_ch_bits_log = 2,
7655			.nsched_cls = 16,
7656			.cim_num_obq = CIM_NUM_OBQ_T5,
7657			.mps_rplc_size = 128,
7658			.vfcount = 128,
7659			.sge_fl_db = F_DBPRIO | F_DBTYPE,
7660			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7661		},
7662		{
7663			/* T6 */
7664			.nchan = T6_NCHAN,
7665			.pm_stats_cnt = T6_PM_NSTATS,
7666			.cng_ch_bits_log = 3,
7667			.nsched_cls = 16,
7668			.cim_num_obq = CIM_NUM_OBQ_T5,
7669			.mps_rplc_size = 256,
7670			.vfcount = 256,
7671			.sge_fl_db = 0,
7672			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7673		},
7674	};
7675
7676	chipid -= CHELSIO_T4;
7677	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7678		return NULL;
7679
7680	return &chip_params[chipid];
7681}
7682
7683/**
7684 *	t4_prep_adapter - prepare SW and HW for operation
7685 *	@adapter: the adapter
7686 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
7687 *
7688 *	Initialize adapter SW state for the various HW modules, set initial
7689 *	values for some adapter tunables, take PHYs out of reset, and
7690 *	initialize the MDIO interface.
7691 */
int t4_prep_adapter(struct adapter *adapter, u8 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	/* Cache PCIe speed/width for later reporting. */
	get_pci_mode(adapter, &adapter->params.pci);

	/* Chip id and revision come from the PL_REV register. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	/* Resolve the per-generation parameter table; must exist. */
	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* Identify the serial flash so we can size reads/writes to it. */
	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* buf must be at least VPD_LEN bytes (caller-provided scratch). */
	ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
7752
7753/**
7754 *	t4_shutdown_adapter - shut down adapter, host & wire
7755 *	@adapter: the adapter
7756 *
7757 *	Perform an emergency shutdown of the adapter and stop it from
7758 *	continuing any further communication on the ports or DMA to the
7759 *	host.  This is typically used when the adapter and/or firmware
7760 *	have crashed and we want to prevent any further accidental
7761 *	communication with the rest of the world.  This will also force
7762 *	the port Link Status to go down -- if register writes work --
7763 *	which should help our peers figure out that we're down.
7764 */
7765int t4_shutdown_adapter(struct adapter *adapter)
7766{
7767	int port;
7768
7769	t4_intr_disable(adapter);
7770	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
7771	for_each_port(adapter, port) {
7772		u32 a_port_cfg = PORT_REG(port,
7773					  is_t4(adapter)
7774					  ? A_XGMAC_PORT_CFG
7775					  : A_MAC_PORT_CFG);
7776
7777		t4_write_reg(adapter, a_port_cfg,
7778			     t4_read_reg(adapter, a_port_cfg)
7779			     & ~V_SIGNAL_DET(1));
7780	}
7781	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
7782
7783	return 0;
7784}
7785
7786/**
7787 *	t4_init_devlog_params - initialize adapter->params.devlog
7788 *	@adap: the adapter
7789 *	@fw_attach: whether we can talk to the firmware
7790 *
7791 *	Initialize various fields of the adapter's Firmware Device Log
7792 *	Parameters structure.
7793 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		/* Memory type and 16-byte-aligned start address come
		 * straight out of the register ... */
		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* ... and the size is encoded in units of 128 entries,
		 * biased by one. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	/* Decode memory type, start address (16-byte units) and size
	 * from the firmware's reply. */
	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
7852
7853/**
7854 *	t4_init_sge_params - initialize adap->params.sge
7855 *	@adapter: the adapter
7856 *
7857 *	Initialize various fields of the adapter's SGE Parameters structure.
7858 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i;

	/* Interrupt-coalescing packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* Holdoff timers, converted from core clock ticks to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));

	/* Free-list starvation thresholds; the field moved per generation. */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; field encodes log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Status page size, Rx packet shift, and pad/pack boundaries. */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		/* T6 moved the pad-boundary base shift. */
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		/* On T5+ the packing boundary is separate; 0 encodes 16B. */
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}

	/* Cache the free-list buffer sizes programmed into the hardware. */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
7933
7934/*
7935 * Read and cache the adapter's compressed filter mode and ingress config.
7936 */
static void read_filter_mode_and_ingress_config(struct adapter *adap)
{
	struct tp_params *tpp = &adap->params.tp;

	/* Read via the firmware (LDST) when possible, else indirectly. */
	if (t4_use_ldst(adap)) {
		t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
				A_TP_VLAN_PRI_MAP, 1);
		t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
				A_TP_INGRESS_CONFIG, 1);
	} else {
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
	}

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;
}
7976
7977/**
7978 *      t4_init_tp_params - initialize adap->params.tp
7979 *      @adap: the adapter
7980 *
7981 *      Initialize various fields of the adapter's TP Parameters structure.
7982 */
7983int t4_init_tp_params(struct adapter *adap)
7984{
7985	int chan;
7986	u32 v;
7987	struct tp_params *tpp = &adap->params.tp;
7988
7989	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
7990	tpp->tre = G_TIMERRESOLUTION(v);
7991	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
7992
7993	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7994	for (chan = 0; chan < MAX_NCHAN; chan++)
7995		tpp->tx_modq[chan] = chan;
7996
7997	read_filter_mode_and_ingress_config(adap);
7998
7999	/*
8000	 * For T6, cache the adapter's compressed error vector
8001	 * and passing outer header info for encapsulated packets.
8002	 */
8003	if (chip_id(adap) > CHELSIO_T5) {
8004		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
8005		tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
8006	}
8007
8008	return 0;
8009}
8010
8011/**
8012 *      t4_filter_field_shift - calculate filter field shift
8013 *      @adap: the adapter
8014 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8015 *
8016 *      Return the shift position of a filter field within the Compressed
8017 *      Filter Tuple.  The filter field is specified via its selection bit
8018 *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
8019 */
8020int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8021{
8022	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8023	unsigned int sel;
8024	int field_shift;
8025
8026	if ((filter_mode & filter_sel) == 0)
8027		return -1;
8028
8029	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8030		switch (filter_mode & sel) {
8031		case F_FCOE:
8032			field_shift += W_FT_FCOE;
8033			break;
8034		case F_PORT:
8035			field_shift += W_FT_PORT;
8036			break;
8037		case F_VNIC_ID:
8038			field_shift += W_FT_VNIC_ID;
8039			break;
8040		case F_VLAN:
8041			field_shift += W_FT_VLAN;
8042			break;
8043		case F_TOS:
8044			field_shift += W_FT_TOS;
8045			break;
8046		case F_PROTOCOL:
8047			field_shift += W_FT_PROTOCOL;
8048			break;
8049		case F_ETHERTYPE:
8050			field_shift += W_FT_ETHERTYPE;
8051			break;
8052		case F_MACMATCH:
8053			field_shift += W_FT_MACMATCH;
8054			break;
8055		case F_MPSHITTYPE:
8056			field_shift += W_FT_MPSHITTYPE;
8057			break;
8058		case F_FRAGMENTATION:
8059			field_shift += W_FT_FRAGMENTATION;
8060			break;
8061		}
8062	}
8063	return field_shift;
8064}
8065
/*
 * t4_port_init - initialize a port's software state
 * @adap: the adapter
 * @mbox: mailbox to use for firmware commands
 * @pf: the PF that owns the port's VI
 * @vf: the VF that owns the port's VI (0 for PF-owned)
 * @port_id: the adapter-relative index of the port to initialize
 *
 * Queries the firmware for the port's link/module information, allocates
 * a virtual interface on it, and fills in the port_info structure.
 * Returns 0 on success or a negative error.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	memset(&c, 0, sizeof(c));

	/*
	 * Locate the (port_id + 1)-th set bit in the port vector; j ends up
	 * being the physical port number backing this logical port.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	/*
	 * Ask the firmware for port info unless we're a VF without the
	 * PORT capability, in which case the command isn't permitted.
	 */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
		c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		/* Decode MDIO address, port type and module type. */
		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
			G_FW_PORT_CMD_MDIOADDR(ret) : -1;
		p->port_type = G_FW_PORT_CMD_PTYPE(ret);
		p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
	}

	/* Allocate the port's first virtual interface; ret is the VI id. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	/* SMT index derivation differs between T4/T5 and later chips. */
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
	else
		p->vi[0].smt_idx = (ret & 0x7f);
	p->tx_chan = j;
	p->rx_chan_map = t4_get_mps_bg_map(adap, j);
	p->lport = j;
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	/*
	 * Query the VI's RSS table base; fall back to 0xffff (invalid) if
	 * the firmware doesn't support the RSSINFO parameter.
	 */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}
8132
8133/**
8134 *	t4_read_cimq_cfg - read CIM queue configuration
8135 *	@adap: the adapter
8136 *	@base: holds the queue base addresses in bytes
8137 *	@size: holds the queue sizes in bytes
8138 *	@thres: holds the queue full thresholds in bytes
8139 *
8140 *	Returns the current configuration of the CIM queues, starting with
8141 *	the IBQs, then the OBQs.
8142 */
8143void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
8144{
8145	unsigned int i, v;
8146	int cim_num_obq = adap->chip_params->cim_num_obq;
8147
8148	for (i = 0; i < CIM_NUM_IBQ; i++) {
8149		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
8150			     V_QUENUMSELECT(i));
8151		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8152		/* value is in 256-byte units */
8153		*base++ = G_CIMQBASE(v) * 256;
8154		*size++ = G_CIMQSIZE(v) * 256;
8155		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
8156	}
8157	for (i = 0; i < cim_num_obq; i++) {
8158		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8159			     V_QUENUMSELECT(i));
8160		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8161		/* value is in 256-byte units */
8162		*base++ = G_CIMQBASE(v) * 256;
8163		*size++ = G_CIMQSIZE(v) * 256;
8164	}
8165}
8166
8167/**
8168 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
8169 *	@adap: the adapter
8170 *	@qid: the queue index
8171 *	@data: where to store the queue contents
8172 *	@n: capacity of @data in 32-bit words
8173 *
8174 *	Reads the contents of the selected CIM queue starting at address 0 up
8175 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8176 *	error and the number of 32-bit words actually read on success.
8177 */
8178int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8179{
8180	int i, err, attempts;
8181	unsigned int addr;
8182	const unsigned int nwords = CIM_IBQ_SIZE * 4;
8183
8184	if (qid > 5 || (n & 3))
8185		return -EINVAL;
8186
8187	addr = qid * nwords;
8188	if (n > nwords)
8189		n = nwords;
8190
8191	/* It might take 3-10ms before the IBQ debug read access is allowed.
8192	 * Wait for 1 Sec with a delay of 1 usec.
8193	 */
8194	attempts = 1000000;
8195
8196	for (i = 0; i < n; i++, addr++) {
8197		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
8198			     F_IBQDBGEN);
8199		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
8200				      attempts, 1);
8201		if (err)
8202			return err;
8203		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
8204	}
8205	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
8206	return i;
8207}
8208
8209/**
8210 *	t4_read_cim_obq - read the contents of a CIM outbound queue
8211 *	@adap: the adapter
8212 *	@qid: the queue index
8213 *	@data: where to store the queue contents
8214 *	@n: capacity of @data in 32-bit words
8215 *
8216 *	Reads the contents of the selected CIM queue starting at address 0 up
8217 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
8218 *	error and the number of 32-bit words actually read on success.
8219 */
8220int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
8221{
8222	int i, err;
8223	unsigned int addr, v, nwords;
8224	int cim_num_obq = adap->chip_params->cim_num_obq;
8225
8226	if ((qid > (cim_num_obq - 1)) || (n & 3))
8227		return -EINVAL;
8228
8229	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
8230		     V_QUENUMSELECT(qid));
8231	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
8232
8233	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
8234	nwords = G_CIMQSIZE(v) * 64;  /* same */
8235	if (n > nwords)
8236		n = nwords;
8237
8238	for (i = 0; i < n; i++, addr++) {
8239		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
8240			     F_OBQDBGEN);
8241		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
8242				      2, 1);
8243		if (err)
8244			return err;
8245		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
8246	}
8247	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
8248	return i;
8249}
8250
/*
 * Base offsets of the regions within the CIM internal address space that
 * are accessed through t4_cim_read()/t4_cim_write().
 */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
8258
8259/**
8260 *	t4_cim_read - read a block from CIM internal address space
8261 *	@adap: the adapter
8262 *	@addr: the start address within the CIM address space
8263 *	@n: number of words to read
8264 *	@valp: where to store the result
8265 *
8266 *	Reads a block of 4-byte words from the CIM intenal address space.
8267 */
8268int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
8269		unsigned int *valp)
8270{
8271	int ret = 0;
8272
8273	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8274		return -EBUSY;
8275
8276	for ( ; !ret && n--; addr += 4) {
8277		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
8278		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8279				      0, 5, 2);
8280		if (!ret)
8281			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
8282	}
8283	return ret;
8284}
8285
8286/**
8287 *	t4_cim_write - write a block into CIM internal address space
8288 *	@adap: the adapter
8289 *	@addr: the start address within the CIM address space
8290 *	@n: number of words to write
8291 *	@valp: set of values to write
8292 *
8293 *	Writes a block of 4-byte words into the CIM intenal address space.
8294 */
8295int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8296		 const unsigned int *valp)
8297{
8298	int ret = 0;
8299
8300	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
8301		return -EBUSY;
8302
8303	for ( ; !ret && n--; addr += 4) {
8304		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
8305		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
8306		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
8307				      0, 5, 2);
8308	}
8309	return ret;
8310}
8311
/* Convenience wrapper: write a single 4-byte word @val at CIM address @addr. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
8317
8318/**
8319 *	t4_cim_ctl_read - read a block from CIM control region
8320 *	@adap: the adapter
8321 *	@addr: the start address within the CIM control region
8322 *	@n: number of words to read
8323 *	@valp: where to store the result
8324 *
8325 *	Reads a block of 4-byte words from the CIM control region.
8326 */
8327int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8328		    unsigned int *valp)
8329{
8330	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8331}
8332
8333/**
8334 *	t4_cim_read_la - read CIM LA capture buffer
8335 *	@adap: the adapter
8336 *	@la_buf: where to store the LA data
8337 *	@wrptr: the HW write pointer within the capture buffer
8338 *
8339 *	Reads the contents of the CIM LA buffer with the most recent entry at
8340 *	the end	of the returned data and with the entry at @wrptr first.
8341 *	We try to leave the LA in the running state we find it in.
8342 */
8343int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8344{
8345	int i, ret;
8346	unsigned int cfg, val, idx;
8347
8348	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8349	if (ret)
8350		return ret;
8351
8352	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
8353		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
8354		if (ret)
8355			return ret;
8356	}
8357
8358	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8359	if (ret)
8360		goto restart;
8361
8362	idx = G_UPDBGLAWRPTR(val);
8363	if (wrptr)
8364		*wrptr = idx;
8365
8366	for (i = 0; i < adap->params.cim_la_size; i++) {
8367		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8368				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
8369		if (ret)
8370			break;
8371		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
8372		if (ret)
8373			break;
8374		if (val & F_UPDBGLARDEN) {
8375			ret = -ETIMEDOUT;
8376			break;
8377		}
8378		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
8379		if (ret)
8380			break;
8381
8382		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
8383		idx = (idx + 1) & M_UPDBGLARDPTR;
8384		/*
8385		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
8386		 * identify the 32-bit portion of the full 312-bit data
8387		 */
8388		if (is_t6(adap))
8389			while ((idx & 0xf) > 9)
8390				idx = (idx + 1) % M_UPDBGLARDPTR;
8391	}
8392restart:
8393	if (cfg & F_UPDBGLAEN) {
8394		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
8395				      cfg & ~F_UPDBGLARDEN);
8396		if (!ret)
8397			ret = r;
8398	}
8399	return ret;
8400}
8401
8402/**
8403 *	t4_tp_read_la - read TP LA capture buffer
8404 *	@adap: the adapter
8405 *	@la_buf: where to store the LA data
8406 *	@wrptr: the HW write pointer within the capture buffer
8407 *
8408 *	Reads the contents of the TP LA buffer with the most recent entry at
8409 *	the end	of the returned data and with the entry at @wrptr first.
8410 *	We leave the LA in the running state we find it in.
8411 */
8412void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8413{
8414	bool last_incomplete;
8415	unsigned int i, cfg, val, idx;
8416
8417	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
8418	if (cfg & F_DBGLAENABLE)			/* freeze LA */
8419		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8420			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
8421
8422	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
8423	idx = G_DBGLAWPTR(val);
8424	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
8425	if (last_incomplete)
8426		idx = (idx + 1) & M_DBGLARPTR;
8427	if (wrptr)
8428		*wrptr = idx;
8429
8430	val &= 0xffff;
8431	val &= ~V_DBGLARPTR(M_DBGLARPTR);
8432	val |= adap->params.tp.la_mask;
8433
8434	for (i = 0; i < TPLA_SIZE; i++) {
8435		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
8436		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
8437		idx = (idx + 1) & M_DBGLARPTR;
8438	}
8439
8440	/* Wipe out last entry if it isn't valid */
8441	if (last_incomplete)
8442		la_buf[TPLA_SIZE - 1] = ~0ULL;
8443
8444	if (cfg & F_DBGLAENABLE)		/* restore running state */
8445		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
8446			     cfg | adap->params.tp.la_mask);
8447}
8448
8449/*
8450 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8451 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
8452 * state for more than the Warning Threshold then we'll issue a warning about
8453 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
8454 * appears to be hung every Warning Repeat second till the situation clears.
8455 * If the situation clears, we'll note that as well.
8456 */
8457#define SGE_IDMA_WARN_THRESH 1
8458#define SGE_IDMA_WARN_REPEAT 300
8459
8460/**
8461 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8462 *	@adapter: the adapter
8463 *	@idma: the adapter IDMA Monitor state
8464 *
8465 *	Initialize the state of an SGE Ingress DMA Monitor.
8466 */
8467void t4_idma_monitor_init(struct adapter *adapter,
8468			  struct sge_idma_monitor_state *idma)
8469{
8470	/* Initialize the state variables for detecting an SGE Ingress DMA
8471	 * hang.  The SGE has internal counters which count up on each clock
8472	 * tick whenever the SGE finds its Ingress DMA State Engines in the
8473	 * same state they were on the previous clock tick.  The clock used is
8474	 * the Core Clock so we have a limit on the maximum "time" they can
8475	 * record; typically a very small number of seconds.  For instance,
8476	 * with a 600MHz Core Clock, we can only count up to a bit more than
8477	 * 7s.  So we'll synthesize a larger counter in order to not run the
8478	 * risk of having the "timers" overflow and give us the flexibility to
8479	 * maintain a Hung SGE State Machine of our own which operates across
8480	 * a longer time frame.
8481	 */
8482	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8483	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8484}
8485
8486/**
8487 *	t4_idma_monitor - monitor SGE Ingress DMA state
8488 *	@adapter: the adapter
8489 *	@idma: the adapter IDMA Monitor state
8490 *	@hz: number of ticks/second
8491 *	@ticks: number of ticks since the last IDMA Monitor call
8492 */
8493void t4_idma_monitor(struct adapter *adapter,
8494		     struct sge_idma_monitor_state *idma,
8495		     int hz, int ticks)
8496{
8497	int i, idma_same_state_cnt[2];
8498
8499	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
8500	  * are counters inside the SGE which count up on each clock when the
8501	  * SGE finds its Ingress DMA State Engines in the same states they
8502	  * were in the previous clock.  The counters will peg out at
8503	  * 0xffffffff without wrapping around so once they pass the 1s
8504	  * threshold they'll stay above that till the IDMA state changes.
8505	  */
8506	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
8507	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
8508	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8509
8510	for (i = 0; i < 2; i++) {
8511		u32 debug0, debug11;
8512
8513		/* If the Ingress DMA Same State Counter ("timer") is less
8514		 * than 1s, then we can reset our synthesized Stall Timer and
8515		 * continue.  If we have previously emitted warnings about a
8516		 * potential stalled Ingress Queue, issue a note indicating
8517		 * that the Ingress Queue has resumed forward progress.
8518		 */
8519		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8520			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
8521				CH_WARN(adapter, "SGE idma%d, queue %u, "
8522					"resumed after %d seconds\n",
8523					i, idma->idma_qid[i],
8524					idma->idma_stalled[i]/hz);
8525			idma->idma_stalled[i] = 0;
8526			continue;
8527		}
8528
8529		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8530		 * domain.  The first time we get here it'll be because we
8531		 * passed the 1s Threshold; each additional time it'll be
8532		 * because the RX Timer Callback is being fired on its regular
8533		 * schedule.
8534		 *
8535		 * If the stall is below our Potential Hung Ingress Queue
8536		 * Warning Threshold, continue.
8537		 */
8538		if (idma->idma_stalled[i] == 0) {
8539			idma->idma_stalled[i] = hz;
8540			idma->idma_warn[i] = 0;
8541		} else {
8542			idma->idma_stalled[i] += ticks;
8543			idma->idma_warn[i] -= ticks;
8544		}
8545
8546		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
8547			continue;
8548
8549		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8550		 */
8551		if (idma->idma_warn[i] > 0)
8552			continue;
8553		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
8554
8555		/* Read and save the SGE IDMA State and Queue ID information.
8556		 * We do this every time in case it changes across time ...
8557		 * can't be too careful ...
8558		 */
8559		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
8560		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8561		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8562
8563		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
8564		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
8565		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8566
8567		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
8568			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8569			i, idma->idma_qid[i], idma->idma_state[i],
8570			idma->idma_stalled[i]/hz,
8571			debug0, debug11);
8572		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8573	}
8574}
8575
8576/**
8577 *	t4_read_pace_tbl - read the pace table
8578 *	@adap: the adapter
8579 *	@pace_vals: holds the returned values
8580 *
8581 *	Returns the values of TP's pace table in microseconds.
8582 */
8583void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8584{
8585	unsigned int i, v;
8586
8587	for (i = 0; i < NTX_SCHED; i++) {
8588		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8589		v = t4_read_reg(adap, A_TP_PACE_TABLE);
8590		pace_vals[i] = dack_ticks_to_usec(adap, v);
8591	}
8592}
8593
8594/**
8595 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8596 *	@adap: the adapter
8597 *	@sched: the scheduler index
8598 *	@kbps: the byte rate in Kbps
8599 *	@ipg: the interpacket delay in tenths of nanoseconds
8600 *
8601 *	Return the current configuration of a HW Tx scheduler.
8602 */
8603void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8604		     unsigned int *ipg)
8605{
8606	unsigned int v, addr, bpt, cpt;
8607
8608	if (kbps) {
8609		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8610		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8611		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8612		if (sched & 1)
8613			v >>= 16;
8614		bpt = (v >> 8) & 0xff;
8615		cpt = v & 0xff;
8616		if (!cpt)
8617			*kbps = 0;	/* scheduler disabled */
8618		else {
8619			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8620			*kbps = (v * bpt) / 125;
8621		}
8622	}
8623	if (ipg) {
8624		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8625		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
8626		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
8627		if (sched & 1)
8628			v >>= 16;
8629		v &= 0xffff;
8630		*ipg = (10000 * v) / core_ticks_per_usec(adap);
8631	}
8632}
8633
8634/**
8635 *	t4_load_cfg - download config file
8636 *	@adap: the adapter
8637 *	@cfg_data: the cfg text file to write
8638 *	@size: text file size
8639 *
8640 *	Write the supplied config text file to the card's serial flash.
8641 */
8642int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8643{
8644	int ret, i, n, cfg_addr;
8645	unsigned int addr;
8646	unsigned int flash_cfg_start_sec;
8647	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8648
8649	cfg_addr = t4_flash_cfg_addr(adap);
8650	if (cfg_addr < 0)
8651		return cfg_addr;
8652
8653	addr = cfg_addr;
8654	flash_cfg_start_sec = addr / SF_SEC_SIZE;
8655
8656	if (size > FLASH_CFG_MAX_SIZE) {
8657		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8658		       FLASH_CFG_MAX_SIZE);
8659		return -EFBIG;
8660	}
8661
8662	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
8663			 sf_sec_size);
8664	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8665				     flash_cfg_start_sec + i - 1);
8666	/*
8667	 * If size == 0 then we're simply erasing the FLASH sectors associated
8668	 * with the on-adapter Firmware Configuration File.
8669	 */
8670	if (ret || size == 0)
8671		goto out;
8672
8673	/* this will write to the flash up to SF_PAGE_SIZE at a time */
8674	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8675		if ( (size - i) <  SF_PAGE_SIZE)
8676			n = size - i;
8677		else
8678			n = SF_PAGE_SIZE;
8679		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8680		if (ret)
8681			goto out;
8682
8683		addr += SF_PAGE_SIZE;
8684		cfg_data += SF_PAGE_SIZE;
8685	}
8686
8687out:
8688	if (ret)
8689		CH_ERR(adap, "config file %s failed %d\n",
8690		       (size == 0 ? "clear" : "download"), ret);
8691	return ret;
8692}
8693
8694/**
8695 *	t5_fw_init_extern_mem - initialize the external memory
8696 *	@adap: the adapter
8697 *
8698 *	Initializes the external memory on T5.
8699 */
8700int t5_fw_init_extern_mem(struct adapter *adap)
8701{
8702	u32 params[1], val[1];
8703	int ret;
8704
8705	if (!is_t5(adap))
8706		return 0;
8707
8708	val[0] = 0xff; /* Initialize all MCs */
8709	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8710			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8711	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8712			FW_CMD_MAX_TIMEOUT);
8713
8714	return ret;
8715}
8716
/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8723
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8733
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
8752
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI__DATA_STRUCTURE */
8776
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max 1024 512B chunks (512KB) */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};
8787
8788/*
8789 *	modify_device_id - Modifies the device ID of the Boot BIOS image
8790 *	@adatper: the device ID to write.
8791 *	@boot_data: the boot image to modify.
8792 *
8793 *	Write the supplied device ID to the boot BIOS image.
8794 */
8795static void modify_device_id(int device_id, u8 *boot_data)
8796{
8797	legacy_pci_exp_rom_header_t *header;
8798	pcir_data_t *pcir_header;
8799	u32 cur_header = 0;
8800
8801	/*
8802	 * Loop through all chained images and change the device ID's
8803	 */
8804	while (1) {
8805		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
8806		pcir_header = (pcir_data_t *) &boot_data[cur_header +
8807			      le16_to_cpu(*(u16*)header->pcir_offset)];
8808
8809		/*
8810		 * Only modify the Device ID if code type is Legacy or HP.
8811		 * 0x00: Okay to modify
8812		 * 0x01: FCODE. Do not be modify
8813		 * 0x03: Okay to modify
8814		 * 0x04-0xFF: Do not modify
8815		 */
8816		if (pcir_header->code_type == 0x00) {
8817			u8 csum = 0;
8818			int i;
8819
8820			/*
8821			 * Modify Device ID to match current adatper
8822			 */
8823			*(u16*) pcir_header->device_id = device_id;
8824
8825			/*
8826			 * Set checksum temporarily to 0.
8827			 * We will recalculate it later.
8828			 */
8829			header->cksum = 0x0;
8830
8831			/*
8832			 * Calculate and update checksum
8833			 */
8834			for (i = 0; i < (header->size512 * 512); i++)
8835				csum += (u8)boot_data[cur_header + i];
8836
8837			/*
8838			 * Invert summed value to create the checksum
8839			 * Writing new checksum value directly to the boot data
8840			 */
8841			boot_data[cur_header + 7] = -csum;
8842
8843		} else if (pcir_header->code_type == 0x03) {
8844
8845			/*
8846			 * Modify Device ID to match current adatper
8847			 */
8848			*(u16*) pcir_header->device_id = device_id;
8849
8850		}
8851
8852
8853		/*
8854		 * Check indicator element to identify if this is the last
8855		 * image in the ROM.
8856		 */
8857		if (pcir_header->indicator & 0x80)
8858			break;
8859
8860		/*
8861		 * Move header pointer up to the next image in the ROM.
8862		 */
8863		cur_header += header->size512 * 512;
8864	}
8865}
8866
8867/*
8868 *	t4_load_boot - download boot flash
8869 *	@adapter: the adapter
8870 *	@boot_data: the boot image to write
8871 *	@boot_addr: offset in flash to write boot_data
8872 *	@size: image size
8873 *
8874 *	Write the supplied boot image to the card's serial flash.
8875 *	The boot image has the following sections: a 28-byte header and the
8876 *	boot image.
8877 */
8878int t4_load_boot(struct adapter *adap, u8 *boot_data,
8879		 unsigned int boot_addr, unsigned int size)
8880{
8881	pci_exp_rom_header_t *header;
8882	int pcir_offset ;
8883	pcir_data_t *pcir_header;
8884	int ret, addr;
8885	uint16_t device_id;
8886	unsigned int i;
8887	unsigned int boot_sector = (boot_addr * 1024 );
8888	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8889
8890	/*
8891	 * Make sure the boot image does not encroach on the firmware region
8892	 */
8893	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
8894		CH_ERR(adap, "boot image encroaching on firmware region\n");
8895		return -EFBIG;
8896	}
8897
8898	/*
8899	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
8900	 * and Boot configuration data sections. These 3 boot sections span
8901	 * sectors 0 to 7 in flash and live right before the FW image location.
8902	 */
8903	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
8904			sf_sec_size);
8905	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
8906				     (boot_sector >> 16) + i - 1);
8907
8908	/*
8909	 * If size == 0 then we're simply erasing the FLASH sectors associated
8910	 * with the on-adapter option ROM file
8911	 */
8912	if (ret || (size == 0))
8913		goto out;
8914
8915	/* Get boot header */
8916	header = (pci_exp_rom_header_t *)boot_data;
8917	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
8918	/* PCIR Data Structure */
8919	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
8920
8921	/*
8922	 * Perform some primitive sanity testing to avoid accidentally
8923	 * writing garbage over the boot sectors.  We ought to check for
8924	 * more but it's not worth it for now ...
8925	 */
8926	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
8927		CH_ERR(adap, "boot image too small/large\n");
8928		return -EFBIG;
8929	}
8930
8931#ifndef CHELSIO_T4_DIAGS
8932	/*
8933	 * Check BOOT ROM header signature
8934	 */
8935	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
8936		CH_ERR(adap, "Boot image missing signature\n");
8937		return -EINVAL;
8938	}
8939
8940	/*
8941	 * Check PCI header signature
8942	 */
8943	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
8944		CH_ERR(adap, "PCI header missing signature\n");
8945		return -EINVAL;
8946	}
8947
8948	/*
8949	 * Check Vendor ID matches Chelsio ID
8950	 */
8951	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
8952		CH_ERR(adap, "Vendor ID missing signature\n");
8953		return -EINVAL;
8954	}
8955#endif
8956
8957	/*
8958	 * Retrieve adapter's device ID
8959	 */
8960	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
8961	/* Want to deal with PF 0 so I strip off PF 4 indicator */
8962	device_id = device_id & 0xf0ff;
8963
8964	/*
8965	 * Check PCIE Device ID
8966	 */
8967	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
8968		/*
8969		 * Change the device ID in the Boot BIOS image to match
8970		 * the Device ID of the current adapter.
8971		 */
8972		modify_device_id(device_id, boot_data);
8973	}
8974
8975	/*
8976	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
8977	 * we finish copying the rest of the boot image. This will ensure
8978	 * that the BIOS boot header will only be written if the boot image
8979	 * was written in full.
8980	 */
8981	addr = boot_sector;
8982	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
8983		addr += SF_PAGE_SIZE;
8984		boot_data += SF_PAGE_SIZE;
8985		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
8986		if (ret)
8987			goto out;
8988	}
8989
8990	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
8991			     (const u8 *)header, 0);
8992
8993out:
8994	if (ret)
8995		CH_ERR(adap, "boot image download failed, error %d\n", ret);
8996	return ret;
8997}
8998
8999/*
9000 *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9001 *	@adapter: the adapter
9002 *
9003 *	Return the address within the flash where the OptionROM Configuration
9004 *	is stored, or an error if the device FLASH is too small to contain
9005 *	a OptionROM Configuration.
9006 */
9007static int t4_flash_bootcfg_addr(struct adapter *adapter)
9008{
9009	/*
9010	 * If the device FLASH isn't large enough to hold a Firmware
9011	 * Configuration File, return an error.
9012	 */
9013	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9014		return -ENOSPC;
9015
9016	return FLASH_BOOTCFG_START;
9017}
9018
/*
 *	t4_load_bootcfg - download (or clear) the OptionROM Configuration File
 *	@adap: the adapter
 *	@cfg_data: the Boot Configuration data to write; not consulted when
 *		@size is 0
 *	@size: byte length of @cfg_data; 0 means just erase the region
 *
 *	Erases the flash sectors reserved for the on-adapter OptionROM
 *	Configuration File and, when @size is non-zero, programs @cfg_data
 *	into them one SF_PAGE_SIZE page at a time.  Returns 0 on success or
 *	a negative errno (-ENOSPC if the flash has no Bootcfg region,
 *	-EFBIG if the file is too large, or a flash-access error).
 */
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	/* Sector size derived from the part's geometry (total size / #sectors). */
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/* Locate the fixed Bootcfg region; fails if the flash is too small. */
	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	/*
	 * NOTE(review): the starting sector is computed with the constant
	 * SF_SEC_SIZE while the sector count below uses the derived
	 * sf_sec_size — presumably these agree on supported parts, but
	 * verify for flash geometries where they differ.
	 */
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
			FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	/*
	 * Always erase the full Bootcfg region (not just "size" bytes) so a
	 * shorter new file cannot leave stale tail data behind.
	 */
	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
					flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		/* Final page may be partial; write only the remaining bytes. */
		if ( (size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
				(size == 0 ? "clear" : "download"), ret);
	return ret;
}
9071
9072/**
9073 *	t4_set_filter_mode - configure the optional components of filter tuples
9074 *	@adap: the adapter
9075 *	@mode_map: a bitmap selcting which optional filter components to enable
9076 *
9077 *	Sets the filter mode by selecting the optional components to enable
9078 *	in filter tuples.  Returns 0 on success and a negative error if the
9079 *	requested mode needs more bits than are available for optional
9080 *	components.
9081 */
9082int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
9083{
9084	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
9085
9086	int i, nbits = 0;
9087
9088	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9089		if (mode_map & (1 << i))
9090			nbits += width[i];
9091	if (nbits > FILTER_OPT_LEN)
9092		return -EINVAL;
9093	if (t4_use_ldst(adap))
9094		t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
9095	else
9096		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
9097				  1, A_TP_VLAN_PRI_MAP);
9098	read_filter_mode_and_ingress_config(adap);
9099
9100	return 0;
9101}
9102
9103/**
9104 *	t4_clr_port_stats - clear port statistics
9105 *	@adap: the adapter
9106 *	@idx: the port index
9107 *
9108 *	Clear HW statistics for the given port.
9109 */
9110void t4_clr_port_stats(struct adapter *adap, int idx)
9111{
9112	unsigned int i;
9113	u32 bgmap = t4_get_mps_bg_map(adap, idx);
9114	u32 port_base_addr;
9115
9116	if (is_t4(adap))
9117		port_base_addr = PORT_BASE(idx);
9118	else
9119		port_base_addr = T5_PORT_BASE(idx);
9120
9121	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9122			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9123		t4_write_reg(adap, port_base_addr + i, 0);
9124	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9125			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9126		t4_write_reg(adap, port_base_addr + i, 0);
9127	for (i = 0; i < 4; i++)
9128		if (bgmap & (1 << i)) {
9129			t4_write_reg(adap,
9130			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9131			t4_write_reg(adap,
9132			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9133		}
9134}
9135
9136/**
9137 *	t4_i2c_rd - read I2C data from adapter
9138 *	@adap: the adapter
9139 *	@port: Port number if per-port device; <0 if not
9140 *	@devid: per-port device ID or absolute device ID
9141 *	@offset: byte offset into device I2C space
9142 *	@len: byte length of I2C space data
9143 *	@buf: buffer in which to return I2C data
9144 *
9145 *	Reads the I2C data from the indicated device and location.
9146 */
9147int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9148	      int port, unsigned int devid,
9149	      unsigned int offset, unsigned int len,
9150	      u8 *buf)
9151{
9152	u32 ldst_addrspace;
9153	struct fw_ldst_cmd ldst;
9154	int ret;
9155
9156	if (port >= 4 ||
9157	    devid >= 256 ||
9158	    offset >= 256 ||
9159	    len > sizeof ldst.u.i2c.data)
9160		return -EINVAL;
9161
9162	memset(&ldst, 0, sizeof ldst);
9163	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9164	ldst.op_to_addrspace =
9165		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9166			    F_FW_CMD_REQUEST |
9167			    F_FW_CMD_READ |
9168			    ldst_addrspace);
9169	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9170	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9171	ldst.u.i2c.did = devid;
9172	ldst.u.i2c.boffset = offset;
9173	ldst.u.i2c.blen = len;
9174	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9175	if (!ret)
9176		memcpy(buf, ldst.u.i2c.data, len);
9177	return ret;
9178}
9179
9180/**
9181 *	t4_i2c_wr - write I2C data to adapter
9182 *	@adap: the adapter
9183 *	@port: Port number if per-port device; <0 if not
9184 *	@devid: per-port device ID or absolute device ID
9185 *	@offset: byte offset into device I2C space
9186 *	@len: byte length of I2C space data
9187 *	@buf: buffer containing new I2C data
9188 *
9189 *	Write the I2C data to the indicated device and location.
9190 */
9191int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9192	      int port, unsigned int devid,
9193	      unsigned int offset, unsigned int len,
9194	      u8 *buf)
9195{
9196	u32 ldst_addrspace;
9197	struct fw_ldst_cmd ldst;
9198
9199	if (port >= 4 ||
9200	    devid >= 256 ||
9201	    offset >= 256 ||
9202	    len > sizeof ldst.u.i2c.data)
9203		return -EINVAL;
9204
9205	memset(&ldst, 0, sizeof ldst);
9206	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9207	ldst.op_to_addrspace =
9208		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9209			    F_FW_CMD_REQUEST |
9210			    F_FW_CMD_WRITE |
9211			    ldst_addrspace);
9212	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9213	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9214	ldst.u.i2c.did = devid;
9215	ldst.u.i2c.boffset = offset;
9216	ldst.u.i2c.blen = len;
9217	memcpy(ldst.u.i2c.data, buf, len);
9218	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9219}
9220
9221/**
9222 * 	t4_sge_ctxt_rd - read an SGE context through FW
9223 * 	@adap: the adapter
9224 * 	@mbox: mailbox to use for the FW command
9225 * 	@cid: the context id
9226 * 	@ctype: the context type
9227 * 	@data: where to store the context data
9228 *
9229 * 	Issues a FW command through the given mailbox to read an SGE context.
9230 */
9231int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9232		   enum ctxt_type ctype, u32 *data)
9233{
9234	int ret;
9235	struct fw_ldst_cmd c;
9236
9237	if (ctype == CTXT_EGRESS)
9238		ret = FW_LDST_ADDRSPC_SGE_EGRC;
9239	else if (ctype == CTXT_INGRESS)
9240		ret = FW_LDST_ADDRSPC_SGE_INGC;
9241	else if (ctype == CTXT_FLM)
9242		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9243	else
9244		ret = FW_LDST_ADDRSPC_SGE_CONMC;
9245
9246	memset(&c, 0, sizeof(c));
9247	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9248					F_FW_CMD_REQUEST | F_FW_CMD_READ |
9249					V_FW_LDST_CMD_ADDRSPACE(ret));
9250	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9251	c.u.idctxt.physid = cpu_to_be32(cid);
9252
9253	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9254	if (ret == 0) {
9255		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9256		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9257		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9258		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9259		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9260		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9261	}
9262	return ret;
9263}
9264
9265/**
9266 * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9267 * 	@adap: the adapter
9268 * 	@cid: the context id
9269 * 	@ctype: the context type
9270 * 	@data: where to store the context data
9271 *
9272 * 	Reads an SGE context directly, bypassing FW.  This is only for
9273 * 	debugging when FW is unavailable.
9274 */
9275int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9276		      u32 *data)
9277{
9278	int i, ret;
9279
9280	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9281	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9282	if (!ret)
9283		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9284			*data++ = t4_read_reg(adap, i);
9285	return ret;
9286}
9287
9288int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9289    		    int sleep_ok)
9290{
9291	struct fw_sched_cmd cmd;
9292
9293	memset(&cmd, 0, sizeof(cmd));
9294	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9295				      F_FW_CMD_REQUEST |
9296				      F_FW_CMD_WRITE);
9297	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9298
9299	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9300	cmd.u.config.type = type;
9301	cmd.u.config.minmaxen = minmaxen;
9302
9303	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9304			       NULL, sleep_ok);
9305}
9306
9307int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9308		    int rateunit, int ratemode, int channel, int cl,
9309		    int minrate, int maxrate, int weight, int pktsize,
9310		    int sleep_ok)
9311{
9312	struct fw_sched_cmd cmd;
9313
9314	memset(&cmd, 0, sizeof(cmd));
9315	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9316				      F_FW_CMD_REQUEST |
9317				      F_FW_CMD_WRITE);
9318	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9319
9320	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9321	cmd.u.params.type = type;
9322	cmd.u.params.level = level;
9323	cmd.u.params.mode = mode;
9324	cmd.u.params.ch = channel;
9325	cmd.u.params.cl = cl;
9326	cmd.u.params.unit = rateunit;
9327	cmd.u.params.rate = ratemode;
9328	cmd.u.params.min = cpu_to_be32(minrate);
9329	cmd.u.params.max = cpu_to_be32(maxrate);
9330	cmd.u.params.weight = cpu_to_be16(weight);
9331	cmd.u.params.pktsize = cpu_to_be16(pktsize);
9332
9333	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9334			       NULL, sleep_ok);
9335}
9336
9337/*
9338 *	t4_config_watchdog - configure (enable/disable) a watchdog timer
9339 *	@adapter: the adapter
9340 * 	@mbox: mailbox to use for the FW command
9341 * 	@pf: the PF owning the queue
9342 * 	@vf: the VF owning the queue
9343 *	@timeout: watchdog timeout in ms
9344 *	@action: watchdog timer / action
9345 *
9346 *	There are separate watchdog timers for each possible watchdog
9347 *	action.  Configure one of the watchdog timers by setting a non-zero
9348 *	timeout.  Disable a watchdog timer by using a timeout of zero.
9349 */
9350int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9351		       unsigned int pf, unsigned int vf,
9352		       unsigned int timeout, unsigned int action)
9353{
9354	struct fw_watchdog_cmd wdog;
9355	unsigned int ticks;
9356
9357	/*
9358	 * The watchdog command expects a timeout in units of 10ms so we need
9359	 * to convert it here (via rounding) and force a minimum of one 10ms
9360	 * "tick" if the timeout is non-zero but the convertion results in 0
9361	 * ticks.
9362	 */
9363	ticks = (timeout + 5)/10;
9364	if (timeout && !ticks)
9365		ticks = 1;
9366
9367	memset(&wdog, 0, sizeof wdog);
9368	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9369				     F_FW_CMD_REQUEST |
9370				     F_FW_CMD_WRITE |
9371				     V_FW_PARAMS_CMD_PFN(pf) |
9372				     V_FW_PARAMS_CMD_VFN(vf));
9373	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9374	wdog.timeout = cpu_to_be32(ticks);
9375	wdog.action = cpu_to_be32(action);
9376
9377	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9378}
9379
9380int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9381{
9382	struct fw_devlog_cmd devlog_cmd;
9383	int ret;
9384
9385	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9386	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9387					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
9388	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9389	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9390			 sizeof(devlog_cmd), &devlog_cmd);
9391	if (ret)
9392		return ret;
9393
9394	*level = devlog_cmd.level;
9395	return 0;
9396}
9397
9398int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9399{
9400	struct fw_devlog_cmd devlog_cmd;
9401
9402	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9403	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9404					     F_FW_CMD_REQUEST |
9405					     F_FW_CMD_WRITE);
9406	devlog_cmd.level = level;
9407	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9408	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9409			  sizeof(devlog_cmd), &devlog_cmd);
9410}
9411