/* t4_hw.c revision 346967 */
1/*-
2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/common/t4_hw.c 346967 2019-04-30 17:30:37Z np $");
29
30#include "opt_inet.h"
31
32#include <sys/param.h>
33#include <sys/eventhandler.h>
34
35#include "common.h"
36#include "t4_regs.h"
37#include "t4_regs_values.h"
38#include "firmware/t4fw_interface.h"
39
#undef msleep
/*
 * Sleep for @x milliseconds.  Early in boot ("cold") the scheduler is not
 * running, so busy-wait with DELAY() instead of sleeping via pause(9).
 */
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
47
48/**
49 *	t4_wait_op_done_val - wait until an operation is completed
50 *	@adapter: the adapter performing the operation
51 *	@reg: the register to check for completion
52 *	@mask: a single-bit field within @reg that indicates completion
53 *	@polarity: the value of the field when the operation is completed
54 *	@attempts: number of check iterations
55 *	@delay: delay in usecs between iterations
56 *	@valp: where to store the value of the register at completion time
57 *
58 *	Wait until an operation is completed by checking a bit in a register
59 *	up to @attempts times.  If @valp is not NULL the value of the register
60 *	at the time it indicated completion is stored there.  Returns 0 if the
61 *	operation completes and	-EAGAIN	otherwise.
62 */
63static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
64			       int polarity, int attempts, int delay, u32 *valp)
65{
66	while (1) {
67		u32 val = t4_read_reg(adapter, reg);
68
69		if (!!(val & mask) == polarity) {
70			if (valp)
71				*valp = val;
72			return 0;
73		}
74		if (--attempts == 0)
75			return -EAGAIN;
76		if (delay)
77			udelay(delay);
78	}
79}
80
81static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
82				  int polarity, int attempts, int delay)
83{
84	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
85				   delay, NULL);
86}
87
88/**
89 *	t4_set_reg_field - set a register field to a value
90 *	@adapter: the adapter to program
91 *	@addr: the register address
92 *	@mask: specifies the portion of the register to modify
93 *	@val: the new value for the register field
94 *
95 *	Sets a register field specified by the supplied mask to the
96 *	given value.
97 */
98void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
99		      u32 val)
100{
101	u32 v = t4_read_reg(adapter, addr) & ~mask;
102
103	t4_write_reg(adapter, addr, v | val);
104	(void) t4_read_reg(adapter, addr);      /* flush */
105}
106
107/**
108 *	t4_read_indirect - read indirectly addressed registers
109 *	@adap: the adapter
110 *	@addr_reg: register holding the indirect address
111 *	@data_reg: register holding the value of the indirect register
112 *	@vals: where the read register values are stored
113 *	@nregs: how many indirect registers to read
114 *	@start_idx: index of first indirect register to read
115 *
116 *	Reads registers that are accessed indirectly through an address/data
117 *	register pair.
118 */
119void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120			     unsigned int data_reg, u32 *vals,
121			     unsigned int nregs, unsigned int start_idx)
122{
123	while (nregs--) {
124		t4_write_reg(adap, addr_reg, start_idx);
125		*vals++ = t4_read_reg(adap, data_reg);
126		start_idx++;
127	}
128}
129
130/**
131 *	t4_write_indirect - write indirectly addressed registers
132 *	@adap: the adapter
133 *	@addr_reg: register holding the indirect addresses
134 *	@data_reg: register holding the value for the indirect registers
135 *	@vals: values to write
136 *	@nregs: how many indirect registers to write
137 *	@start_idx: address of first indirect register to write
138 *
139 *	Writes a sequential block of registers that are accessed indirectly
140 *	through an address/data register pair.
141 */
142void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
143		       unsigned int data_reg, const u32 *vals,
144		       unsigned int nregs, unsigned int start_idx)
145{
146	while (nregs--) {
147		t4_write_reg(adap, addr_reg, start_idx++);
148		t4_write_reg(adap, data_reg, *vals++);
149	}
150}
151
152/*
153 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
154 * mechanism.  This guarantees that we get the real value even if we're
155 * operating within a Virtual Machine and the Hypervisor is trapping our
156 * Configuration Space accesses.
157 *
158 * N.B. This routine should only be used as a last resort: the firmware uses
159 *      the backdoor registers on a regular basis and we can end up
160 *      conflicting with it's uses!
161 */
162u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
163{
164	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
165	u32 val;
166
167	if (chip_id(adap) <= CHELSIO_T5)
168		req |= F_ENABLE;
169	else
170		req |= F_T6_ENABLE;
171
172	if (is_t4(adap))
173		req |= F_LOCALCFG;
174
175	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
176	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
177
178	/*
179	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
180	 * Configuration Space read.  (None of the other fields matter when
181	 * F_ENABLE is 0 so a simple register write is easier than a
182	 * read-modify-write via t4_set_reg_field().)
183	 */
184	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
185
186	return val;
187}
188
/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	/*
	 * Indexed by G_PCIE_FW_EVAL(pcie_fw).
	 * NOTE(review): assumes the EVAL field is 3 bits wide so every
	 * possible value maps into this 8-entry table -- confirm in t4_regs.h.
	 */
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		adap->flags &= ~FW_OK;	/* firmware no longer trustworthy */
		CH_ERR(adap, "firmware reports adapter error: %s (0x%08x)\n",
		    reason[G_PCIE_FW_EVAL(pcie_fw)], pcie_fw);
		/*
		 * All-ones presumably means the device itself is gone (reads
		 * return 0xffffffff), so skip the devlog dump in that case.
		 */
		if (pcie_fw != 0xffffffff)
			t4_os_dump_devlog(adap);
	}
}
220
221/*
222 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
223 */
224static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
225			 u32 mbox_addr)
226{
227	for ( ; nflit; nflit--, mbox_addr += 8)
228		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
229}
230
/*
 * Handle a FW assertion reported in a mailbox: log the source location and
 * the two assertion values carried in the FW_DEBUG_CMD payload.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * NOTE(review): %.16s reads 16 bytes starting at filename_0_7,
	 * presumably running into an adjacent filename_8_15 field --
	 * confirm the struct keeps those bytes contiguous.
	 */
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
243
/*
 * Per-port snapshot of TX-related MPS statistics, used to detect a wedged
 * TX path while waiting on a mailbox command (see check_tx_state()).
 */
struct port_tx_state {
	uint64_t rx_pause;	/* MPS RX_PORT_PAUSE counter */
	uint64_t tx_frames;	/* MPS TX_PORT_FRAMES counter */
};
248
249static void
250read_tx_state_one(struct adapter *sc, int i, struct port_tx_state *tx_state)
251{
252	uint32_t rx_pause_reg, tx_frames_reg;
253
254	if (is_t4(sc)) {
255		tx_frames_reg = PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
256		rx_pause_reg = PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
257	} else {
258		tx_frames_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_TX_PORT_FRAMES_L);
259		rx_pause_reg = T5_PORT_REG(i, A_MPS_PORT_STAT_RX_PORT_PAUSE_L);
260	}
261
262	tx_state->rx_pause = t4_read_reg64(sc, rx_pause_reg);
263	tx_state->tx_frames = t4_read_reg64(sc, tx_frames_reg);
264}
265
266static void
267read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
268{
269	int i;
270
271	for_each_port(sc, i)
272		read_tx_state_one(sc, i, &tx_state[i]);
273}
274
/*
 * Compare each port's current TX state against the previous snapshot in
 * @tx_state (which is refreshed in place).  If a port has TX enabled,
 * received more pause frames, and transmitted nothing new since the
 * snapshot, its TX path looks stuck: bounce F_PORTTXEN to unwedge it.
 */
static void
check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
	uint32_t port_ctl_reg;
	uint64_t tx_frames, rx_pause;
	int i;

	for_each_port(sc, i) {
		rx_pause = tx_state[i].rx_pause;
		tx_frames = tx_state[i].tx_frames;
		read_tx_state_one(sc, i, &tx_state[i]);	/* update */

		if (is_t4(sc))
			port_ctl_reg = PORT_REG(i, A_MPS_PORT_CTL);
		else
			port_ctl_reg = T5_PORT_REG(i, A_MPS_PORT_CTL);
		if (t4_read_reg(sc, port_ctl_reg) & F_PORTTXEN &&
		    rx_pause != tx_state[i].rx_pause &&
		    tx_frames == tx_state[i].tx_frames) {
			/* Toggle TX enable off and back on to reset the path. */
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, 0);
			mdelay(1);
			t4_set_reg_field(sc, port_ctl_reg, F_PORTTXEN, F_PORTTXEN);
		}
	}
}
300
/* Value read from the mailbox control register when the PF has no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret, next_tx_check;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;
	struct port_tx_state tx_state[MAX_NPORTS];

	if (adap->flags & CHK_MBOX_ACCESS)
		ASSERT_SYNCHRONIZED_OP(adap);

	/* Commands must be a positive multiple of 16 bytes, up to MBOX_LEN. */
	if (size <= 0 || (size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use a different mailbox data/control register layout. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.  Up to 4 back-to-back
	 * reads; we stop as soon as the owner field is non-NONE.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_DUMP_MBOX(adap, mbox, data_reg, "VLD", NULL, true);
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	memset(cmd_rpl, 0, sizeof(cmd_rpl));
	memcpy(cmd_rpl, cmd, size);
	CH_DUMP_MBOX(adap, mbox, 0, "cmd", cmd_rpl, false);
	for (i = 0; i < ARRAY_SIZE(cmd_rpl); i++)
		t4_write_reg64(adap, data_reg + i * 8, be64_to_cpu(cmd_rpl[i]));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	/* Hand the mailbox to the firmware and mark the message valid. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	read_tx_state(adap, &tx_state[0]);	/* also flushes the write_reg */
	next_tx_check = 1000;	/* run check_tx_state() every ~1000ms waited */
	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}

		/* Periodically look for (and fix) a wedged TX path. */
		if (i >= next_tx_check) {
			check_tx_state(adap, &tx_state[0]);
			next_tx_check = i + 1000;
		}

		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		/* We have no access to the mailbox right now; keep waiting. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership came back without a valid reply: release
			 * the mailbox and keep waiting. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			CH_DUMP_MBOX(adap, mbox, 0, "rpl", cmd_rpl, false);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			res = be64_to_cpu(cmd_rpl[0]);
			/* A FW_DEBUG_CMD reply is an assertion, not our rpl. */
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mbox %d timed out (0x%08x).\n",
	    *(const u8 *)cmd, mbox, pcie_fw);
	CH_DUMP_MBOX(adap, mbox, 0, "cmdsent", cmd_rpl, true);
	CH_DUMP_MBOX(adap, mbox, data_reg, "current", NULL, true);

	if (pcie_fw & F_PCIE_FW_ERR) {
		ret = -ENXIO;
		t4_report_fw_error(adap);
	} else {
		ret = -ETIMEDOUT;
		t4_os_dump_devlog(adap);
	}

	/* A mailbox timeout is treated as a fatal adapter error. */
	t4_fatal_err(adap, true);
	return ret;
}
507
508int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
509		    void *rpl, bool sleep_ok)
510{
511		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
512					       sleep_ok, FW_CMD_MAX_TIMEOUT);
513
514}
515
/*
 * Log the ECC error address and the 9 BIST status data words for the given
 * EDC (MEM_EDC0 or MEM_EDC1).  No-op (with a warning) on T4 or for any
 * other idx.  Always returns 0.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* Dump all 9 consecutive 64-bit BIST status rdata words. */
	CH_WARN(adap,
	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}
552
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC; T5+ have per-MC register banks. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Only one BIST operation may be in flight at a time. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the 64-byte-aligned address and start. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* The 16 data words come back in reverse order; word 16 is the ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
607
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macro are missing in t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						    idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Only one BIST operation may be in flight at a time. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* Program a 64-byte read at the 64-byte-aligned address and start. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* The 16 data words come back in reverse order; word 16 is the ECC. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
672
673/**
674 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
675 *	@adap: the adapter
676 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
677 *	@addr: address within indicated memory type
678 *	@len: amount of memory to read
679 *	@buf: host memory buffer
680 *
681 *	Reads an [almost] arbitrary memory region in the firmware: the
682 *	firmware memory address, length and host buffer must be aligned on
683 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
684 *	the firmware's memory.  If this memory contains data structures which
685 *	contain multi-byte integers, it's the callers responsibility to
686 *	perform appropriate byte order conversions.
687 */
688int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
689		__be32 *buf)
690{
691	u32 pos, start, end, offset;
692	int ret;
693
694	/*
695	 * Argument sanity checks ...
696	 */
697	if ((addr & 0x3) || (len & 0x3))
698		return -EINVAL;
699
700	/*
701	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
702	 * need to round down the start and round up the end.  We'll start
703	 * copying out of the first line at (addr - start) a word at a time.
704	 */
705	start = rounddown2(addr, 64);
706	end = roundup2(addr + len, 64);
707	offset = (addr - start)/sizeof(__be32);
708
709	for (pos = start; pos < end; pos += 64, offset = 0) {
710		__be32 data[16];
711
712		/*
713		 * Read the chip's memory block and bail if there's an error.
714		 */
715		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
716			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
717		else
718			ret = t4_edc_read(adap, mtype, pos, data, NULL);
719		if (ret)
720			return ret;
721
722		/*
723		 * Copy the data into the caller's memory buffer.
724		 */
725		while (offset < 16 && len > 0) {
726			*buf++ = data[offset++];
727			len -= sizeof(__be32);
728		}
729	}
730
731	return 0;
732}
733
/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		/* Build a read request against the FUNC_PCIE address space. */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}
783
784/**
785 *	t4_get_regs_len - return the size of the chips register set
786 *	@adapter: the adapter
787 *
788 *	Returns the size of the chip's BAR0 register space.
789 */
790unsigned int t4_get_regs_len(struct adapter *adapter)
791{
792	unsigned int chip_version = chip_id(adapter);
793
794	switch (chip_version) {
795	case CHELSIO_T4:
796		if (adapter->flags & IS_VF)
797			return FW_T4VF_REGMAP_SIZE;
798		return T4_REGMAP_SIZE;
799
800	case CHELSIO_T5:
801	case CHELSIO_T6:
802		if (adapter->flags & IS_VF)
803			return FW_T4VF_REGMAP_SIZE;
804		return T5_REGMAP_SIZE;
805	}
806
807	CH_ERR(adapter,
808		"Unsupported chip version %d\n", chip_version);
809	return 0;
810}
811
812/**
813 *	t4_get_regs - read chip registers into provided buffer
814 *	@adap: the adapter
815 *	@buf: register buffer
816 *	@buf_size: size (in bytes) of register buffer
817 *
818 *	If the provided register buffer isn't large enough for the chip's
819 *	full register range, the register dump will be truncated to the
820 *	register buffer's size.
821 */
822void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
823{
824	static const unsigned int t4_reg_ranges[] = {
825		0x1008, 0x1108,
826		0x1180, 0x1184,
827		0x1190, 0x1194,
828		0x11a0, 0x11a4,
829		0x11b0, 0x11b4,
830		0x11fc, 0x123c,
831		0x1300, 0x173c,
832		0x1800, 0x18fc,
833		0x3000, 0x30d8,
834		0x30e0, 0x30e4,
835		0x30ec, 0x5910,
836		0x5920, 0x5924,
837		0x5960, 0x5960,
838		0x5968, 0x5968,
839		0x5970, 0x5970,
840		0x5978, 0x5978,
841		0x5980, 0x5980,
842		0x5988, 0x5988,
843		0x5990, 0x5990,
844		0x5998, 0x5998,
845		0x59a0, 0x59d4,
846		0x5a00, 0x5ae0,
847		0x5ae8, 0x5ae8,
848		0x5af0, 0x5af0,
849		0x5af8, 0x5af8,
850		0x6000, 0x6098,
851		0x6100, 0x6150,
852		0x6200, 0x6208,
853		0x6240, 0x6248,
854		0x6280, 0x62b0,
855		0x62c0, 0x6338,
856		0x6370, 0x638c,
857		0x6400, 0x643c,
858		0x6500, 0x6524,
859		0x6a00, 0x6a04,
860		0x6a14, 0x6a38,
861		0x6a60, 0x6a70,
862		0x6a78, 0x6a78,
863		0x6b00, 0x6b0c,
864		0x6b1c, 0x6b84,
865		0x6bf0, 0x6bf8,
866		0x6c00, 0x6c0c,
867		0x6c1c, 0x6c84,
868		0x6cf0, 0x6cf8,
869		0x6d00, 0x6d0c,
870		0x6d1c, 0x6d84,
871		0x6df0, 0x6df8,
872		0x6e00, 0x6e0c,
873		0x6e1c, 0x6e84,
874		0x6ef0, 0x6ef8,
875		0x6f00, 0x6f0c,
876		0x6f1c, 0x6f84,
877		0x6ff0, 0x6ff8,
878		0x7000, 0x700c,
879		0x701c, 0x7084,
880		0x70f0, 0x70f8,
881		0x7100, 0x710c,
882		0x711c, 0x7184,
883		0x71f0, 0x71f8,
884		0x7200, 0x720c,
885		0x721c, 0x7284,
886		0x72f0, 0x72f8,
887		0x7300, 0x730c,
888		0x731c, 0x7384,
889		0x73f0, 0x73f8,
890		0x7400, 0x7450,
891		0x7500, 0x7530,
892		0x7600, 0x760c,
893		0x7614, 0x761c,
894		0x7680, 0x76cc,
895		0x7700, 0x7798,
896		0x77c0, 0x77fc,
897		0x7900, 0x79fc,
898		0x7b00, 0x7b58,
899		0x7b60, 0x7b84,
900		0x7b8c, 0x7c38,
901		0x7d00, 0x7d38,
902		0x7d40, 0x7d80,
903		0x7d8c, 0x7ddc,
904		0x7de4, 0x7e04,
905		0x7e10, 0x7e1c,
906		0x7e24, 0x7e38,
907		0x7e40, 0x7e44,
908		0x7e4c, 0x7e78,
909		0x7e80, 0x7ea4,
910		0x7eac, 0x7edc,
911		0x7ee8, 0x7efc,
912		0x8dc0, 0x8e04,
913		0x8e10, 0x8e1c,
914		0x8e30, 0x8e78,
915		0x8ea0, 0x8eb8,
916		0x8ec0, 0x8f6c,
917		0x8fc0, 0x9008,
918		0x9010, 0x9058,
919		0x9060, 0x9060,
920		0x9068, 0x9074,
921		0x90fc, 0x90fc,
922		0x9400, 0x9408,
923		0x9410, 0x9458,
924		0x9600, 0x9600,
925		0x9608, 0x9638,
926		0x9640, 0x96bc,
927		0x9800, 0x9808,
928		0x9820, 0x983c,
929		0x9850, 0x9864,
930		0x9c00, 0x9c6c,
931		0x9c80, 0x9cec,
932		0x9d00, 0x9d6c,
933		0x9d80, 0x9dec,
934		0x9e00, 0x9e6c,
935		0x9e80, 0x9eec,
936		0x9f00, 0x9f6c,
937		0x9f80, 0x9fec,
938		0xd004, 0xd004,
939		0xd010, 0xd03c,
940		0xdfc0, 0xdfe0,
941		0xe000, 0xea7c,
942		0xf000, 0x11110,
943		0x11118, 0x11190,
944		0x19040, 0x1906c,
945		0x19078, 0x19080,
946		0x1908c, 0x190e4,
947		0x190f0, 0x190f8,
948		0x19100, 0x19110,
949		0x19120, 0x19124,
950		0x19150, 0x19194,
951		0x1919c, 0x191b0,
952		0x191d0, 0x191e8,
953		0x19238, 0x1924c,
954		0x193f8, 0x1943c,
955		0x1944c, 0x19474,
956		0x19490, 0x194e0,
957		0x194f0, 0x194f8,
958		0x19800, 0x19c08,
959		0x19c10, 0x19c90,
960		0x19ca0, 0x19ce4,
961		0x19cf0, 0x19d40,
962		0x19d50, 0x19d94,
963		0x19da0, 0x19de8,
964		0x19df0, 0x19e40,
965		0x19e50, 0x19e90,
966		0x19ea0, 0x19f4c,
967		0x1a000, 0x1a004,
968		0x1a010, 0x1a06c,
969		0x1a0b0, 0x1a0e4,
970		0x1a0ec, 0x1a0f4,
971		0x1a100, 0x1a108,
972		0x1a114, 0x1a120,
973		0x1a128, 0x1a130,
974		0x1a138, 0x1a138,
975		0x1a190, 0x1a1c4,
976		0x1a1fc, 0x1a1fc,
977		0x1e040, 0x1e04c,
978		0x1e284, 0x1e28c,
979		0x1e2c0, 0x1e2c0,
980		0x1e2e0, 0x1e2e0,
981		0x1e300, 0x1e384,
982		0x1e3c0, 0x1e3c8,
983		0x1e440, 0x1e44c,
984		0x1e684, 0x1e68c,
985		0x1e6c0, 0x1e6c0,
986		0x1e6e0, 0x1e6e0,
987		0x1e700, 0x1e784,
988		0x1e7c0, 0x1e7c8,
989		0x1e840, 0x1e84c,
990		0x1ea84, 0x1ea8c,
991		0x1eac0, 0x1eac0,
992		0x1eae0, 0x1eae0,
993		0x1eb00, 0x1eb84,
994		0x1ebc0, 0x1ebc8,
995		0x1ec40, 0x1ec4c,
996		0x1ee84, 0x1ee8c,
997		0x1eec0, 0x1eec0,
998		0x1eee0, 0x1eee0,
999		0x1ef00, 0x1ef84,
1000		0x1efc0, 0x1efc8,
1001		0x1f040, 0x1f04c,
1002		0x1f284, 0x1f28c,
1003		0x1f2c0, 0x1f2c0,
1004		0x1f2e0, 0x1f2e0,
1005		0x1f300, 0x1f384,
1006		0x1f3c0, 0x1f3c8,
1007		0x1f440, 0x1f44c,
1008		0x1f684, 0x1f68c,
1009		0x1f6c0, 0x1f6c0,
1010		0x1f6e0, 0x1f6e0,
1011		0x1f700, 0x1f784,
1012		0x1f7c0, 0x1f7c8,
1013		0x1f840, 0x1f84c,
1014		0x1fa84, 0x1fa8c,
1015		0x1fac0, 0x1fac0,
1016		0x1fae0, 0x1fae0,
1017		0x1fb00, 0x1fb84,
1018		0x1fbc0, 0x1fbc8,
1019		0x1fc40, 0x1fc4c,
1020		0x1fe84, 0x1fe8c,
1021		0x1fec0, 0x1fec0,
1022		0x1fee0, 0x1fee0,
1023		0x1ff00, 0x1ff84,
1024		0x1ffc0, 0x1ffc8,
1025		0x20000, 0x2002c,
1026		0x20100, 0x2013c,
1027		0x20190, 0x201a0,
1028		0x201a8, 0x201b8,
1029		0x201c4, 0x201c8,
1030		0x20200, 0x20318,
1031		0x20400, 0x204b4,
1032		0x204c0, 0x20528,
1033		0x20540, 0x20614,
1034		0x21000, 0x21040,
1035		0x2104c, 0x21060,
1036		0x210c0, 0x210ec,
1037		0x21200, 0x21268,
1038		0x21270, 0x21284,
1039		0x212fc, 0x21388,
1040		0x21400, 0x21404,
1041		0x21500, 0x21500,
1042		0x21510, 0x21518,
1043		0x2152c, 0x21530,
1044		0x2153c, 0x2153c,
1045		0x21550, 0x21554,
1046		0x21600, 0x21600,
1047		0x21608, 0x2161c,
1048		0x21624, 0x21628,
1049		0x21630, 0x21634,
1050		0x2163c, 0x2163c,
1051		0x21700, 0x2171c,
1052		0x21780, 0x2178c,
1053		0x21800, 0x21818,
1054		0x21820, 0x21828,
1055		0x21830, 0x21848,
1056		0x21850, 0x21854,
1057		0x21860, 0x21868,
1058		0x21870, 0x21870,
1059		0x21878, 0x21898,
1060		0x218a0, 0x218a8,
1061		0x218b0, 0x218c8,
1062		0x218d0, 0x218d4,
1063		0x218e0, 0x218e8,
1064		0x218f0, 0x218f0,
1065		0x218f8, 0x21a18,
1066		0x21a20, 0x21a28,
1067		0x21a30, 0x21a48,
1068		0x21a50, 0x21a54,
1069		0x21a60, 0x21a68,
1070		0x21a70, 0x21a70,
1071		0x21a78, 0x21a98,
1072		0x21aa0, 0x21aa8,
1073		0x21ab0, 0x21ac8,
1074		0x21ad0, 0x21ad4,
1075		0x21ae0, 0x21ae8,
1076		0x21af0, 0x21af0,
1077		0x21af8, 0x21c18,
1078		0x21c20, 0x21c20,
1079		0x21c28, 0x21c30,
1080		0x21c38, 0x21c38,
1081		0x21c80, 0x21c98,
1082		0x21ca0, 0x21ca8,
1083		0x21cb0, 0x21cc8,
1084		0x21cd0, 0x21cd4,
1085		0x21ce0, 0x21ce8,
1086		0x21cf0, 0x21cf0,
1087		0x21cf8, 0x21d7c,
1088		0x21e00, 0x21e04,
1089		0x22000, 0x2202c,
1090		0x22100, 0x2213c,
1091		0x22190, 0x221a0,
1092		0x221a8, 0x221b8,
1093		0x221c4, 0x221c8,
1094		0x22200, 0x22318,
1095		0x22400, 0x224b4,
1096		0x224c0, 0x22528,
1097		0x22540, 0x22614,
1098		0x23000, 0x23040,
1099		0x2304c, 0x23060,
1100		0x230c0, 0x230ec,
1101		0x23200, 0x23268,
1102		0x23270, 0x23284,
1103		0x232fc, 0x23388,
1104		0x23400, 0x23404,
1105		0x23500, 0x23500,
1106		0x23510, 0x23518,
1107		0x2352c, 0x23530,
1108		0x2353c, 0x2353c,
1109		0x23550, 0x23554,
1110		0x23600, 0x23600,
1111		0x23608, 0x2361c,
1112		0x23624, 0x23628,
1113		0x23630, 0x23634,
1114		0x2363c, 0x2363c,
1115		0x23700, 0x2371c,
1116		0x23780, 0x2378c,
1117		0x23800, 0x23818,
1118		0x23820, 0x23828,
1119		0x23830, 0x23848,
1120		0x23850, 0x23854,
1121		0x23860, 0x23868,
1122		0x23870, 0x23870,
1123		0x23878, 0x23898,
1124		0x238a0, 0x238a8,
1125		0x238b0, 0x238c8,
1126		0x238d0, 0x238d4,
1127		0x238e0, 0x238e8,
1128		0x238f0, 0x238f0,
1129		0x238f8, 0x23a18,
1130		0x23a20, 0x23a28,
1131		0x23a30, 0x23a48,
1132		0x23a50, 0x23a54,
1133		0x23a60, 0x23a68,
1134		0x23a70, 0x23a70,
1135		0x23a78, 0x23a98,
1136		0x23aa0, 0x23aa8,
1137		0x23ab0, 0x23ac8,
1138		0x23ad0, 0x23ad4,
1139		0x23ae0, 0x23ae8,
1140		0x23af0, 0x23af0,
1141		0x23af8, 0x23c18,
1142		0x23c20, 0x23c20,
1143		0x23c28, 0x23c30,
1144		0x23c38, 0x23c38,
1145		0x23c80, 0x23c98,
1146		0x23ca0, 0x23ca8,
1147		0x23cb0, 0x23cc8,
1148		0x23cd0, 0x23cd4,
1149		0x23ce0, 0x23ce8,
1150		0x23cf0, 0x23cf0,
1151		0x23cf8, 0x23d7c,
1152		0x23e00, 0x23e04,
1153		0x24000, 0x2402c,
1154		0x24100, 0x2413c,
1155		0x24190, 0x241a0,
1156		0x241a8, 0x241b8,
1157		0x241c4, 0x241c8,
1158		0x24200, 0x24318,
1159		0x24400, 0x244b4,
1160		0x244c0, 0x24528,
1161		0x24540, 0x24614,
1162		0x25000, 0x25040,
1163		0x2504c, 0x25060,
1164		0x250c0, 0x250ec,
1165		0x25200, 0x25268,
1166		0x25270, 0x25284,
1167		0x252fc, 0x25388,
1168		0x25400, 0x25404,
1169		0x25500, 0x25500,
1170		0x25510, 0x25518,
1171		0x2552c, 0x25530,
1172		0x2553c, 0x2553c,
1173		0x25550, 0x25554,
1174		0x25600, 0x25600,
1175		0x25608, 0x2561c,
1176		0x25624, 0x25628,
1177		0x25630, 0x25634,
1178		0x2563c, 0x2563c,
1179		0x25700, 0x2571c,
1180		0x25780, 0x2578c,
1181		0x25800, 0x25818,
1182		0x25820, 0x25828,
1183		0x25830, 0x25848,
1184		0x25850, 0x25854,
1185		0x25860, 0x25868,
1186		0x25870, 0x25870,
1187		0x25878, 0x25898,
1188		0x258a0, 0x258a8,
1189		0x258b0, 0x258c8,
1190		0x258d0, 0x258d4,
1191		0x258e0, 0x258e8,
1192		0x258f0, 0x258f0,
1193		0x258f8, 0x25a18,
1194		0x25a20, 0x25a28,
1195		0x25a30, 0x25a48,
1196		0x25a50, 0x25a54,
1197		0x25a60, 0x25a68,
1198		0x25a70, 0x25a70,
1199		0x25a78, 0x25a98,
1200		0x25aa0, 0x25aa8,
1201		0x25ab0, 0x25ac8,
1202		0x25ad0, 0x25ad4,
1203		0x25ae0, 0x25ae8,
1204		0x25af0, 0x25af0,
1205		0x25af8, 0x25c18,
1206		0x25c20, 0x25c20,
1207		0x25c28, 0x25c30,
1208		0x25c38, 0x25c38,
1209		0x25c80, 0x25c98,
1210		0x25ca0, 0x25ca8,
1211		0x25cb0, 0x25cc8,
1212		0x25cd0, 0x25cd4,
1213		0x25ce0, 0x25ce8,
1214		0x25cf0, 0x25cf0,
1215		0x25cf8, 0x25d7c,
1216		0x25e00, 0x25e04,
1217		0x26000, 0x2602c,
1218		0x26100, 0x2613c,
1219		0x26190, 0x261a0,
1220		0x261a8, 0x261b8,
1221		0x261c4, 0x261c8,
1222		0x26200, 0x26318,
1223		0x26400, 0x264b4,
1224		0x264c0, 0x26528,
1225		0x26540, 0x26614,
1226		0x27000, 0x27040,
1227		0x2704c, 0x27060,
1228		0x270c0, 0x270ec,
1229		0x27200, 0x27268,
1230		0x27270, 0x27284,
1231		0x272fc, 0x27388,
1232		0x27400, 0x27404,
1233		0x27500, 0x27500,
1234		0x27510, 0x27518,
1235		0x2752c, 0x27530,
1236		0x2753c, 0x2753c,
1237		0x27550, 0x27554,
1238		0x27600, 0x27600,
1239		0x27608, 0x2761c,
1240		0x27624, 0x27628,
1241		0x27630, 0x27634,
1242		0x2763c, 0x2763c,
1243		0x27700, 0x2771c,
1244		0x27780, 0x2778c,
1245		0x27800, 0x27818,
1246		0x27820, 0x27828,
1247		0x27830, 0x27848,
1248		0x27850, 0x27854,
1249		0x27860, 0x27868,
1250		0x27870, 0x27870,
1251		0x27878, 0x27898,
1252		0x278a0, 0x278a8,
1253		0x278b0, 0x278c8,
1254		0x278d0, 0x278d4,
1255		0x278e0, 0x278e8,
1256		0x278f0, 0x278f0,
1257		0x278f8, 0x27a18,
1258		0x27a20, 0x27a28,
1259		0x27a30, 0x27a48,
1260		0x27a50, 0x27a54,
1261		0x27a60, 0x27a68,
1262		0x27a70, 0x27a70,
1263		0x27a78, 0x27a98,
1264		0x27aa0, 0x27aa8,
1265		0x27ab0, 0x27ac8,
1266		0x27ad0, 0x27ad4,
1267		0x27ae0, 0x27ae8,
1268		0x27af0, 0x27af0,
1269		0x27af8, 0x27c18,
1270		0x27c20, 0x27c20,
1271		0x27c28, 0x27c30,
1272		0x27c38, 0x27c38,
1273		0x27c80, 0x27c98,
1274		0x27ca0, 0x27ca8,
1275		0x27cb0, 0x27cc8,
1276		0x27cd0, 0x27cd4,
1277		0x27ce0, 0x27ce8,
1278		0x27cf0, 0x27cf0,
1279		0x27cf8, 0x27d7c,
1280		0x27e00, 0x27e04,
1281	};
1282
/*
 * Register address ranges readable on a T4 virtual function, expressed as
 * inclusive {start, end} pairs.  Entries alternate start/end, so the array
 * length must stay even.  NOTE(review): presumably walked by the register
 * dump path (t4_get_regs) — confirm against the caller, which is outside
 * this view.
 */
static const unsigned int t4vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	/* start == end: single-register range (T5 extends this to
	 * A_PL_VF_REVISION; see t5vf_reg_ranges). */
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	/* NUM_CIM_PF_MAILBOX_DATA_INSTANCES 32-bit mailbox data words
	 * starting at FW_T4VF_MBDATA_BASE_ADDR. */
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
1294
/*
 * Register addresses readable on a T5 physical function, expressed as
 * inclusive {start, end} pairs (a pair with start == end names a single
 * register).  Entries alternate start/end, so the array length must stay
 * even, and within each pair start <= end.  The values are raw hardware
 * addresses; do not reformat or "simplify" them.  NOTE(review):
 * presumably consumed by the register-dump routine (t4_get_regs) — the
 * caller is outside this view, confirm before relying on that.
 */
static const unsigned int t5_reg_ranges[] = {
	0x1008, 0x10c0,
	0x10cc, 0x10f8,
	0x1100, 0x1100,
	0x110c, 0x1148,
	0x1180, 0x1184,
	0x1190, 0x1194,
	0x11a0, 0x11a4,
	0x11b0, 0x11b4,
	0x11fc, 0x123c,
	0x1280, 0x173c,
	0x1800, 0x18fc,
	0x3000, 0x3028,
	0x3060, 0x30b0,
	0x30b8, 0x30d8,
	0x30e0, 0x30fc,
	0x3140, 0x357c,
	0x35a8, 0x35cc,
	0x35ec, 0x35ec,
	0x3600, 0x5624,
	0x56cc, 0x56ec,
	0x56f4, 0x5720,
	0x5728, 0x575c,
	0x580c, 0x5814,
	0x5890, 0x589c,
	0x58a4, 0x58ac,
	0x58b8, 0x58bc,
	0x5940, 0x59c8,
	0x59d0, 0x59dc,
	0x59fc, 0x5a18,
	0x5a60, 0x5a70,
	0x5a80, 0x5a9c,
	0x5b94, 0x5bfc,
	0x6000, 0x6020,
	0x6028, 0x6040,
	0x6058, 0x609c,
	0x60a8, 0x614c,
	0x7700, 0x7798,
	0x77c0, 0x78fc,
	0x7b00, 0x7b58,
	0x7b60, 0x7b84,
	0x7b8c, 0x7c54,
	0x7d00, 0x7d38,
	0x7d40, 0x7d80,
	0x7d8c, 0x7ddc,
	0x7de4, 0x7e04,
	0x7e10, 0x7e1c,
	0x7e24, 0x7e38,
	0x7e40, 0x7e44,
	0x7e4c, 0x7e78,
	0x7e80, 0x7edc,
	0x7ee8, 0x7efc,
	0x8dc0, 0x8de0,
	0x8df8, 0x8e04,
	0x8e10, 0x8e84,
	0x8ea0, 0x8f84,
	0x8fc0, 0x9058,
	0x9060, 0x9060,
	0x9068, 0x90f8,
	0x9400, 0x9408,
	0x9410, 0x9470,
	0x9600, 0x9600,
	0x9608, 0x9638,
	0x9640, 0x96f4,
	0x9800, 0x9808,
	0x9820, 0x983c,
	0x9850, 0x9864,
	0x9c00, 0x9c6c,
	0x9c80, 0x9cec,
	0x9d00, 0x9d6c,
	0x9d80, 0x9dec,
	0x9e00, 0x9e6c,
	0x9e80, 0x9eec,
	0x9f00, 0x9f6c,
	0x9f80, 0xa020,
	0xd004, 0xd004,
	0xd010, 0xd03c,
	0xdfc0, 0xdfe0,
	0xe000, 0x1106c,
	0x11074, 0x11088,
	0x1109c, 0x1117c,
	0x11190, 0x11204,
	0x19040, 0x1906c,
	0x19078, 0x19080,
	0x1908c, 0x190e8,
	0x190f0, 0x190f8,
	0x19100, 0x19110,
	0x19120, 0x19124,
	0x19150, 0x19194,
	0x1919c, 0x191b0,
	0x191d0, 0x191e8,
	0x19238, 0x19290,
	0x193f8, 0x19428,
	0x19430, 0x19444,
	0x1944c, 0x1946c,
	0x19474, 0x19474,
	0x19490, 0x194cc,
	0x194f0, 0x194f8,
	0x19c00, 0x19c08,
	0x19c10, 0x19c60,
	0x19c94, 0x19ce4,
	0x19cf0, 0x19d40,
	0x19d50, 0x19d94,
	0x19da0, 0x19de8,
	0x19df0, 0x19e10,
	0x19e50, 0x19e90,
	0x19ea0, 0x19f24,
	0x19f34, 0x19f34,
	0x19f40, 0x19f50,
	0x19f90, 0x19fb4,
	0x19fc4, 0x19fe4,
	0x1a000, 0x1a004,
	0x1a010, 0x1a06c,
	0x1a0b0, 0x1a0e4,
	0x1a0ec, 0x1a0f8,
	0x1a100, 0x1a108,
	0x1a114, 0x1a120,
	0x1a128, 0x1a130,
	0x1a138, 0x1a138,
	0x1a190, 0x1a1c4,
	0x1a1fc, 0x1a1fc,
	0x1e008, 0x1e00c,
	0x1e040, 0x1e044,
	0x1e04c, 0x1e04c,
	0x1e284, 0x1e290,
	0x1e2c0, 0x1e2c0,
	0x1e2e0, 0x1e2e0,
	0x1e300, 0x1e384,
	0x1e3c0, 0x1e3c8,
	0x1e408, 0x1e40c,
	0x1e440, 0x1e444,
	0x1e44c, 0x1e44c,
	0x1e684, 0x1e690,
	0x1e6c0, 0x1e6c0,
	0x1e6e0, 0x1e6e0,
	0x1e700, 0x1e784,
	0x1e7c0, 0x1e7c8,
	0x1e808, 0x1e80c,
	0x1e840, 0x1e844,
	0x1e84c, 0x1e84c,
	0x1ea84, 0x1ea90,
	0x1eac0, 0x1eac0,
	0x1eae0, 0x1eae0,
	0x1eb00, 0x1eb84,
	0x1ebc0, 0x1ebc8,
	0x1ec08, 0x1ec0c,
	0x1ec40, 0x1ec44,
	0x1ec4c, 0x1ec4c,
	0x1ee84, 0x1ee90,
	0x1eec0, 0x1eec0,
	0x1eee0, 0x1eee0,
	0x1ef00, 0x1ef84,
	0x1efc0, 0x1efc8,
	0x1f008, 0x1f00c,
	0x1f040, 0x1f044,
	0x1f04c, 0x1f04c,
	0x1f284, 0x1f290,
	0x1f2c0, 0x1f2c0,
	0x1f2e0, 0x1f2e0,
	0x1f300, 0x1f384,
	0x1f3c0, 0x1f3c8,
	0x1f408, 0x1f40c,
	0x1f440, 0x1f444,
	0x1f44c, 0x1f44c,
	0x1f684, 0x1f690,
	0x1f6c0, 0x1f6c0,
	0x1f6e0, 0x1f6e0,
	0x1f700, 0x1f784,
	0x1f7c0, 0x1f7c8,
	0x1f808, 0x1f80c,
	0x1f840, 0x1f844,
	0x1f84c, 0x1f84c,
	0x1fa84, 0x1fa90,
	0x1fac0, 0x1fac0,
	0x1fae0, 0x1fae0,
	0x1fb00, 0x1fb84,
	0x1fbc0, 0x1fbc8,
	0x1fc08, 0x1fc0c,
	0x1fc40, 0x1fc44,
	0x1fc4c, 0x1fc4c,
	0x1fe84, 0x1fe90,
	0x1fec0, 0x1fec0,
	0x1fee0, 0x1fee0,
	0x1ff00, 0x1ff84,
	0x1ffc0, 0x1ffc8,
	0x30000, 0x30030,
	0x30100, 0x30144,
	0x30190, 0x301a0,
	0x301a8, 0x301b8,
	0x301c4, 0x301c8,
	0x301d0, 0x301d0,
	0x30200, 0x30318,
	0x30400, 0x304b4,
	0x304c0, 0x3052c,
	0x30540, 0x3061c,
	0x30800, 0x30828,
	0x30834, 0x30834,
	0x308c0, 0x30908,
	0x30910, 0x309ac,
	0x30a00, 0x30a14,
	0x30a1c, 0x30a2c,
	0x30a44, 0x30a50,
	0x30a74, 0x30a74,
	0x30a7c, 0x30afc,
	0x30b08, 0x30c24,
	0x30d00, 0x30d00,
	0x30d08, 0x30d14,
	0x30d1c, 0x30d20,
	0x30d3c, 0x30d3c,
	0x30d48, 0x30d50,
	0x31200, 0x3120c,
	0x31220, 0x31220,
	0x31240, 0x31240,
	0x31600, 0x3160c,
	0x31a00, 0x31a1c,
	0x31e00, 0x31e20,
	0x31e38, 0x31e3c,
	0x31e80, 0x31e80,
	0x31e88, 0x31ea8,
	0x31eb0, 0x31eb4,
	0x31ec8, 0x31ed4,
	0x31fb8, 0x32004,
	0x32200, 0x32200,
	0x32208, 0x32240,
	0x32248, 0x32280,
	0x32288, 0x322c0,
	0x322c8, 0x322fc,
	0x32600, 0x32630,
	0x32a00, 0x32abc,
	0x32b00, 0x32b10,
	0x32b20, 0x32b30,
	0x32b40, 0x32b50,
	0x32b60, 0x32b70,
	0x33000, 0x33028,
	0x33030, 0x33048,
	0x33060, 0x33068,
	0x33070, 0x3309c,
	0x330f0, 0x33128,
	0x33130, 0x33148,
	0x33160, 0x33168,
	0x33170, 0x3319c,
	0x331f0, 0x33238,
	0x33240, 0x33240,
	0x33248, 0x33250,
	0x3325c, 0x33264,
	0x33270, 0x332b8,
	0x332c0, 0x332e4,
	0x332f8, 0x33338,
	0x33340, 0x33340,
	0x33348, 0x33350,
	0x3335c, 0x33364,
	0x33370, 0x333b8,
	0x333c0, 0x333e4,
	0x333f8, 0x33428,
	0x33430, 0x33448,
	0x33460, 0x33468,
	0x33470, 0x3349c,
	0x334f0, 0x33528,
	0x33530, 0x33548,
	0x33560, 0x33568,
	0x33570, 0x3359c,
	0x335f0, 0x33638,
	0x33640, 0x33640,
	0x33648, 0x33650,
	0x3365c, 0x33664,
	0x33670, 0x336b8,
	0x336c0, 0x336e4,
	0x336f8, 0x33738,
	0x33740, 0x33740,
	0x33748, 0x33750,
	0x3375c, 0x33764,
	0x33770, 0x337b8,
	0x337c0, 0x337e4,
	0x337f8, 0x337fc,
	0x33814, 0x33814,
	0x3382c, 0x3382c,
	0x33880, 0x3388c,
	0x338e8, 0x338ec,
	0x33900, 0x33928,
	0x33930, 0x33948,
	0x33960, 0x33968,
	0x33970, 0x3399c,
	0x339f0, 0x33a38,
	0x33a40, 0x33a40,
	0x33a48, 0x33a50,
	0x33a5c, 0x33a64,
	0x33a70, 0x33ab8,
	0x33ac0, 0x33ae4,
	0x33af8, 0x33b10,
	0x33b28, 0x33b28,
	0x33b3c, 0x33b50,
	0x33bf0, 0x33c10,
	0x33c28, 0x33c28,
	0x33c3c, 0x33c50,
	0x33cf0, 0x33cfc,
	0x34000, 0x34030,
	0x34100, 0x34144,
	0x34190, 0x341a0,
	0x341a8, 0x341b8,
	0x341c4, 0x341c8,
	0x341d0, 0x341d0,
	0x34200, 0x34318,
	0x34400, 0x344b4,
	0x344c0, 0x3452c,
	0x34540, 0x3461c,
	0x34800, 0x34828,
	0x34834, 0x34834,
	0x348c0, 0x34908,
	0x34910, 0x349ac,
	0x34a00, 0x34a14,
	0x34a1c, 0x34a2c,
	0x34a44, 0x34a50,
	0x34a74, 0x34a74,
	0x34a7c, 0x34afc,
	0x34b08, 0x34c24,
	0x34d00, 0x34d00,
	0x34d08, 0x34d14,
	0x34d1c, 0x34d20,
	0x34d3c, 0x34d3c,
	0x34d48, 0x34d50,
	0x35200, 0x3520c,
	0x35220, 0x35220,
	0x35240, 0x35240,
	0x35600, 0x3560c,
	0x35a00, 0x35a1c,
	0x35e00, 0x35e20,
	0x35e38, 0x35e3c,
	0x35e80, 0x35e80,
	0x35e88, 0x35ea8,
	0x35eb0, 0x35eb4,
	0x35ec8, 0x35ed4,
	0x35fb8, 0x36004,
	0x36200, 0x36200,
	0x36208, 0x36240,
	0x36248, 0x36280,
	0x36288, 0x362c0,
	0x362c8, 0x362fc,
	0x36600, 0x36630,
	0x36a00, 0x36abc,
	0x36b00, 0x36b10,
	0x36b20, 0x36b30,
	0x36b40, 0x36b50,
	0x36b60, 0x36b70,
	0x37000, 0x37028,
	0x37030, 0x37048,
	0x37060, 0x37068,
	0x37070, 0x3709c,
	0x370f0, 0x37128,
	0x37130, 0x37148,
	0x37160, 0x37168,
	0x37170, 0x3719c,
	0x371f0, 0x37238,
	0x37240, 0x37240,
	0x37248, 0x37250,
	0x3725c, 0x37264,
	0x37270, 0x372b8,
	0x372c0, 0x372e4,
	0x372f8, 0x37338,
	0x37340, 0x37340,
	0x37348, 0x37350,
	0x3735c, 0x37364,
	0x37370, 0x373b8,
	0x373c0, 0x373e4,
	0x373f8, 0x37428,
	0x37430, 0x37448,
	0x37460, 0x37468,
	0x37470, 0x3749c,
	0x374f0, 0x37528,
	0x37530, 0x37548,
	0x37560, 0x37568,
	0x37570, 0x3759c,
	0x375f0, 0x37638,
	0x37640, 0x37640,
	0x37648, 0x37650,
	0x3765c, 0x37664,
	0x37670, 0x376b8,
	0x376c0, 0x376e4,
	0x376f8, 0x37738,
	0x37740, 0x37740,
	0x37748, 0x37750,
	0x3775c, 0x37764,
	0x37770, 0x377b8,
	0x377c0, 0x377e4,
	0x377f8, 0x377fc,
	0x37814, 0x37814,
	0x3782c, 0x3782c,
	0x37880, 0x3788c,
	0x378e8, 0x378ec,
	0x37900, 0x37928,
	0x37930, 0x37948,
	0x37960, 0x37968,
	0x37970, 0x3799c,
	0x379f0, 0x37a38,
	0x37a40, 0x37a40,
	0x37a48, 0x37a50,
	0x37a5c, 0x37a64,
	0x37a70, 0x37ab8,
	0x37ac0, 0x37ae4,
	0x37af8, 0x37b10,
	0x37b28, 0x37b28,
	0x37b3c, 0x37b50,
	0x37bf0, 0x37c10,
	0x37c28, 0x37c28,
	0x37c3c, 0x37c50,
	0x37cf0, 0x37cfc,
	0x38000, 0x38030,
	0x38100, 0x38144,
	0x38190, 0x381a0,
	0x381a8, 0x381b8,
	0x381c4, 0x381c8,
	0x381d0, 0x381d0,
	0x38200, 0x38318,
	0x38400, 0x384b4,
	0x384c0, 0x3852c,
	0x38540, 0x3861c,
	0x38800, 0x38828,
	0x38834, 0x38834,
	0x388c0, 0x38908,
	0x38910, 0x389ac,
	0x38a00, 0x38a14,
	0x38a1c, 0x38a2c,
	0x38a44, 0x38a50,
	0x38a74, 0x38a74,
	0x38a7c, 0x38afc,
	0x38b08, 0x38c24,
	0x38d00, 0x38d00,
	0x38d08, 0x38d14,
	0x38d1c, 0x38d20,
	0x38d3c, 0x38d3c,
	0x38d48, 0x38d50,
	0x39200, 0x3920c,
	0x39220, 0x39220,
	0x39240, 0x39240,
	0x39600, 0x3960c,
	0x39a00, 0x39a1c,
	0x39e00, 0x39e20,
	0x39e38, 0x39e3c,
	0x39e80, 0x39e80,
	0x39e88, 0x39ea8,
	0x39eb0, 0x39eb4,
	0x39ec8, 0x39ed4,
	0x39fb8, 0x3a004,
	0x3a200, 0x3a200,
	0x3a208, 0x3a240,
	0x3a248, 0x3a280,
	0x3a288, 0x3a2c0,
	0x3a2c8, 0x3a2fc,
	0x3a600, 0x3a630,
	0x3aa00, 0x3aabc,
	0x3ab00, 0x3ab10,
	0x3ab20, 0x3ab30,
	0x3ab40, 0x3ab50,
	0x3ab60, 0x3ab70,
	0x3b000, 0x3b028,
	0x3b030, 0x3b048,
	0x3b060, 0x3b068,
	0x3b070, 0x3b09c,
	0x3b0f0, 0x3b128,
	0x3b130, 0x3b148,
	0x3b160, 0x3b168,
	0x3b170, 0x3b19c,
	0x3b1f0, 0x3b238,
	0x3b240, 0x3b240,
	0x3b248, 0x3b250,
	0x3b25c, 0x3b264,
	0x3b270, 0x3b2b8,
	0x3b2c0, 0x3b2e4,
	0x3b2f8, 0x3b338,
	0x3b340, 0x3b340,
	0x3b348, 0x3b350,
	0x3b35c, 0x3b364,
	0x3b370, 0x3b3b8,
	0x3b3c0, 0x3b3e4,
	0x3b3f8, 0x3b428,
	0x3b430, 0x3b448,
	0x3b460, 0x3b468,
	0x3b470, 0x3b49c,
	0x3b4f0, 0x3b528,
	0x3b530, 0x3b548,
	0x3b560, 0x3b568,
	0x3b570, 0x3b59c,
	0x3b5f0, 0x3b638,
	0x3b640, 0x3b640,
	0x3b648, 0x3b650,
	0x3b65c, 0x3b664,
	0x3b670, 0x3b6b8,
	0x3b6c0, 0x3b6e4,
	0x3b6f8, 0x3b738,
	0x3b740, 0x3b740,
	0x3b748, 0x3b750,
	0x3b75c, 0x3b764,
	0x3b770, 0x3b7b8,
	0x3b7c0, 0x3b7e4,
	0x3b7f8, 0x3b7fc,
	0x3b814, 0x3b814,
	0x3b82c, 0x3b82c,
	0x3b880, 0x3b88c,
	0x3b8e8, 0x3b8ec,
	0x3b900, 0x3b928,
	0x3b930, 0x3b948,
	0x3b960, 0x3b968,
	0x3b970, 0x3b99c,
	0x3b9f0, 0x3ba38,
	0x3ba40, 0x3ba40,
	0x3ba48, 0x3ba50,
	0x3ba5c, 0x3ba64,
	0x3ba70, 0x3bab8,
	0x3bac0, 0x3bae4,
	0x3baf8, 0x3bb10,
	0x3bb28, 0x3bb28,
	0x3bb3c, 0x3bb50,
	0x3bbf0, 0x3bc10,
	0x3bc28, 0x3bc28,
	0x3bc3c, 0x3bc50,
	0x3bcf0, 0x3bcfc,
	0x3c000, 0x3c030,
	0x3c100, 0x3c144,
	0x3c190, 0x3c1a0,
	0x3c1a8, 0x3c1b8,
	0x3c1c4, 0x3c1c8,
	0x3c1d0, 0x3c1d0,
	0x3c200, 0x3c318,
	0x3c400, 0x3c4b4,
	0x3c4c0, 0x3c52c,
	0x3c540, 0x3c61c,
	0x3c800, 0x3c828,
	0x3c834, 0x3c834,
	0x3c8c0, 0x3c908,
	0x3c910, 0x3c9ac,
	0x3ca00, 0x3ca14,
	0x3ca1c, 0x3ca2c,
	0x3ca44, 0x3ca50,
	0x3ca74, 0x3ca74,
	0x3ca7c, 0x3cafc,
	0x3cb08, 0x3cc24,
	0x3cd00, 0x3cd00,
	0x3cd08, 0x3cd14,
	0x3cd1c, 0x3cd20,
	0x3cd3c, 0x3cd3c,
	0x3cd48, 0x3cd50,
	0x3d200, 0x3d20c,
	0x3d220, 0x3d220,
	0x3d240, 0x3d240,
	0x3d600, 0x3d60c,
	0x3da00, 0x3da1c,
	0x3de00, 0x3de20,
	0x3de38, 0x3de3c,
	0x3de80, 0x3de80,
	0x3de88, 0x3dea8,
	0x3deb0, 0x3deb4,
	0x3dec8, 0x3ded4,
	0x3dfb8, 0x3e004,
	0x3e200, 0x3e200,
	0x3e208, 0x3e240,
	0x3e248, 0x3e280,
	0x3e288, 0x3e2c0,
	0x3e2c8, 0x3e2fc,
	0x3e600, 0x3e630,
	0x3ea00, 0x3eabc,
	0x3eb00, 0x3eb10,
	0x3eb20, 0x3eb30,
	0x3eb40, 0x3eb50,
	0x3eb60, 0x3eb70,
	0x3f000, 0x3f028,
	0x3f030, 0x3f048,
	0x3f060, 0x3f068,
	0x3f070, 0x3f09c,
	0x3f0f0, 0x3f128,
	0x3f130, 0x3f148,
	0x3f160, 0x3f168,
	0x3f170, 0x3f19c,
	0x3f1f0, 0x3f238,
	0x3f240, 0x3f240,
	0x3f248, 0x3f250,
	0x3f25c, 0x3f264,
	0x3f270, 0x3f2b8,
	0x3f2c0, 0x3f2e4,
	0x3f2f8, 0x3f338,
	0x3f340, 0x3f340,
	0x3f348, 0x3f350,
	0x3f35c, 0x3f364,
	0x3f370, 0x3f3b8,
	0x3f3c0, 0x3f3e4,
	0x3f3f8, 0x3f428,
	0x3f430, 0x3f448,
	0x3f460, 0x3f468,
	0x3f470, 0x3f49c,
	0x3f4f0, 0x3f528,
	0x3f530, 0x3f548,
	0x3f560, 0x3f568,
	0x3f570, 0x3f59c,
	0x3f5f0, 0x3f638,
	0x3f640, 0x3f640,
	0x3f648, 0x3f650,
	0x3f65c, 0x3f664,
	0x3f670, 0x3f6b8,
	0x3f6c0, 0x3f6e4,
	0x3f6f8, 0x3f738,
	0x3f740, 0x3f740,
	0x3f748, 0x3f750,
	0x3f75c, 0x3f764,
	0x3f770, 0x3f7b8,
	0x3f7c0, 0x3f7e4,
	0x3f7f8, 0x3f7fc,
	0x3f814, 0x3f814,
	0x3f82c, 0x3f82c,
	0x3f880, 0x3f88c,
	0x3f8e8, 0x3f8ec,
	0x3f900, 0x3f928,
	0x3f930, 0x3f948,
	0x3f960, 0x3f968,
	0x3f970, 0x3f99c,
	0x3f9f0, 0x3fa38,
	0x3fa40, 0x3fa40,
	0x3fa48, 0x3fa50,
	0x3fa5c, 0x3fa64,
	0x3fa70, 0x3fab8,
	0x3fac0, 0x3fae4,
	0x3faf8, 0x3fb10,
	0x3fb28, 0x3fb28,
	0x3fb3c, 0x3fb50,
	0x3fbf0, 0x3fc10,
	0x3fc28, 0x3fc28,
	0x3fc3c, 0x3fc50,
	0x3fcf0, 0x3fcfc,
	0x40000, 0x4000c,
	0x40040, 0x40050,
	0x40060, 0x40068,
	0x4007c, 0x4008c,
	0x40094, 0x400b0,
	0x400c0, 0x40144,
	0x40180, 0x4018c,
	0x40200, 0x40254,
	0x40260, 0x40264,
	0x40270, 0x40288,
	0x40290, 0x40298,
	0x402ac, 0x402c8,
	0x402d0, 0x402e0,
	0x402f0, 0x402f0,
	0x40300, 0x4033c,
	0x403f8, 0x403fc,
	0x41304, 0x413c4,
	0x41400, 0x4140c,
	0x41414, 0x4141c,
	0x41480, 0x414d0,
	0x44000, 0x44054,
	0x4405c, 0x44078,
	0x440c0, 0x44174,
	0x44180, 0x441ac,
	0x441b4, 0x441b8,
	0x441c0, 0x44254,
	0x4425c, 0x44278,
	0x442c0, 0x44374,
	0x44380, 0x443ac,
	0x443b4, 0x443b8,
	0x443c0, 0x44454,
	0x4445c, 0x44478,
	0x444c0, 0x44574,
	0x44580, 0x445ac,
	0x445b4, 0x445b8,
	0x445c0, 0x44654,
	0x4465c, 0x44678,
	0x446c0, 0x44774,
	0x44780, 0x447ac,
	0x447b4, 0x447b8,
	0x447c0, 0x44854,
	0x4485c, 0x44878,
	0x448c0, 0x44974,
	0x44980, 0x449ac,
	0x449b4, 0x449b8,
	0x449c0, 0x449fc,
	0x45000, 0x45004,
	0x45010, 0x45030,
	0x45040, 0x45060,
	0x45068, 0x45068,
	0x45080, 0x45084,
	0x450a0, 0x450b0,
	0x45200, 0x45204,
	0x45210, 0x45230,
	0x45240, 0x45260,
	0x45268, 0x45268,
	0x45280, 0x45284,
	0x452a0, 0x452b0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x48000, 0x4800c,
	0x48040, 0x48050,
	0x48060, 0x48068,
	0x4807c, 0x4808c,
	0x48094, 0x480b0,
	0x480c0, 0x48144,
	0x48180, 0x4818c,
	0x48200, 0x48254,
	0x48260, 0x48264,
	0x48270, 0x48288,
	0x48290, 0x48298,
	0x482ac, 0x482c8,
	0x482d0, 0x482e0,
	0x482f0, 0x482f0,
	0x48300, 0x4833c,
	0x483f8, 0x483fc,
	0x49304, 0x493c4,
	0x49400, 0x4940c,
	0x49414, 0x4941c,
	0x49480, 0x494d0,
	0x4c000, 0x4c054,
	0x4c05c, 0x4c078,
	0x4c0c0, 0x4c174,
	0x4c180, 0x4c1ac,
	0x4c1b4, 0x4c1b8,
	0x4c1c0, 0x4c254,
	0x4c25c, 0x4c278,
	0x4c2c0, 0x4c374,
	0x4c380, 0x4c3ac,
	0x4c3b4, 0x4c3b8,
	0x4c3c0, 0x4c454,
	0x4c45c, 0x4c478,
	0x4c4c0, 0x4c574,
	0x4c580, 0x4c5ac,
	0x4c5b4, 0x4c5b8,
	0x4c5c0, 0x4c654,
	0x4c65c, 0x4c678,
	0x4c6c0, 0x4c774,
	0x4c780, 0x4c7ac,
	0x4c7b4, 0x4c7b8,
	0x4c7c0, 0x4c854,
	0x4c85c, 0x4c878,
	0x4c8c0, 0x4c974,
	0x4c980, 0x4c9ac,
	0x4c9b4, 0x4c9b8,
	0x4c9c0, 0x4c9fc,
	0x4d000, 0x4d004,
	0x4d010, 0x4d030,
	0x4d040, 0x4d060,
	0x4d068, 0x4d068,
	0x4d080, 0x4d084,
	0x4d0a0, 0x4d0b0,
	0x4d200, 0x4d204,
	0x4d210, 0x4d230,
	0x4d240, 0x4d260,
	0x4d268, 0x4d268,
	0x4d280, 0x4d284,
	0x4d2a0, 0x4d2b0,
	0x4e0c0, 0x4e0e4,
	0x4f000, 0x4f03c,
	0x4f044, 0x4f08c,
	0x4f200, 0x4f250,
	0x4f400, 0x4f408,
	0x4f414, 0x4f420,
	0x4f600, 0x4f618,
	0x4f800, 0x4f814,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50c00, 0x50c00,
	0x51000, 0x5101c,
	0x51300, 0x51308,
};
2061
/*
 * Register address ranges readable on a T5 virtual function, expressed as
 * inclusive {start, end} pairs.  Entries alternate start/end, so the array
 * length must stay even.  NOTE(review): presumably walked by the register
 * dump path (t4_get_regs) — confirm against the caller, which is outside
 * this view.
 */
static const unsigned int t5vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	/* Wider PL range than T4 VFs: extends through A_PL_VF_REVISION. */
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	/* NUM_CIM_PF_MAILBOX_DATA_INSTANCES 32-bit mailbox data words
	 * starting at FW_T4VF_MBDATA_BASE_ADDR. */
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
2073
2074	static const unsigned int t6_reg_ranges[] = {
2075		0x1008, 0x101c,
2076		0x1024, 0x10a8,
2077		0x10b4, 0x10f8,
2078		0x1100, 0x1114,
2079		0x111c, 0x112c,
2080		0x1138, 0x113c,
2081		0x1144, 0x114c,
2082		0x1180, 0x1184,
2083		0x1190, 0x1194,
2084		0x11a0, 0x11a4,
2085		0x11b0, 0x11b4,
2086		0x11fc, 0x1274,
2087		0x1280, 0x133c,
2088		0x1800, 0x18fc,
2089		0x3000, 0x302c,
2090		0x3060, 0x30b0,
2091		0x30b8, 0x30d8,
2092		0x30e0, 0x30fc,
2093		0x3140, 0x357c,
2094		0x35a8, 0x35cc,
2095		0x35ec, 0x35ec,
2096		0x3600, 0x5624,
2097		0x56cc, 0x56ec,
2098		0x56f4, 0x5720,
2099		0x5728, 0x575c,
2100		0x580c, 0x5814,
2101		0x5890, 0x589c,
2102		0x58a4, 0x58ac,
2103		0x58b8, 0x58bc,
2104		0x5940, 0x595c,
2105		0x5980, 0x598c,
2106		0x59b0, 0x59c8,
2107		0x59d0, 0x59dc,
2108		0x59fc, 0x5a18,
2109		0x5a60, 0x5a6c,
2110		0x5a80, 0x5a8c,
2111		0x5a94, 0x5a9c,
2112		0x5b94, 0x5bfc,
2113		0x5c10, 0x5e48,
2114		0x5e50, 0x5e94,
2115		0x5ea0, 0x5eb0,
2116		0x5ec0, 0x5ec0,
2117		0x5ec8, 0x5ed0,
2118		0x5ee0, 0x5ee0,
2119		0x5ef0, 0x5ef0,
2120		0x5f00, 0x5f00,
2121		0x6000, 0x6020,
2122		0x6028, 0x6040,
2123		0x6058, 0x609c,
2124		0x60a8, 0x619c,
2125		0x7700, 0x7798,
2126		0x77c0, 0x7880,
2127		0x78cc, 0x78fc,
2128		0x7b00, 0x7b58,
2129		0x7b60, 0x7b84,
2130		0x7b8c, 0x7c54,
2131		0x7d00, 0x7d38,
2132		0x7d40, 0x7d84,
2133		0x7d8c, 0x7ddc,
2134		0x7de4, 0x7e04,
2135		0x7e10, 0x7e1c,
2136		0x7e24, 0x7e38,
2137		0x7e40, 0x7e44,
2138		0x7e4c, 0x7e78,
2139		0x7e80, 0x7edc,
2140		0x7ee8, 0x7efc,
2141		0x8dc0, 0x8de4,
2142		0x8df8, 0x8e04,
2143		0x8e10, 0x8e84,
2144		0x8ea0, 0x8f88,
2145		0x8fb8, 0x9058,
2146		0x9060, 0x9060,
2147		0x9068, 0x90f8,
2148		0x9100, 0x9124,
2149		0x9400, 0x9470,
2150		0x9600, 0x9600,
2151		0x9608, 0x9638,
2152		0x9640, 0x9704,
2153		0x9710, 0x971c,
2154		0x9800, 0x9808,
2155		0x9820, 0x983c,
2156		0x9850, 0x9864,
2157		0x9c00, 0x9c6c,
2158		0x9c80, 0x9cec,
2159		0x9d00, 0x9d6c,
2160		0x9d80, 0x9dec,
2161		0x9e00, 0x9e6c,
2162		0x9e80, 0x9eec,
2163		0x9f00, 0x9f6c,
2164		0x9f80, 0xa020,
2165		0xd004, 0xd03c,
2166		0xd100, 0xd118,
2167		0xd200, 0xd214,
2168		0xd220, 0xd234,
2169		0xd240, 0xd254,
2170		0xd260, 0xd274,
2171		0xd280, 0xd294,
2172		0xd2a0, 0xd2b4,
2173		0xd2c0, 0xd2d4,
2174		0xd2e0, 0xd2f4,
2175		0xd300, 0xd31c,
2176		0xdfc0, 0xdfe0,
2177		0xe000, 0xf008,
2178		0xf010, 0xf018,
2179		0xf020, 0xf028,
2180		0x11000, 0x11014,
2181		0x11048, 0x1106c,
2182		0x11074, 0x11088,
2183		0x11098, 0x11120,
2184		0x1112c, 0x1117c,
2185		0x11190, 0x112e0,
2186		0x11300, 0x1130c,
2187		0x12000, 0x1206c,
2188		0x19040, 0x1906c,
2189		0x19078, 0x19080,
2190		0x1908c, 0x190e8,
2191		0x190f0, 0x190f8,
2192		0x19100, 0x19110,
2193		0x19120, 0x19124,
2194		0x19150, 0x19194,
2195		0x1919c, 0x191b0,
2196		0x191d0, 0x191e8,
2197		0x19238, 0x19290,
2198		0x192a4, 0x192b0,
2199		0x192bc, 0x192bc,
2200		0x19348, 0x1934c,
2201		0x193f8, 0x19418,
2202		0x19420, 0x19428,
2203		0x19430, 0x19444,
2204		0x1944c, 0x1946c,
2205		0x19474, 0x19474,
2206		0x19490, 0x194cc,
2207		0x194f0, 0x194f8,
2208		0x19c00, 0x19c48,
2209		0x19c50, 0x19c80,
2210		0x19c94, 0x19c98,
2211		0x19ca0, 0x19cbc,
2212		0x19ce4, 0x19ce4,
2213		0x19cf0, 0x19cf8,
2214		0x19d00, 0x19d28,
2215		0x19d50, 0x19d78,
2216		0x19d94, 0x19d98,
2217		0x19da0, 0x19dc8,
2218		0x19df0, 0x19e10,
2219		0x19e50, 0x19e6c,
2220		0x19ea0, 0x19ebc,
2221		0x19ec4, 0x19ef4,
2222		0x19f04, 0x19f2c,
2223		0x19f34, 0x19f34,
2224		0x19f40, 0x19f50,
2225		0x19f90, 0x19fac,
2226		0x19fc4, 0x19fc8,
2227		0x19fd0, 0x19fe4,
2228		0x1a000, 0x1a004,
2229		0x1a010, 0x1a06c,
2230		0x1a0b0, 0x1a0e4,
2231		0x1a0ec, 0x1a0f8,
2232		0x1a100, 0x1a108,
2233		0x1a114, 0x1a120,
2234		0x1a128, 0x1a130,
2235		0x1a138, 0x1a138,
2236		0x1a190, 0x1a1c4,
2237		0x1a1fc, 0x1a1fc,
2238		0x1e008, 0x1e00c,
2239		0x1e040, 0x1e044,
2240		0x1e04c, 0x1e04c,
2241		0x1e284, 0x1e290,
2242		0x1e2c0, 0x1e2c0,
2243		0x1e2e0, 0x1e2e0,
2244		0x1e300, 0x1e384,
2245		0x1e3c0, 0x1e3c8,
2246		0x1e408, 0x1e40c,
2247		0x1e440, 0x1e444,
2248		0x1e44c, 0x1e44c,
2249		0x1e684, 0x1e690,
2250		0x1e6c0, 0x1e6c0,
2251		0x1e6e0, 0x1e6e0,
2252		0x1e700, 0x1e784,
2253		0x1e7c0, 0x1e7c8,
2254		0x1e808, 0x1e80c,
2255		0x1e840, 0x1e844,
2256		0x1e84c, 0x1e84c,
2257		0x1ea84, 0x1ea90,
2258		0x1eac0, 0x1eac0,
2259		0x1eae0, 0x1eae0,
2260		0x1eb00, 0x1eb84,
2261		0x1ebc0, 0x1ebc8,
2262		0x1ec08, 0x1ec0c,
2263		0x1ec40, 0x1ec44,
2264		0x1ec4c, 0x1ec4c,
2265		0x1ee84, 0x1ee90,
2266		0x1eec0, 0x1eec0,
2267		0x1eee0, 0x1eee0,
2268		0x1ef00, 0x1ef84,
2269		0x1efc0, 0x1efc8,
2270		0x1f008, 0x1f00c,
2271		0x1f040, 0x1f044,
2272		0x1f04c, 0x1f04c,
2273		0x1f284, 0x1f290,
2274		0x1f2c0, 0x1f2c0,
2275		0x1f2e0, 0x1f2e0,
2276		0x1f300, 0x1f384,
2277		0x1f3c0, 0x1f3c8,
2278		0x1f408, 0x1f40c,
2279		0x1f440, 0x1f444,
2280		0x1f44c, 0x1f44c,
2281		0x1f684, 0x1f690,
2282		0x1f6c0, 0x1f6c0,
2283		0x1f6e0, 0x1f6e0,
2284		0x1f700, 0x1f784,
2285		0x1f7c0, 0x1f7c8,
2286		0x1f808, 0x1f80c,
2287		0x1f840, 0x1f844,
2288		0x1f84c, 0x1f84c,
2289		0x1fa84, 0x1fa90,
2290		0x1fac0, 0x1fac0,
2291		0x1fae0, 0x1fae0,
2292		0x1fb00, 0x1fb84,
2293		0x1fbc0, 0x1fbc8,
2294		0x1fc08, 0x1fc0c,
2295		0x1fc40, 0x1fc44,
2296		0x1fc4c, 0x1fc4c,
2297		0x1fe84, 0x1fe90,
2298		0x1fec0, 0x1fec0,
2299		0x1fee0, 0x1fee0,
2300		0x1ff00, 0x1ff84,
2301		0x1ffc0, 0x1ffc8,
2302		0x30000, 0x30030,
2303		0x30100, 0x30168,
2304		0x30190, 0x301a0,
2305		0x301a8, 0x301b8,
2306		0x301c4, 0x301c8,
2307		0x301d0, 0x301d0,
2308		0x30200, 0x30320,
2309		0x30400, 0x304b4,
2310		0x304c0, 0x3052c,
2311		0x30540, 0x3061c,
2312		0x30800, 0x308a0,
2313		0x308c0, 0x30908,
2314		0x30910, 0x309b8,
2315		0x30a00, 0x30a04,
2316		0x30a0c, 0x30a14,
2317		0x30a1c, 0x30a2c,
2318		0x30a44, 0x30a50,
2319		0x30a74, 0x30a74,
2320		0x30a7c, 0x30afc,
2321		0x30b08, 0x30c24,
2322		0x30d00, 0x30d14,
2323		0x30d1c, 0x30d3c,
2324		0x30d44, 0x30d4c,
2325		0x30d54, 0x30d74,
2326		0x30d7c, 0x30d7c,
2327		0x30de0, 0x30de0,
2328		0x30e00, 0x30ed4,
2329		0x30f00, 0x30fa4,
2330		0x30fc0, 0x30fc4,
2331		0x31000, 0x31004,
2332		0x31080, 0x310fc,
2333		0x31208, 0x31220,
2334		0x3123c, 0x31254,
2335		0x31300, 0x31300,
2336		0x31308, 0x3131c,
2337		0x31338, 0x3133c,
2338		0x31380, 0x31380,
2339		0x31388, 0x313a8,
2340		0x313b4, 0x313b4,
2341		0x31400, 0x31420,
2342		0x31438, 0x3143c,
2343		0x31480, 0x31480,
2344		0x314a8, 0x314a8,
2345		0x314b0, 0x314b4,
2346		0x314c8, 0x314d4,
2347		0x31a40, 0x31a4c,
2348		0x31af0, 0x31b20,
2349		0x31b38, 0x31b3c,
2350		0x31b80, 0x31b80,
2351		0x31ba8, 0x31ba8,
2352		0x31bb0, 0x31bb4,
2353		0x31bc8, 0x31bd4,
2354		0x32140, 0x3218c,
2355		0x321f0, 0x321f4,
2356		0x32200, 0x32200,
2357		0x32218, 0x32218,
2358		0x32400, 0x32400,
2359		0x32408, 0x3241c,
2360		0x32618, 0x32620,
2361		0x32664, 0x32664,
2362		0x326a8, 0x326a8,
2363		0x326ec, 0x326ec,
2364		0x32a00, 0x32abc,
2365		0x32b00, 0x32b18,
2366		0x32b20, 0x32b38,
2367		0x32b40, 0x32b58,
2368		0x32b60, 0x32b78,
2369		0x32c00, 0x32c00,
2370		0x32c08, 0x32c3c,
2371		0x33000, 0x3302c,
2372		0x33034, 0x33050,
2373		0x33058, 0x33058,
2374		0x33060, 0x3308c,
2375		0x3309c, 0x330ac,
2376		0x330c0, 0x330c0,
2377		0x330c8, 0x330d0,
2378		0x330d8, 0x330e0,
2379		0x330ec, 0x3312c,
2380		0x33134, 0x33150,
2381		0x33158, 0x33158,
2382		0x33160, 0x3318c,
2383		0x3319c, 0x331ac,
2384		0x331c0, 0x331c0,
2385		0x331c8, 0x331d0,
2386		0x331d8, 0x331e0,
2387		0x331ec, 0x33290,
2388		0x33298, 0x332c4,
2389		0x332e4, 0x33390,
2390		0x33398, 0x333c4,
2391		0x333e4, 0x3342c,
2392		0x33434, 0x33450,
2393		0x33458, 0x33458,
2394		0x33460, 0x3348c,
2395		0x3349c, 0x334ac,
2396		0x334c0, 0x334c0,
2397		0x334c8, 0x334d0,
2398		0x334d8, 0x334e0,
2399		0x334ec, 0x3352c,
2400		0x33534, 0x33550,
2401		0x33558, 0x33558,
2402		0x33560, 0x3358c,
2403		0x3359c, 0x335ac,
2404		0x335c0, 0x335c0,
2405		0x335c8, 0x335d0,
2406		0x335d8, 0x335e0,
2407		0x335ec, 0x33690,
2408		0x33698, 0x336c4,
2409		0x336e4, 0x33790,
2410		0x33798, 0x337c4,
2411		0x337e4, 0x337fc,
2412		0x33814, 0x33814,
2413		0x33854, 0x33868,
2414		0x33880, 0x3388c,
2415		0x338c0, 0x338d0,
2416		0x338e8, 0x338ec,
2417		0x33900, 0x3392c,
2418		0x33934, 0x33950,
2419		0x33958, 0x33958,
2420		0x33960, 0x3398c,
2421		0x3399c, 0x339ac,
2422		0x339c0, 0x339c0,
2423		0x339c8, 0x339d0,
2424		0x339d8, 0x339e0,
2425		0x339ec, 0x33a90,
2426		0x33a98, 0x33ac4,
2427		0x33ae4, 0x33b10,
2428		0x33b24, 0x33b28,
2429		0x33b38, 0x33b50,
2430		0x33bf0, 0x33c10,
2431		0x33c24, 0x33c28,
2432		0x33c38, 0x33c50,
2433		0x33cf0, 0x33cfc,
2434		0x34000, 0x34030,
2435		0x34100, 0x34168,
2436		0x34190, 0x341a0,
2437		0x341a8, 0x341b8,
2438		0x341c4, 0x341c8,
2439		0x341d0, 0x341d0,
2440		0x34200, 0x34320,
2441		0x34400, 0x344b4,
2442		0x344c0, 0x3452c,
2443		0x34540, 0x3461c,
2444		0x34800, 0x348a0,
2445		0x348c0, 0x34908,
2446		0x34910, 0x349b8,
2447		0x34a00, 0x34a04,
2448		0x34a0c, 0x34a14,
2449		0x34a1c, 0x34a2c,
2450		0x34a44, 0x34a50,
2451		0x34a74, 0x34a74,
2452		0x34a7c, 0x34afc,
2453		0x34b08, 0x34c24,
2454		0x34d00, 0x34d14,
2455		0x34d1c, 0x34d3c,
2456		0x34d44, 0x34d4c,
2457		0x34d54, 0x34d74,
2458		0x34d7c, 0x34d7c,
2459		0x34de0, 0x34de0,
2460		0x34e00, 0x34ed4,
2461		0x34f00, 0x34fa4,
2462		0x34fc0, 0x34fc4,
2463		0x35000, 0x35004,
2464		0x35080, 0x350fc,
2465		0x35208, 0x35220,
2466		0x3523c, 0x35254,
2467		0x35300, 0x35300,
2468		0x35308, 0x3531c,
2469		0x35338, 0x3533c,
2470		0x35380, 0x35380,
2471		0x35388, 0x353a8,
2472		0x353b4, 0x353b4,
2473		0x35400, 0x35420,
2474		0x35438, 0x3543c,
2475		0x35480, 0x35480,
2476		0x354a8, 0x354a8,
2477		0x354b0, 0x354b4,
2478		0x354c8, 0x354d4,
2479		0x35a40, 0x35a4c,
2480		0x35af0, 0x35b20,
2481		0x35b38, 0x35b3c,
2482		0x35b80, 0x35b80,
2483		0x35ba8, 0x35ba8,
2484		0x35bb0, 0x35bb4,
2485		0x35bc8, 0x35bd4,
2486		0x36140, 0x3618c,
2487		0x361f0, 0x361f4,
2488		0x36200, 0x36200,
2489		0x36218, 0x36218,
2490		0x36400, 0x36400,
2491		0x36408, 0x3641c,
2492		0x36618, 0x36620,
2493		0x36664, 0x36664,
2494		0x366a8, 0x366a8,
2495		0x366ec, 0x366ec,
2496		0x36a00, 0x36abc,
2497		0x36b00, 0x36b18,
2498		0x36b20, 0x36b38,
2499		0x36b40, 0x36b58,
2500		0x36b60, 0x36b78,
2501		0x36c00, 0x36c00,
2502		0x36c08, 0x36c3c,
2503		0x37000, 0x3702c,
2504		0x37034, 0x37050,
2505		0x37058, 0x37058,
2506		0x37060, 0x3708c,
2507		0x3709c, 0x370ac,
2508		0x370c0, 0x370c0,
2509		0x370c8, 0x370d0,
2510		0x370d8, 0x370e0,
2511		0x370ec, 0x3712c,
2512		0x37134, 0x37150,
2513		0x37158, 0x37158,
2514		0x37160, 0x3718c,
2515		0x3719c, 0x371ac,
2516		0x371c0, 0x371c0,
2517		0x371c8, 0x371d0,
2518		0x371d8, 0x371e0,
2519		0x371ec, 0x37290,
2520		0x37298, 0x372c4,
2521		0x372e4, 0x37390,
2522		0x37398, 0x373c4,
2523		0x373e4, 0x3742c,
2524		0x37434, 0x37450,
2525		0x37458, 0x37458,
2526		0x37460, 0x3748c,
2527		0x3749c, 0x374ac,
2528		0x374c0, 0x374c0,
2529		0x374c8, 0x374d0,
2530		0x374d8, 0x374e0,
2531		0x374ec, 0x3752c,
2532		0x37534, 0x37550,
2533		0x37558, 0x37558,
2534		0x37560, 0x3758c,
2535		0x3759c, 0x375ac,
2536		0x375c0, 0x375c0,
2537		0x375c8, 0x375d0,
2538		0x375d8, 0x375e0,
2539		0x375ec, 0x37690,
2540		0x37698, 0x376c4,
2541		0x376e4, 0x37790,
2542		0x37798, 0x377c4,
2543		0x377e4, 0x377fc,
2544		0x37814, 0x37814,
2545		0x37854, 0x37868,
2546		0x37880, 0x3788c,
2547		0x378c0, 0x378d0,
2548		0x378e8, 0x378ec,
2549		0x37900, 0x3792c,
2550		0x37934, 0x37950,
2551		0x37958, 0x37958,
2552		0x37960, 0x3798c,
2553		0x3799c, 0x379ac,
2554		0x379c0, 0x379c0,
2555		0x379c8, 0x379d0,
2556		0x379d8, 0x379e0,
2557		0x379ec, 0x37a90,
2558		0x37a98, 0x37ac4,
2559		0x37ae4, 0x37b10,
2560		0x37b24, 0x37b28,
2561		0x37b38, 0x37b50,
2562		0x37bf0, 0x37c10,
2563		0x37c24, 0x37c28,
2564		0x37c38, 0x37c50,
2565		0x37cf0, 0x37cfc,
2566		0x40040, 0x40040,
2567		0x40080, 0x40084,
2568		0x40100, 0x40100,
2569		0x40140, 0x401bc,
2570		0x40200, 0x40214,
2571		0x40228, 0x40228,
2572		0x40240, 0x40258,
2573		0x40280, 0x40280,
2574		0x40304, 0x40304,
2575		0x40330, 0x4033c,
2576		0x41304, 0x413c8,
2577		0x413d0, 0x413dc,
2578		0x413f0, 0x413f0,
2579		0x41400, 0x4140c,
2580		0x41414, 0x4141c,
2581		0x41480, 0x414d0,
2582		0x44000, 0x4407c,
2583		0x440c0, 0x441ac,
2584		0x441b4, 0x4427c,
2585		0x442c0, 0x443ac,
2586		0x443b4, 0x4447c,
2587		0x444c0, 0x445ac,
2588		0x445b4, 0x4467c,
2589		0x446c0, 0x447ac,
2590		0x447b4, 0x4487c,
2591		0x448c0, 0x449ac,
2592		0x449b4, 0x44a7c,
2593		0x44ac0, 0x44bac,
2594		0x44bb4, 0x44c7c,
2595		0x44cc0, 0x44dac,
2596		0x44db4, 0x44e7c,
2597		0x44ec0, 0x44fac,
2598		0x44fb4, 0x4507c,
2599		0x450c0, 0x451ac,
2600		0x451b4, 0x451fc,
2601		0x45800, 0x45804,
2602		0x45810, 0x45830,
2603		0x45840, 0x45860,
2604		0x45868, 0x45868,
2605		0x45880, 0x45884,
2606		0x458a0, 0x458b0,
2607		0x45a00, 0x45a04,
2608		0x45a10, 0x45a30,
2609		0x45a40, 0x45a60,
2610		0x45a68, 0x45a68,
2611		0x45a80, 0x45a84,
2612		0x45aa0, 0x45ab0,
2613		0x460c0, 0x460e4,
2614		0x47000, 0x4703c,
2615		0x47044, 0x4708c,
2616		0x47200, 0x47250,
2617		0x47400, 0x47408,
2618		0x47414, 0x47420,
2619		0x47600, 0x47618,
2620		0x47800, 0x47814,
2621		0x47820, 0x4782c,
2622		0x50000, 0x50084,
2623		0x50090, 0x500cc,
2624		0x50300, 0x50384,
2625		0x50400, 0x50400,
2626		0x50800, 0x50884,
2627		0x50890, 0x508cc,
2628		0x50b00, 0x50b84,
2629		0x50c00, 0x50c00,
2630		0x51000, 0x51020,
2631		0x51028, 0x510b0,
2632		0x51300, 0x51324,
2633	};
2634
2635	static const unsigned int t6vf_reg_ranges[] = {
2636		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2637		VF_MPS_REG(A_MPS_VF_CTL),
2638		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2639		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2640		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2641		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2642		FW_T6VF_MBDATA_BASE_ADDR,
2643		FW_T6VF_MBDATA_BASE_ADDR +
2644		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2645	};
2646
2647	u32 *buf_end = (u32 *)(buf + buf_size);
2648	const unsigned int *reg_ranges;
2649	int reg_ranges_size, range;
2650	unsigned int chip_version = chip_id(adap);
2651
2652	/*
2653	 * Select the right set of register ranges to dump depending on the
2654	 * adapter chip type.
2655	 */
2656	switch (chip_version) {
2657	case CHELSIO_T4:
2658		if (adap->flags & IS_VF) {
2659			reg_ranges = t4vf_reg_ranges;
2660			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2661		} else {
2662			reg_ranges = t4_reg_ranges;
2663			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2664		}
2665		break;
2666
2667	case CHELSIO_T5:
2668		if (adap->flags & IS_VF) {
2669			reg_ranges = t5vf_reg_ranges;
2670			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2671		} else {
2672			reg_ranges = t5_reg_ranges;
2673			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2674		}
2675		break;
2676
2677	case CHELSIO_T6:
2678		if (adap->flags & IS_VF) {
2679			reg_ranges = t6vf_reg_ranges;
2680			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2681		} else {
2682			reg_ranges = t6_reg_ranges;
2683			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2684		}
2685		break;
2686
2687	default:
2688		CH_ERR(adap,
2689			"Unsupported chip version %d\n", chip_version);
2690		return;
2691	}
2692
2693	/*
2694	 * Clear the register buffer and insert the appropriate register
2695	 * values selected by the above register ranges.
2696	 */
2697	memset(buf, 0, buf_size);
2698	for (range = 0; range < reg_ranges_size; range += 2) {
2699		unsigned int reg = reg_ranges[range];
2700		unsigned int last_reg = reg_ranges[range + 1];
2701		u32 *bufp = (u32 *)(buf + reg);
2702
2703		/*
2704		 * Iterate across the register range filling in the register
2705		 * buffer but don't write past the end of the register buffer.
2706		 */
2707		while (reg <= last_reg && bufp < buf_end) {
2708			*bufp++ = t4_read_reg(adap, reg);
2709			reg += sizeof(u32);
2710		}
2711	}
2712}
2713
2714/*
2715 * Partial EEPROM Vital Product Data structure.  The VPD starts with one ID
2716 * header followed by one or more VPD-R sections, each with its own header.
2717 */
2718struct t4_vpd_hdr {
2719	u8  id_tag;
2720	u8  id_len[2];
2721	u8  id_data[ID_LEN];
2722};
2723
struct t4_vpdr_hdr {
	u8  vpdr_tag;		/* section tag; consecutive VPD-R sections carry
				 * consecutive tag values (see get_vpd_keyword_val) */
	u8  vpdr_len[2];	/* section payload length, little-endian:
				 * len[0] + (len[1] << 8) */
};
2728
2729/*
2730 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2731 */
2732#define EEPROM_DELAY		10		/* 10us per poll spin */
2733#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */
2734
2735#define EEPROM_STAT_ADDR	0x7bfc
2736#define VPD_SIZE		0x800
2737#define VPD_BASE		0x400
2738#define VPD_BASE_OLD		0
2739#define VPD_LEN			1024
2740#define VPD_INFO_FLD_HDR_SIZE	3
2741#define CHELSIO_VPD_UNIQUE_ID	0x82
2742
2743/*
2744 * Small utility function to wait till any outstanding VPD Access is complete.
2745 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2746 * VPD Access in flight.  This allows us to handle the problem of having a
2747 * previous VPD Access time out and prevent an attempt to inject a new VPD
2748 * Request before any in-flight VPD reguest has completed.
2749 */
2750static int t4_seeprom_wait(struct adapter *adapter)
2751{
2752	unsigned int base = adapter->params.pci.vpd_cap_addr;
2753	int max_poll;
2754
2755	/*
2756	 * If no VPD Access is in flight, we can just return success right
2757	 * away.
2758	 */
2759	if (!adapter->vpd_busy)
2760		return 0;
2761
2762	/*
2763	 * Poll the VPD Capability Address/Flag register waiting for it
2764	 * to indicate that the operation is complete.
2765	 */
2766	max_poll = EEPROM_MAX_POLL;
2767	do {
2768		u16 val;
2769
2770		udelay(EEPROM_DELAY);
2771		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2772
2773		/*
2774		 * If the operation is complete, mark the VPD as no longer
2775		 * busy and return success.
2776		 */
2777		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2778			adapter->vpd_busy = 0;
2779			return 0;
2780		}
2781	} while (--max_poll);
2782
2783	/*
2784	 * Failure!  Note that we leave the VPD Busy status set in order to
2785	 * avoid pushing a new VPD Access request into the VPD Capability till
2786	 * the current operation eventually succeeds.  It's a bug to issue a
2787	 * new request when an existing request is in flight and will result
2788	 * in corrupt hardware state.
2789	 */
2790	return -ETIMEDOUT;
2791}
2792
2793/**
2794 *	t4_seeprom_read - read a serial EEPROM location
2795 *	@adapter: adapter to read
2796 *	@addr: EEPROM virtual address
2797 *	@data: where to store the read data
2798 *
2799 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
2800 *	VPD capability.  Note that this function must be called with a virtual
2801 *	address.
2802 */
2803int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2804{
2805	unsigned int base = adapter->params.pci.vpd_cap_addr;
2806	int ret;
2807
2808	/*
2809	 * VPD Accesses must alway be 4-byte aligned!
2810	 */
2811	if (addr >= EEPROMVSIZE || (addr & 3))
2812		return -EINVAL;
2813
2814	/*
2815	 * Wait for any previous operation which may still be in flight to
2816	 * complete.
2817	 */
2818	ret = t4_seeprom_wait(adapter);
2819	if (ret) {
2820		CH_ERR(adapter, "VPD still busy from previous operation\n");
2821		return ret;
2822	}
2823
2824	/*
2825	 * Issue our new VPD Read request, mark the VPD as being busy and wait
2826	 * for our request to complete.  If it doesn't complete, note the
2827	 * error and return it to our caller.  Note that we do not reset the
2828	 * VPD Busy status!
2829	 */
2830	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2831	adapter->vpd_busy = 1;
2832	adapter->vpd_flag = PCI_VPD_ADDR_F;
2833	ret = t4_seeprom_wait(adapter);
2834	if (ret) {
2835		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2836		return ret;
2837	}
2838
2839	/*
2840	 * Grab the returned data, swizzle it into our endianness and
2841	 * return success.
2842	 */
2843	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2844	*data = le32_to_cpu(*data);
2845	return 0;
2846}
2847
2848/**
2849 *	t4_seeprom_write - write a serial EEPROM location
2850 *	@adapter: adapter to write
2851 *	@addr: virtual EEPROM address
2852 *	@data: value to write
2853 *
2854 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
2855 *	VPD capability.  Note that this function must be called with a virtual
2856 *	address.
2857 */
2858int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2859{
2860	unsigned int base = adapter->params.pci.vpd_cap_addr;
2861	int ret;
2862	u32 stats_reg;
2863	int max_poll;
2864
2865	/*
2866	 * VPD Accesses must alway be 4-byte aligned!
2867	 */
2868	if (addr >= EEPROMVSIZE || (addr & 3))
2869		return -EINVAL;
2870
2871	/*
2872	 * Wait for any previous operation which may still be in flight to
2873	 * complete.
2874	 */
2875	ret = t4_seeprom_wait(adapter);
2876	if (ret) {
2877		CH_ERR(adapter, "VPD still busy from previous operation\n");
2878		return ret;
2879	}
2880
2881	/*
2882	 * Issue our new VPD Read request, mark the VPD as being busy and wait
2883	 * for our request to complete.  If it doesn't complete, note the
2884	 * error and return it to our caller.  Note that we do not reset the
2885	 * VPD Busy status!
2886	 */
2887	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2888				 cpu_to_le32(data));
2889	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2890				 (u16)addr | PCI_VPD_ADDR_F);
2891	adapter->vpd_busy = 1;
2892	adapter->vpd_flag = 0;
2893	ret = t4_seeprom_wait(adapter);
2894	if (ret) {
2895		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2896		return ret;
2897	}
2898
2899	/*
2900	 * Reset PCI_VPD_DATA register after a transaction and wait for our
2901	 * request to complete. If it doesn't complete, return error.
2902	 */
2903	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2904	max_poll = EEPROM_MAX_POLL;
2905	do {
2906		udelay(EEPROM_DELAY);
2907		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2908	} while ((stats_reg & 0x1) && --max_poll);
2909	if (!max_poll)
2910		return -ETIMEDOUT;
2911
2912	/* Return success! */
2913	return 0;
2914}
2915
2916/**
2917 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
2918 *	@phys_addr: the physical EEPROM address
2919 *	@fn: the PCI function number
2920 *	@sz: size of function-specific area
2921 *
2922 *	Translate a physical EEPROM address to virtual.  The first 1K is
2923 *	accessed through virtual addresses starting at 31K, the rest is
2924 *	accessed through virtual addresses starting at 0.
2925 *
2926 *	The mapping is as follows:
2927 *	[0..1K) -> [31K..32K)
2928 *	[1K..1K+A) -> [ES-A..ES)
2929 *	[1K+A..ES) -> [0..ES-A-1K)
2930 *
2931 *	where A = @fn * @sz, and ES = EEPROM size.
2932 */
2933int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2934{
2935	fn *= sz;
2936	if (phys_addr < 1024)
2937		return phys_addr + (31 << 10);
2938	if (phys_addr < 1024 + fn)
2939		return EEPROMSIZE - fn + phys_addr - 1024;
2940	if (phys_addr < EEPROMSIZE)
2941		return phys_addr - 1024 - fn;
2942	return -EINVAL;
2943}
2944
2945/**
2946 *	t4_seeprom_wp - enable/disable EEPROM write protection
2947 *	@adapter: the adapter
2948 *	@enable: whether to enable or disable write protection
2949 *
2950 *	Enables or disables write protection on the serial EEPROM.
2951 */
2952int t4_seeprom_wp(struct adapter *adapter, int enable)
2953{
2954	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2955}
2956
2957/**
2958 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
2959 *	@vpd: Pointer to buffered vpd data structure
2960 *	@kw: The keyword to search for
2961 *	@region: VPD region to search (starting from 0)
2962 *
2963 *	Returns the value of the information field keyword or
2964 *	-ENOENT otherwise.
2965 */
2966static int get_vpd_keyword_val(const u8 *vpd, const char *kw, int region)
2967{
2968	int i, tag;
2969	unsigned int offset, len;
2970	const struct t4_vpdr_hdr *vpdr;
2971
2972	offset = sizeof(struct t4_vpd_hdr);
2973	vpdr = (const void *)(vpd + offset);
2974	tag = vpdr->vpdr_tag;
2975	len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2976	while (region--) {
2977		offset += sizeof(struct t4_vpdr_hdr) + len;
2978		vpdr = (const void *)(vpd + offset);
2979		if (++tag != vpdr->vpdr_tag)
2980			return -ENOENT;
2981		len = (u16)vpdr->vpdr_len[0] + ((u16)vpdr->vpdr_len[1] << 8);
2982	}
2983	offset += sizeof(struct t4_vpdr_hdr);
2984
2985	if (offset + len > VPD_LEN) {
2986		return -ENOENT;
2987	}
2988
2989	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2990		if (memcmp(vpd + i , kw , 2) == 0){
2991			i += VPD_INFO_FLD_HDR_SIZE;
2992			return i;
2993		}
2994
2995		i += VPD_INFO_FLD_HDR_SIZE + vpd[i+2];
2996	}
2997
2998	return -ENOENT;
2999}
3000

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@device_id: PCI device id of the adapter (bit 7 set marks a custom card)
 *	@buf: caller provided temporary space (VPD_LEN bytes) the VPD is read into
 *
 *	Reads card parameters stored in VPD EEPROM and fills in @p (id, ec,
 *	sn, pn, na and, for non-custom cards, md).
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
    uint16_t device_id, u32 *buf)
{
	int i, ret, addr;
	int ec, sn, pn, na, md;	/* offsets of keyword values within vpd */
	u8 csum;
	const u8 *vpd = (const u8 *)buf;	/* byte view of @buf */

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, buf);
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Buffer the whole VPD image, one 32-bit word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, buf++);
		if (ret)
			return ret;
	}

/* Look up a keyword's value offset; fail the whole call if it's absent. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(vpd, name, 0); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/*
	 * The "RV" field holds a checksum byte: the sum of all VPD bytes
	 * from the start up to and including the RV value must be zero.
	 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/*
	 * Copy the fields out, trimming trailing whitespace.  For variable
	 * length fields the 1-byte length lives at offset 2 of the field's
	 * 3-byte header, i.e. just before the value offset we got back.
	 */
	memcpy(p->id, vpd + offsetof(struct t4_vpd_hdr, id_data), ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	if (device_id & 0x80)
		return 0;	/* Custom card */

	/*
	 * "VF" in the second VPD-R region supplies p->md; absence is not an
	 * error, we just record "unknown".
	 */
	md = get_vpd_keyword_val(vpd, "VF", 1);
	if (md < 0) {
		snprintf(p->md, sizeof(p->md), "unknown");
	} else {
		i = vpd[md - VPD_INFO_FLD_HDR_SIZE + 2];
		memcpy(p->md, vpd + md, min(i, MD_LEN));
		strstrip((char *)p->md);
	}

	return 0;
}
3093
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes, issued via sf1_write() */
	SF_PROG_PAGE    = 2,	/* program 256B page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3107
3108/**
3109 *	sf1_read - read data from the serial flash
3110 *	@adapter: the adapter
3111 *	@byte_cnt: number of bytes to read
3112 *	@cont: whether another operation will be chained
3113 *	@lock: whether to lock SF for PL access only
3114 *	@valp: where to store the read data
3115 *
3116 *	Reads up to 4 bytes of data from the serial flash.  The location of
3117 *	the read needs to be specified prior to calling this by issuing the
3118 *	appropriate commands to the serial flash.
3119 */
3120static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3121		    int lock, u32 *valp)
3122{
3123	int ret;
3124
3125	if (!byte_cnt || byte_cnt > 4)
3126		return -EINVAL;
3127	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3128		return -EBUSY;
3129	t4_write_reg(adapter, A_SF_OP,
3130		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3131	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3132	if (!ret)
3133		*valp = t4_read_reg(adapter, A_SF_DATA);
3134	return ret;
3135}
3136
3137/**
3138 *	sf1_write - write data to the serial flash
3139 *	@adapter: the adapter
3140 *	@byte_cnt: number of bytes to write
3141 *	@cont: whether another operation will be chained
3142 *	@lock: whether to lock SF for PL access only
3143 *	@val: value to write
3144 *
3145 *	Writes up to 4 bytes of data to the serial flash.  The location of
3146 *	the write needs to be specified prior to calling this by issuing the
3147 *	appropriate commands to the serial flash.
3148 */
3149static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3150		     int lock, u32 val)
3151{
3152	if (!byte_cnt || byte_cnt > 4)
3153		return -EINVAL;
3154	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3155		return -EBUSY;
3156	t4_write_reg(adapter, A_SF_DATA, val);
3157	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3158		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3159	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3160}
3161
3162/**
3163 *	flash_wait_op - wait for a flash operation to complete
3164 *	@adapter: the adapter
3165 *	@attempts: max number of polls of the status register
3166 *	@delay: delay between polls in ms
3167 *
3168 *	Wait for a flash operation to complete by polling the status register.
3169 */
3170static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3171{
3172	int ret;
3173	u32 status;
3174
3175	while (1) {
3176		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3177		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3178			return ret;
3179		if (!(status & 1))
3180			return 0;
3181		if (--attempts == 0)
3182			return -EAGAIN;
3183		if (delay)
3184			msleep(delay);
3185	}
3186}
3187
3188/**
3189 *	t4_read_flash - read words from serial flash
3190 *	@adapter: the adapter
3191 *	@addr: the start address for the read
3192 *	@nwords: how many 32-bit words to read
3193 *	@data: where to store the read data
3194 *	@byte_oriented: whether to store data as bytes or as words
3195 *
3196 *	Read the specified number of 32-bit words from the serial flash.
3197 *	If @byte_oriented is set the read data is stored as a byte array
3198 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3199 *	natural endianness.
3200 */
3201int t4_read_flash(struct adapter *adapter, unsigned int addr,
3202		  unsigned int nwords, u32 *data, int byte_oriented)
3203{
3204	int ret;
3205
3206	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3207		return -EINVAL;
3208
3209	addr = swab32(addr) | SF_RD_DATA_FAST;
3210
3211	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3212	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3213		return ret;
3214
3215	for ( ; nwords; nwords--, data++) {
3216		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3217		if (nwords == 1)
3218			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3219		if (ret)
3220			return ret;
3221		if (byte_oriented)
3222			*data = (__force __u32)(cpu_to_be32(*data));
3223	}
3224	return 0;
3225}
3226
3227/**
3228 *	t4_write_flash - write up to a page of data to the serial flash
3229 *	@adapter: the adapter
3230 *	@addr: the start address to write
3231 *	@n: length of data to write in bytes
3232 *	@data: the data to write
3233 *	@byte_oriented: whether to store data as bytes or as words
3234 *
3235 *	Writes up to a page of data (256 bytes) to the serial flash starting
3236 *	at the given address.  All the data must be written to the same page.
3237 *	If @byte_oriented is set the write data is stored as byte stream
3238 *	(i.e. matches what on disk), otherwise in big-endian.
3239 */
3240int t4_write_flash(struct adapter *adapter, unsigned int addr,
3241			  unsigned int n, const u8 *data, int byte_oriented)
3242{
3243	int ret;
3244	u32 buf[SF_PAGE_SIZE / 4];
3245	unsigned int i, c, left, val, offset = addr & 0xff;
3246
3247	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3248		return -EINVAL;
3249
3250	val = swab32(addr) | SF_PROG_PAGE;
3251
3252	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3253	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3254		goto unlock;
3255
3256	for (left = n; left; left -= c) {
3257		c = min(left, 4U);
3258		for (val = 0, i = 0; i < c; ++i)
3259			val = (val << 8) + *data++;
3260
3261		if (!byte_oriented)
3262			val = cpu_to_be32(val);
3263
3264		ret = sf1_write(adapter, c, c != left, 1, val);
3265		if (ret)
3266			goto unlock;
3267	}
3268	ret = flash_wait_op(adapter, 8, 1);
3269	if (ret)
3270		goto unlock;
3271
3272	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3273
3274	/* Read the page to verify the write succeeded */
3275	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3276			    byte_oriented);
3277	if (ret)
3278		return ret;
3279
3280	if (memcmp(data - n, (u8 *)buf + offset, n)) {
3281		CH_ERR(adapter,
3282			"failed to correctly write the flash page at %#x\n",
3283			addr);
3284		return -EIO;
3285	}
3286	return 0;
3287
3288unlock:
3289	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3290	return ret;
3291}
3292
3293/**
3294 *	t4_get_fw_version - read the firmware version
3295 *	@adapter: the adapter
3296 *	@vers: where to place the version
3297 *
3298 *	Reads the FW version from flash.
3299 */
3300int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3301{
3302	return t4_read_flash(adapter, FLASH_FW_START +
3303			     offsetof(struct fw_hdr, fw_ver), 1,
3304			     vers, 0);
3305}
3306
3307/**
3308 *	t4_get_fw_hdr - read the firmware header
3309 *	@adapter: the adapter
3310 *	@hdr: where to place the version
3311 *
3312 *	Reads the FW header from flash into caller provided buffer.
3313 */
3314int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
3315{
3316	return t4_read_flash(adapter, FLASH_FW_START,
3317	    sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
3318}
3319
3320/**
3321 *	t4_get_bs_version - read the firmware bootstrap version
3322 *	@adapter: the adapter
3323 *	@vers: where to place the version
3324 *
3325 *	Reads the FW Bootstrap version from flash.
3326 */
3327int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3328{
3329	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3330			     offsetof(struct fw_hdr, fw_ver), 1,
3331			     vers, 0);
3332}
3333
3334/**
3335 *	t4_get_tp_version - read the TP microcode version
3336 *	@adapter: the adapter
3337 *	@vers: where to place the version
3338 *
3339 *	Reads the TP microcode version from flash.
3340 */
3341int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3342{
3343	return t4_read_flash(adapter, FLASH_FW_START +
3344			     offsetof(struct fw_hdr, tp_microcode_ver),
3345			     1, vers, 0);
3346}
3347
3348/**
3349 *	t4_get_exprom_version - return the Expansion ROM version (if any)
3350 *	@adapter: the adapter
3351 *	@vers: where to place the version
3352 *
3353 *	Reads the Expansion ROM header from FLASH and returns the version
3354 *	number (if present) through the @vers return value pointer.  We return
3355 *	this in the Firmware Version Format since it's convenient.  Return
3356 *	0 on success, -ENOENT if no Expansion ROM is present.
3357 */
3358int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3359{
3360	struct exprom_header {
3361		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3362		unsigned char hdr_ver[4];	/* Expansion ROM version */
3363	} *hdr;
3364	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3365					   sizeof(u32))];
3366	int ret;
3367
3368	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3369			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3370			    0);
3371	if (ret)
3372		return ret;
3373
3374	hdr = (struct exprom_header *)exprom_header_buf;
3375	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3376		return -ENOENT;
3377
3378	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3379		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3380		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3381		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3382	return 0;
3383}
3384
3385/**
3386 *	t4_get_scfg_version - return the Serial Configuration version
3387 *	@adapter: the adapter
3388 *	@vers: where to place the version
3389 *
3390 *	Reads the Serial Configuration Version via the Firmware interface
3391 *	(thus this can only be called once we're ready to issue Firmware
3392 *	commands).  The format of the Serial Configuration version is
3393 *	adapter specific.  Returns 0 on success, an error on failure.
3394 *
3395 *	Note that early versions of the Firmware didn't include the ability
3396 *	to retrieve the Serial Configuration version, so we zero-out the
3397 *	return-value parameter in that case to avoid leaving it with
3398 *	garbage in it.
3399 *
3400 *	Also note that the Firmware will return its cached copy of the Serial
3401 *	Initialization Revision ID, not the actual Revision ID as written in
3402 *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3403 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3404 *	it's best to defer calling this routine till after a FW_RESET_CMD has
3405 *	been issued if the Host Driver will be performing a full adapter
3406 *	initialization.
3407 */
3408int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3409{
3410	u32 scfgrev_param;
3411	int ret;
3412
3413	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3414			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3415	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3416			      1, &scfgrev_param, vers);
3417	if (ret)
3418		*vers = 0;
3419	return ret;
3420}
3421
3422/**
3423 *	t4_get_vpd_version - return the VPD version
3424 *	@adapter: the adapter
3425 *	@vers: where to place the version
3426 *
3427 *	Reads the VPD via the Firmware interface (thus this can only be called
3428 *	once we're ready to issue Firmware commands).  The format of the
3429 *	VPD version is adapter specific.  Returns 0 on success, an error on
3430 *	failure.
3431 *
3432 *	Note that early versions of the Firmware didn't include the ability
3433 *	to retrieve the VPD version, so we zero-out the return-value parameter
3434 *	in that case to avoid leaving it with garbage in it.
3435 *
3436 *	Also note that the Firmware will return its cached copy of the VPD
3437 *	Revision ID, not the actual Revision ID as written in the Serial
3438 *	EEPROM.  This is only an issue if a new VPD has been written and the
3439 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3440 *	to defer calling this routine till after a FW_RESET_CMD has been issued
3441 *	if the Host Driver will be performing a full adapter initialization.
3442 */
3443int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3444{
3445	u32 vpdrev_param;
3446	int ret;
3447
3448	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3449			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3450	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3451			      1, &vpdrev_param, vers);
3452	if (ret)
3453		*vers = 0;
3454	return ret;
3455}
3456
3457/**
3458 *	t4_get_version_info - extract various chip/firmware version information
3459 *	@adapter: the adapter
3460 *
3461 *	Reads various chip/firmware version numbers and stores them into the
3462 *	adapter Adapter Parameters structure.  If any of the efforts fails
3463 *	the first failure will be returned, but all of the version numbers
3464 *	will be read.
3465 */
3466int t4_get_version_info(struct adapter *adapter)
3467{
3468	int ret = 0;
3469
3470	#define FIRST_RET(__getvinfo) \
3471	do { \
3472		int __ret = __getvinfo; \
3473		if (__ret && !ret) \
3474			ret = __ret; \
3475	} while (0)
3476
3477	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3478	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3479	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3480	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3481	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3482	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3483
3484	#undef FIRST_RET
3485
3486	return ret;
3487}
3488
3489/**
3490 *	t4_flash_erase_sectors - erase a range of flash sectors
3491 *	@adapter: the adapter
3492 *	@start: the first sector to erase
3493 *	@end: the last sector to erase
3494 *
3495 *	Erases the sectors in the given inclusive range.
3496 */
3497int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3498{
3499	int ret = 0;
3500
3501	if (end >= adapter->params.sf_nsec)
3502		return -EINVAL;
3503
3504	while (start <= end) {
3505		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3506		    (ret = sf1_write(adapter, 4, 0, 1,
3507				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3508		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3509			CH_ERR(adapter,
3510				"erase of flash sector %d failed, error %d\n",
3511				start, ret);
3512			break;
3513		}
3514		start++;
3515	}
3516	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3517	return ret;
3518}
3519
3520/**
3521 *	t4_flash_cfg_addr - return the address of the flash configuration file
3522 *	@adapter: the adapter
3523 *
3524 *	Return the address within the flash where the Firmware Configuration
3525 *	File is stored, or an error if the device FLASH is too small to contain
3526 *	a Firmware Configuration File.
3527 */
3528int t4_flash_cfg_addr(struct adapter *adapter)
3529{
3530	/*
3531	 * If the device FLASH isn't large enough to hold a Firmware
3532	 * Configuration File, return an error.
3533	 */
3534	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3535		return -ENOSPC;
3536
3537	return FLASH_CFG_START;
3538}
3539
3540/*
3541 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
3542 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3543 * and emit an error message for mismatched firmware to save our caller the
3544 * effort ...
3545 */
3546static int t4_fw_matches_chip(struct adapter *adap,
3547			      const struct fw_hdr *hdr)
3548{
3549	/*
3550	 * The expression below will return FALSE for any unsupported adapter
3551	 * which will keep us "honest" in the future ...
3552	 */
3553	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3554	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3555	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3556		return 1;
3557
3558	CH_ERR(adap,
3559		"FW image (%d) is not suitable for this adapter (%d)\n",
3560		hdr->chip, chip_id(adap));
3561	return 0;
3562}
3563
3564/**
3565 *	t4_load_fw - download firmware
3566 *	@adap: the adapter
3567 *	@fw_data: the firmware image to write
3568 *	@size: image size
3569 *
3570 *	Write the supplied firmware image to the card's serial flash.
3571 */
3572int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3573{
3574	u32 csum;
3575	int ret, addr;
3576	unsigned int i;
3577	u8 first_page[SF_PAGE_SIZE];
3578	const u32 *p = (const u32 *)fw_data;
3579	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3580	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3581	unsigned int fw_start_sec;
3582	unsigned int fw_start;
3583	unsigned int fw_size;
3584
3585	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3586		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3587		fw_start = FLASH_FWBOOTSTRAP_START;
3588		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3589	} else {
3590		fw_start_sec = FLASH_FW_START_SEC;
3591 		fw_start = FLASH_FW_START;
3592		fw_size = FLASH_FW_MAX_SIZE;
3593	}
3594
3595	if (!size) {
3596		CH_ERR(adap, "FW image has no data\n");
3597		return -EINVAL;
3598	}
3599	if (size & 511) {
3600		CH_ERR(adap,
3601			"FW image size not multiple of 512 bytes\n");
3602		return -EINVAL;
3603	}
3604	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3605		CH_ERR(adap,
3606			"FW image size differs from size in FW header\n");
3607		return -EINVAL;
3608	}
3609	if (size > fw_size) {
3610		CH_ERR(adap, "FW image too large, max is %u bytes\n",
3611			fw_size);
3612		return -EFBIG;
3613	}
3614	if (!t4_fw_matches_chip(adap, hdr))
3615		return -EINVAL;
3616
3617	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3618		csum += be32_to_cpu(p[i]);
3619
3620	if (csum != 0xffffffff) {
3621		CH_ERR(adap,
3622			"corrupted firmware image, checksum %#x\n", csum);
3623		return -EINVAL;
3624	}
3625
3626	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
3627	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3628	if (ret)
3629		goto out;
3630
3631	/*
3632	 * We write the correct version at the end so the driver can see a bad
3633	 * version if the FW write fails.  Start by writing a copy of the
3634	 * first page with a bad version.
3635	 */
3636	memcpy(first_page, fw_data, SF_PAGE_SIZE);
3637	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3638	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3639	if (ret)
3640		goto out;
3641
3642	addr = fw_start;
3643	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3644		addr += SF_PAGE_SIZE;
3645		fw_data += SF_PAGE_SIZE;
3646		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3647		if (ret)
3648			goto out;
3649	}
3650
3651	ret = t4_write_flash(adap,
3652			     fw_start + offsetof(struct fw_hdr, fw_ver),
3653			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3654out:
3655	if (ret)
3656		CH_ERR(adap, "firmware download failed, error %d\n",
3657			ret);
3658	return ret;
3659}
3660
3661/**
3662 *	t4_fwcache - firmware cache operation
3663 *	@adap: the adapter
3664 *	@op  : the operation (flush or flush and invalidate)
3665 */
3666int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3667{
3668	struct fw_params_cmd c;
3669
3670	memset(&c, 0, sizeof(c));
3671	c.op_to_vfn =
3672	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3673			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3674				V_FW_PARAMS_CMD_PFN(adap->pf) |
3675				V_FW_PARAMS_CMD_VFN(0));
3676	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3677	c.param[0].mnem =
3678	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3679			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3680	c.param[0].val = (__force __be32)op;
3681
3682	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3683}
3684
/*
 * Read the CIM PIF logic-analyzer capture into @pif_req / @pif_rsp
 * (CIM_PIFLA_SIZE rows of 6 entries each) and optionally return the
 * current request/response write pointers.  Capture is paused (LADBGEN
 * cleared) while the buffers are read and the original debug config is
 * restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* If capture is enabled, clear LADBGEN (xor of a set bit) first. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the read pointers, then latch both entries. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip 2 entries per row; wrap within the pointer field. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration (re-enables capture). */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
3718
/*
 * Read the CIM MA logic-analyzer capture into @ma_req / @ma_rsp
 * (CIM_MALA_SIZE rows of 5 entries each, read from indices 8*i+j).
 * Capture is paused (LADBGEN cleared) while reading and the original
 * debug configuration is restored afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* If capture is enabled, clear LADBGEN (xor of a set bit) first. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Select the entry, then latch request + response. */
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
3739
/*
 * Dump the ULP_RX logic analyzer into @la_buf.  There are 8 capture
 * selects (i); for each, ULPRX_LA_SIZE words are read.  The output is
 * column-interleaved: entry j of select i lands at la_buf[i + 8 * j],
 * so @la_buf must hold at least 8 * ULPRX_LA_SIZE words.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		/* Start reading from the current write pointer. */
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
3754
3755/**
3756 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3757 *	@caps16: a 16-bit Port Capabilities value
3758 *
3759 *	Returns the equivalent 32-bit Port Capabilities value.
3760 */
3761static uint32_t fwcaps16_to_caps32(uint16_t caps16)
3762{
3763	uint32_t caps32 = 0;
3764
3765	#define CAP16_TO_CAP32(__cap) \
3766		do { \
3767			if (caps16 & FW_PORT_CAP_##__cap) \
3768				caps32 |= FW_PORT_CAP32_##__cap; \
3769		} while (0)
3770
3771	CAP16_TO_CAP32(SPEED_100M);
3772	CAP16_TO_CAP32(SPEED_1G);
3773	CAP16_TO_CAP32(SPEED_25G);
3774	CAP16_TO_CAP32(SPEED_10G);
3775	CAP16_TO_CAP32(SPEED_40G);
3776	CAP16_TO_CAP32(SPEED_100G);
3777	CAP16_TO_CAP32(FC_RX);
3778	CAP16_TO_CAP32(FC_TX);
3779	CAP16_TO_CAP32(ANEG);
3780	CAP16_TO_CAP32(FORCE_PAUSE);
3781	CAP16_TO_CAP32(MDIAUTO);
3782	CAP16_TO_CAP32(MDISTRAIGHT);
3783	CAP16_TO_CAP32(FEC_RS);
3784	CAP16_TO_CAP32(FEC_BASER_RS);
3785	CAP16_TO_CAP32(802_3_PAUSE);
3786	CAP16_TO_CAP32(802_3_ASM_DIR);
3787
3788	#undef CAP16_TO_CAP32
3789
3790	return caps32;
3791}
3792
3793/**
3794 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3795 *	@caps32: a 32-bit Port Capabilities value
3796 *
3797 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
3798 *	not all 32-bit Port Capabilities can be represented in the 16-bit
3799 *	Port Capabilities and some fields/values may not make it.
3800 */
3801static uint16_t fwcaps32_to_caps16(uint32_t caps32)
3802{
3803	uint16_t caps16 = 0;
3804
3805	#define CAP32_TO_CAP16(__cap) \
3806		do { \
3807			if (caps32 & FW_PORT_CAP32_##__cap) \
3808				caps16 |= FW_PORT_CAP_##__cap; \
3809		} while (0)
3810
3811	CAP32_TO_CAP16(SPEED_100M);
3812	CAP32_TO_CAP16(SPEED_1G);
3813	CAP32_TO_CAP16(SPEED_10G);
3814	CAP32_TO_CAP16(SPEED_25G);
3815	CAP32_TO_CAP16(SPEED_40G);
3816	CAP32_TO_CAP16(SPEED_100G);
3817	CAP32_TO_CAP16(FC_RX);
3818	CAP32_TO_CAP16(FC_TX);
3819	CAP32_TO_CAP16(802_3_PAUSE);
3820	CAP32_TO_CAP16(802_3_ASM_DIR);
3821	CAP32_TO_CAP16(ANEG);
3822	CAP32_TO_CAP16(FORCE_PAUSE);
3823	CAP32_TO_CAP16(MDIAUTO);
3824	CAP32_TO_CAP16(MDISTRAIGHT);
3825	CAP32_TO_CAP16(FEC_RS);
3826	CAP32_TO_CAP16(FEC_BASER_RS);
3827
3828	#undef CAP32_TO_CAP16
3829
3830	return caps16;
3831}
3832
3833static bool
3834is_bt(struct port_info *pi)
3835{
3836
3837	return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
3838	    pi->port_type == FW_PORT_TYPE_BT_XFI ||
3839	    pi->port_type == FW_PORT_TYPE_BT_XAUI);
3840}
3841
3842/**
3843 *	t4_link_l1cfg - apply link configuration to MAC/PHY
3844 *	@phy: the PHY to setup
3845 *	@mac: the MAC to setup
3846 *	@lc: the requested link configuration
3847 *
3848 *	Set up a port's MAC and PHY according to a desired link configuration.
3849 *	- If the PHY can auto-negotiate first decide what to advertise, then
3850 *	  enable/disable auto-negotiation as desired, and reset.
3851 *	- If the PHY does not auto-negotiate just reset it.
3852 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3853 *	  otherwise do it later based on the outcome of auto-negotiation.
3854 */
3855int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3856		  struct link_config *lc)
3857{
3858	struct fw_port_cmd c;
3859	unsigned int mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
3860	unsigned int aneg, fc, fec, speed, rcap;
3861
3862	fc = 0;
3863	if (lc->requested_fc & PAUSE_RX)
3864		fc |= FW_PORT_CAP32_FC_RX;
3865	if (lc->requested_fc & PAUSE_TX)
3866		fc |= FW_PORT_CAP32_FC_TX;
3867	if (!(lc->requested_fc & PAUSE_AUTONEG))
3868		fc |= FW_PORT_CAP32_FORCE_PAUSE;
3869
3870	fec = 0;
3871	if (lc->requested_fec == FEC_AUTO)
3872		fec = lc->fec_hint;
3873	else {
3874		if (lc->requested_fec & FEC_RS)
3875			fec |= FW_PORT_CAP32_FEC_RS;
3876		if (lc->requested_fec & FEC_BASER_RS)
3877			fec |= FW_PORT_CAP32_FEC_BASER_RS;
3878	}
3879
3880	if (lc->requested_aneg == AUTONEG_DISABLE)
3881		aneg = 0;
3882	else if (lc->requested_aneg == AUTONEG_ENABLE)
3883		aneg = FW_PORT_CAP32_ANEG;
3884	else
3885		aneg = lc->supported & FW_PORT_CAP32_ANEG;
3886
3887	if (aneg) {
3888		speed = lc->supported & V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
3889	} else if (lc->requested_speed != 0)
3890		speed = speed_to_fwcap(lc->requested_speed);
3891	else
3892		speed = fwcap_top_speed(lc->supported);
3893
3894	/* Force AN on for BT cards. */
3895	if (is_bt(adap->port[adap->chan_map[port]]))
3896		aneg = lc->supported & FW_PORT_CAP32_ANEG;
3897
3898	rcap = aneg | speed | fc | fec;
3899	if ((rcap | lc->supported) != lc->supported) {
3900#ifdef INVARIANTS
3901		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3902		    lc->supported);
3903#endif
3904		rcap &= lc->supported;
3905	}
3906	rcap |= mdi;
3907
3908	memset(&c, 0, sizeof(c));
3909	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3910				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3911				     V_FW_PORT_CMD_PORTID(port));
3912	if (adap->params.port_caps32) {
3913		c.action_to_len16 =
3914		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
3915			FW_LEN16(c));
3916		c.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
3917	} else {
3918		c.action_to_len16 =
3919		    cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3920			    FW_LEN16(c));
3921		c.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
3922	}
3923
3924	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3925}
3926
3927/**
3928 *	t4_restart_aneg - restart autonegotiation
3929 *	@adap: the adapter
3930 *	@mbox: mbox to use for the FW command
3931 *	@port: the port id
3932 *
3933 *	Restarts autonegotiation for the selected port.
3934 */
3935int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3936{
3937	struct fw_port_cmd c;
3938
3939	memset(&c, 0, sizeof(c));
3940	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3941				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3942				     V_FW_PORT_CMD_PORTID(port));
3943	c.action_to_len16 =
3944		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3945			    FW_LEN16(c));
3946	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3947	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3948}
3949
/* One human-readable description for a set of INT_CAUSE bits. */
struct intr_details {
	u32 mask;		/* cause bit(s) this description covers */
	const char *msg;	/* text logged when any of the bits is set */
};

/* Handler invoked when any of the given cause bits is set. */
struct intr_action {
	u32 mask;		/* cause bit(s) that trigger the action */
	int arg;		/* passed through to the action callback */
	bool (*action)(struct adapter *, int, bool);	/* returns true if fatal */
};

/*
 * Hint for t4_handle_intr/t4_show_intr_info: a "fatal" cause bit is
 * treated as fatal only while it is also set in the INT_ENABLE register.
 */
#define NONFATAL_IF_DISABLED 1
/* Describes one interrupt-cause register and how to service it. */
struct intr_info {
	const char *name;	/* name of the INT_CAUSE register */
	int cause_reg;		/* INT_CAUSE register */
	int enable_reg;		/* INT_ENABLE register */
	u32 fatal;		/* bits that are fatal */
	int flags;		/* hints */
	const struct intr_details *details;
	const struct intr_action *actions;
};
3971
3972static inline char
3973intr_alert_char(u32 cause, u32 enable, u32 fatal)
3974{
3975
3976	if (cause & fatal)
3977		return ('!');
3978	if (cause & enable)
3979		return ('*');
3980	return ('-');
3981}
3982
/*
 * Log the state of one interrupt-cause register: a summary line with the
 * raw cause, enable, and (possibly enable-masked) fatal bits, followed by
 * one line per matching entry in ii->details, and finally any leftover
 * bits no detail entry described.  Does not modify any registers.
 */
static void
t4_show_intr_info(struct adapter *adap, const struct intr_info *ii, u32 cause)
{
	u32 enable, fatal, leftover;
	const struct intr_details *details;
	char alert;

	enable = t4_read_reg(adap, ii->enable_reg);
	/* With NONFATAL_IF_DISABLED, only enabled fatal bits count. */
	if (ii->flags & NONFATAL_IF_DISABLED)
		fatal = ii->fatal & t4_read_reg(adap, ii->enable_reg);
	else
		fatal = ii->fatal;
	alert = intr_alert_char(cause, enable, fatal);
	CH_ALERT(adap, "%c %s 0x%x = 0x%08x, E 0x%08x, F 0x%08x\n",
	    alert, ii->name, ii->cause_reg, cause, enable, fatal);

	/* Per-bit detail lines; note these use the unmasked ii->fatal. */
	leftover = cause;
	for (details = ii->details; details && details->mask != 0; details++) {
		u32 msgbits = details->mask & cause;
		if (msgbits == 0)
			continue;
		alert = intr_alert_char(msgbits, enable, ii->fatal);
		CH_ALERT(adap, "  %c [0x%08x] %s\n", alert, msgbits,
		    details->msg);
		leftover &= ~msgbits;
	}
	/* Flag undocumented bits, unless nothing at all matched. */
	if (leftover != 0 && leftover != cause)
		CH_ALERT(adap, "  ? [0x%08x]\n", leftover);
}
4012
4013/*
4014 * Returns true for fatal error.
4015 */
4016static bool
4017t4_handle_intr(struct adapter *adap, const struct intr_info *ii,
4018    u32 additional_cause, bool verbose)
4019{
4020	u32 cause, fatal;
4021	bool rc;
4022	const struct intr_action *action;
4023
4024	/* read and display cause. */
4025	cause = t4_read_reg(adap, ii->cause_reg);
4026	if (verbose || cause != 0)
4027		t4_show_intr_info(adap, ii, cause);
4028	/*
4029	 * The top level interrupt cause is a bit special and we need to ignore
4030	 * the bits that are not in the enable.  Note that we did display them
4031	 * above in t4_show_intr_info but will not clear them.
4032	 */
4033	if (ii->cause_reg == A_PL_INT_CAUSE)
4034		cause &= t4_read_reg(adap, ii->enable_reg);
4035	fatal = cause & ii->fatal;
4036	if (fatal != 0 && ii->flags & NONFATAL_IF_DISABLED)
4037		fatal &= t4_read_reg(adap, ii->enable_reg);
4038	cause |= additional_cause;
4039	if (cause == 0)
4040		return (false);
4041
4042	rc = fatal != 0;
4043	for (action = ii->actions; action && action->mask != 0; action++) {
4044		if (!(action->mask & cause))
4045			continue;
4046		rc |= (action->action)(adap, action->arg, verbose);
4047	}
4048
4049	/* clear */
4050	t4_write_reg(adap, ii->cause_reg, cause);
4051	(void)t4_read_reg(adap, ii->cause_reg);
4052
4053	return (rc);
4054}
4055
4056/*
4057 * Interrupt handler for the PCIE module.
4058 */
4059static bool pcie_intr_handler(struct adapter *adap, int arg, bool verbose)
4060{
4061	static const struct intr_details sysbus_intr_details[] = {
4062		{ F_RNPP, "RXNP array parity error" },
4063		{ F_RPCP, "RXPC array parity error" },
4064		{ F_RCIP, "RXCIF array parity error" },
4065		{ F_RCCP, "Rx completions control array parity error" },
4066		{ F_RFTP, "RXFT array parity error" },
4067		{ 0 }
4068	};
4069	static const struct intr_info sysbus_intr_info = {
4070		.name = "PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS",
4071		.cause_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4072		.enable_reg = A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE,
4073		.fatal = F_RFTP | F_RCCP | F_RCIP | F_RPCP | F_RNPP,
4074		.flags = 0,
4075		.details = sysbus_intr_details,
4076		.actions = NULL,
4077	};
4078	static const struct intr_details pcie_port_intr_details[] = {
4079		{ F_TPCP, "TXPC array parity error" },
4080		{ F_TNPP, "TXNP array parity error" },
4081		{ F_TFTP, "TXFT array parity error" },
4082		{ F_TCAP, "TXCA array parity error" },
4083		{ F_TCIP, "TXCIF array parity error" },
4084		{ F_RCAP, "RXCA array parity error" },
4085		{ F_OTDD, "outbound request TLP discarded" },
4086		{ F_RDPE, "Rx data parity error" },
4087		{ F_TDUE, "Tx uncorrectable data error" },
4088		{ 0 }
4089	};
4090	static const struct intr_info pcie_port_intr_info = {
4091		.name = "PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS",
4092		.cause_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4093		.enable_reg = A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE,
4094		.fatal = F_TPCP | F_TNPP | F_TFTP | F_TCAP | F_TCIP | F_RCAP |
4095		    F_OTDD | F_RDPE | F_TDUE,
4096		.flags = 0,
4097		.details = pcie_port_intr_details,
4098		.actions = NULL,
4099	};
4100	static const struct intr_details pcie_intr_details[] = {
4101		{ F_MSIADDRLPERR, "MSI AddrL parity error" },
4102		{ F_MSIADDRHPERR, "MSI AddrH parity error" },
4103		{ F_MSIDATAPERR, "MSI data parity error" },
4104		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error" },
4105		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error" },
4106		{ F_MSIXDATAPERR, "MSI-X data parity error" },
4107		{ F_MSIXDIPERR, "MSI-X DI parity error" },
4108		{ F_PIOCPLPERR, "PCIe PIO completion FIFO parity error" },
4109		{ F_PIOREQPERR, "PCIe PIO request FIFO parity error" },
4110		{ F_TARTAGPERR, "PCIe target tag FIFO parity error" },
4111		{ F_CCNTPERR, "PCIe CMD channel count parity error" },
4112		{ F_CREQPERR, "PCIe CMD channel request parity error" },
4113		{ F_CRSPPERR, "PCIe CMD channel response parity error" },
4114		{ F_DCNTPERR, "PCIe DMA channel count parity error" },
4115		{ F_DREQPERR, "PCIe DMA channel request parity error" },
4116		{ F_DRSPPERR, "PCIe DMA channel response parity error" },
4117		{ F_HCNTPERR, "PCIe HMA channel count parity error" },
4118		{ F_HREQPERR, "PCIe HMA channel request parity error" },
4119		{ F_HRSPPERR, "PCIe HMA channel response parity error" },
4120		{ F_CFGSNPPERR, "PCIe config snoop FIFO parity error" },
4121		{ F_FIDPERR, "PCIe FID parity error" },
4122		{ F_INTXCLRPERR, "PCIe INTx clear parity error" },
4123		{ F_MATAGPERR, "PCIe MA tag parity error" },
4124		{ F_PIOTAGPERR, "PCIe PIO tag parity error" },
4125		{ F_RXCPLPERR, "PCIe Rx completion parity error" },
4126		{ F_RXWRPERR, "PCIe Rx write parity error" },
4127		{ F_RPLPERR, "PCIe replay buffer parity error" },
4128		{ F_PCIESINT, "PCIe core secondary fault" },
4129		{ F_PCIEPINT, "PCIe core primary fault" },
4130		{ F_UNXSPLCPLERR, "PCIe unexpected split completion error" },
4131		{ 0 }
4132	};
4133	static const struct intr_details t5_pcie_intr_details[] = {
4134		{ F_IPGRPPERR, "Parity errors observed by IP" },
4135		{ F_NONFATALERR, "PCIe non-fatal error" },
4136		{ F_READRSPERR, "Outbound read error" },
4137		{ F_TRGT1GRPPERR, "PCIe TRGT1 group FIFOs parity error" },
4138		{ F_IPSOTPERR, "PCIe IP SOT buffer SRAM parity error" },
4139		{ F_IPRETRYPERR, "PCIe IP replay buffer parity error" },
4140		{ F_IPRXDATAGRPPERR, "PCIe IP Rx data group SRAMs parity error" },
4141		{ F_IPRXHDRGRPPERR, "PCIe IP Rx header group SRAMs parity error" },
4142		{ F_PIOTAGQPERR, "PIO tag queue FIFO parity error" },
4143		{ F_MAGRPPERR, "MA group FIFO parity error" },
4144		{ F_VFIDPERR, "VFID SRAM parity error" },
4145		{ F_FIDPERR, "FID SRAM parity error" },
4146		{ F_CFGSNPPERR, "config snoop FIFO parity error" },
4147		{ F_HRSPPERR, "HMA channel response data SRAM parity error" },
4148		{ F_HREQRDPERR, "HMA channel read request SRAM parity error" },
4149		{ F_HREQWRPERR, "HMA channel write request SRAM parity error" },
4150		{ F_DRSPPERR, "DMA channel response data SRAM parity error" },
4151		{ F_DREQRDPERR, "DMA channel write request SRAM parity error" },
4152		{ F_CRSPPERR, "CMD channel response data SRAM parity error" },
4153		{ F_CREQRDPERR, "CMD channel read request SRAM parity error" },
4154		{ F_MSTTAGQPERR, "PCIe master tag queue SRAM parity error" },
4155		{ F_TGTTAGQPERR, "PCIe target tag queue FIFO parity error" },
4156		{ F_PIOREQGRPPERR, "PIO request group FIFOs parity error" },
4157		{ F_PIOCPLGRPPERR, "PIO completion group FIFOs parity error" },
4158		{ F_MSIXDIPERR, "MSI-X DI SRAM parity error" },
4159		{ F_MSIXDATAPERR, "MSI-X data SRAM parity error" },
4160		{ F_MSIXADDRHPERR, "MSI-X AddrH SRAM parity error" },
4161		{ F_MSIXADDRLPERR, "MSI-X AddrL SRAM parity error" },
4162		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error" },
4163		{ F_MSTTIMEOUTPERR, "Master timeout FIFO parity error" },
4164		{ F_MSTGRPPERR, "Master response read queue SRAM parity error" },
4165		{ 0 }
4166	};
4167	struct intr_info pcie_intr_info = {
4168		.name = "PCIE_INT_CAUSE",
4169		.cause_reg = A_PCIE_INT_CAUSE,
4170		.enable_reg = A_PCIE_INT_ENABLE,
4171		.fatal = 0xffffffff,
4172		.flags = NONFATAL_IF_DISABLED,
4173		.details = NULL,
4174		.actions = NULL,
4175	};
4176	bool fatal = false;
4177
4178	if (is_t4(adap)) {
4179		fatal |= t4_handle_intr(adap, &sysbus_intr_info, 0, verbose);
4180		fatal |= t4_handle_intr(adap, &pcie_port_intr_info, 0, verbose);
4181
4182		pcie_intr_info.details = pcie_intr_details;
4183	} else {
4184		pcie_intr_info.details = t5_pcie_intr_details;
4185	}
4186	fatal |= t4_handle_intr(adap, &pcie_intr_info, 0, verbose);
4187
4188	return (fatal);
4189}
4190
4191/*
4192 * TP interrupt handler.
4193 */
4194static bool tp_intr_handler(struct adapter *adap, int arg, bool verbose)
4195{
4196	static const struct intr_details tp_intr_details[] = {
4197		{ 0x3fffffff, "TP parity error" },
4198		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages" },
4199		{ 0 }
4200	};
4201	static const struct intr_info tp_intr_info = {
4202		.name = "TP_INT_CAUSE",
4203		.cause_reg = A_TP_INT_CAUSE,
4204		.enable_reg = A_TP_INT_ENABLE,
4205		.fatal = 0x7fffffff,
4206		.flags = NONFATAL_IF_DISABLED,
4207		.details = tp_intr_details,
4208		.actions = NULL,
4209	};
4210
4211	return (t4_handle_intr(adap, &tp_intr_info, 0, verbose));
4212}
4213
4214/*
4215 * SGE interrupt handler.
4216 */
4217static bool sge_intr_handler(struct adapter *adap, int arg, bool verbose)
4218{
4219	static const struct intr_info sge_int1_info = {
4220		.name = "SGE_INT_CAUSE1",
4221		.cause_reg = A_SGE_INT_CAUSE1,
4222		.enable_reg = A_SGE_INT_ENABLE1,
4223		.fatal = 0xffffffff,
4224		.flags = NONFATAL_IF_DISABLED,
4225		.details = NULL,
4226		.actions = NULL,
4227	};
4228	static const struct intr_info sge_int2_info = {
4229		.name = "SGE_INT_CAUSE2",
4230		.cause_reg = A_SGE_INT_CAUSE2,
4231		.enable_reg = A_SGE_INT_ENABLE2,
4232		.fatal = 0xffffffff,
4233		.flags = NONFATAL_IF_DISABLED,
4234		.details = NULL,
4235		.actions = NULL,
4236	};
4237	static const struct intr_details sge_int3_details[] = {
4238		{ F_ERR_FLM_DBP,
4239			"DBP pointer delivery for invalid context or QID" },
4240		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4241			"Invalid QID or header request by IDMA" },
4242		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4243		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4244		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4245		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4246		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4247		{ F_ERR_TIMER_ABOVE_MAX_QID,
4248			"SGE GTS with timer 0-5 for IQID > 1023" },
4249		{ F_ERR_CPL_EXCEED_IQE_SIZE,
4250			"SGE received CPL exceeding IQE size" },
4251		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4252		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4253		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4254		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
4255		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4256		  "SGE IQID > 1023 received CPL for FL" },
4257		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4258			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4259		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4260		{ F_ERR_ING_CTXT_PRIO,
4261			"Ingress context manager priority user error" },
4262		{ F_ERR_EGR_CTXT_PRIO,
4263			"Egress context manager priority user error" },
4264		{ F_DBFIFO_HP_INT, "High priority DB FIFO threshold reached" },
4265		{ F_DBFIFO_LP_INT, "Low priority DB FIFO threshold reached" },
4266		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4267		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4268		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4269		{ 0x0000000f, "SGE context access for invalid queue" },
4270		{ 0 }
4271	};
4272	static const struct intr_details t6_sge_int3_details[] = {
4273		{ F_ERR_FLM_DBP,
4274			"DBP pointer delivery for invalid context or QID" },
4275		{ F_ERR_FLM_IDMA1 | F_ERR_FLM_IDMA0,
4276			"Invalid QID or header request by IDMA" },
4277		{ F_ERR_FLM_HINT, "FLM hint is for invalid context or QID" },
4278		{ F_ERR_PCIE_ERROR3, "SGE PCIe error for DBP thread 3" },
4279		{ F_ERR_PCIE_ERROR2, "SGE PCIe error for DBP thread 2" },
4280		{ F_ERR_PCIE_ERROR1, "SGE PCIe error for DBP thread 1" },
4281		{ F_ERR_PCIE_ERROR0, "SGE PCIe error for DBP thread 0" },
4282		{ F_ERR_TIMER_ABOVE_MAX_QID,
4283			"SGE GTS with timer 0-5 for IQID > 1023" },
4284		{ F_ERR_CPL_EXCEED_IQE_SIZE,
4285			"SGE received CPL exceeding IQE size" },
4286		{ F_ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large" },
4287		{ F_ERR_ITP_TIME_PAUSED, "SGE ITP error" },
4288		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL" },
4289		{ F_ERR_DROPPED_DB, "SGE DB dropped" },
4290		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
4291			"SGE IQID > 1023 received CPL for FL" },
4292		{ F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4293			F_ERR_BAD_DB_PIDX0, "SGE DBP pidx increment too large" },
4294		{ F_ERR_ING_PCIE_CHAN, "SGE Ingress PCIe channel mismatch" },
4295		{ F_ERR_ING_CTXT_PRIO,
4296			"Ingress context manager priority user error" },
4297		{ F_ERR_EGR_CTXT_PRIO,
4298			"Egress context manager priority user error" },
4299		{ F_DBP_TBUF_FULL, "SGE DBP tbuf full" },
4300		{ F_FATAL_WRE_LEN,
4301			"SGE WRE packet less than advertized length" },
4302		{ F_REG_ADDRESS_ERR, "Undefined SGE register accessed" },
4303		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID" },
4304		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID" },
4305		{ 0x0000000f, "SGE context access for invalid queue" },
4306		{ 0 }
4307	};
4308	struct intr_info sge_int3_info = {
4309		.name = "SGE_INT_CAUSE3",
4310		.cause_reg = A_SGE_INT_CAUSE3,
4311		.enable_reg = A_SGE_INT_ENABLE3,
4312		.fatal = F_ERR_CPL_EXCEED_IQE_SIZE,
4313		.flags = 0,
4314		.details = NULL,
4315		.actions = NULL,
4316	};
4317	static const struct intr_info sge_int4_info = {
4318		.name = "SGE_INT_CAUSE4",
4319		.cause_reg = A_SGE_INT_CAUSE4,
4320		.enable_reg = A_SGE_INT_ENABLE4,
4321		.fatal = 0,
4322		.flags = 0,
4323		.details = NULL,
4324		.actions = NULL,
4325	};
4326	static const struct intr_info sge_int5_info = {
4327		.name = "SGE_INT_CAUSE5",
4328		.cause_reg = A_SGE_INT_CAUSE5,
4329		.enable_reg = A_SGE_INT_ENABLE5,
4330		.fatal = 0xffffffff,
4331		.flags = NONFATAL_IF_DISABLED,
4332		.details = NULL,
4333		.actions = NULL,
4334	};
4335	static const struct intr_info sge_int6_info = {
4336		.name = "SGE_INT_CAUSE6",
4337		.cause_reg = A_SGE_INT_CAUSE6,
4338		.enable_reg = A_SGE_INT_ENABLE6,
4339		.fatal = 0,
4340		.flags = 0,
4341		.details = NULL,
4342		.actions = NULL,
4343	};
4344
4345	bool fatal;
4346	u32 v;
4347
4348	if (chip_id(adap) <= CHELSIO_T5) {
4349		sge_int3_info.details = sge_int3_details;
4350	} else {
4351		sge_int3_info.details = t6_sge_int3_details;
4352	}
4353
4354	fatal = false;
4355	fatal |= t4_handle_intr(adap, &sge_int1_info, 0, verbose);
4356	fatal |= t4_handle_intr(adap, &sge_int2_info, 0, verbose);
4357	fatal |= t4_handle_intr(adap, &sge_int3_info, 0, verbose);
4358	fatal |= t4_handle_intr(adap, &sge_int4_info, 0, verbose);
4359	if (chip_id(adap) >= CHELSIO_T5)
4360		fatal |= t4_handle_intr(adap, &sge_int5_info, 0, verbose);
4361	if (chip_id(adap) >= CHELSIO_T6)
4362		fatal |= t4_handle_intr(adap, &sge_int6_info, 0, verbose);
4363
4364	v = t4_read_reg(adap, A_SGE_ERROR_STATS);
4365	if (v & F_ERROR_QID_VALID) {
4366		CH_ERR(adap, "SGE error for QID %u\n", G_ERROR_QID(v));
4367		if (v & F_UNCAPTURED_ERROR)
4368			CH_ERR(adap, "SGE UNCAPTURED_ERROR set (clearing)\n");
4369		t4_write_reg(adap, A_SGE_ERROR_STATS,
4370		    F_ERROR_QID_VALID | F_UNCAPTURED_ERROR);
4371	}
4372
4373	return (fatal);
4374}
4375
4376/*
4377 * CIM interrupt handler.
4378 */
4379static bool cim_intr_handler(struct adapter *adap, int arg, bool verbose)
4380{
4381	static const struct intr_action cim_host_intr_actions[] = {
4382		{ F_TIMER0INT, 0, t4_os_dump_cimla },
4383		{ 0 },
4384	};
4385	static const struct intr_details cim_host_intr_details[] = {
4386		/* T6+ */
4387		{ F_PCIE2CIMINTFPARERR, "CIM IBQ PCIe interface parity error" },
4388
4389		/* T5+ */
4390		{ F_MA_CIM_INTFPERR, "MA2CIM interface parity error" },
4391		{ F_PLCIM_MSTRSPDATAPARERR,
4392			"PL2CIM master response data parity error" },
4393		{ F_NCSI2CIMINTFPARERR, "CIM IBQ NC-SI interface parity error" },
4394		{ F_SGE2CIMINTFPARERR, "CIM IBQ SGE interface parity error" },
4395		{ F_ULP2CIMINTFPARERR, "CIM IBQ ULP_TX interface parity error" },
4396		{ F_TP2CIMINTFPARERR, "CIM IBQ TP interface parity error" },
4397		{ F_OBQSGERX1PARERR, "CIM OBQ SGE1_RX parity error" },
4398		{ F_OBQSGERX0PARERR, "CIM OBQ SGE0_RX parity error" },
4399
4400		/* T4+ */
4401		{ F_TIEQOUTPARERRINT, "CIM TIEQ outgoing FIFO parity error" },
4402		{ F_TIEQINPARERRINT, "CIM TIEQ incoming FIFO parity error" },
4403		{ F_MBHOSTPARERR, "CIM mailbox host read parity error" },
4404		{ F_MBUPPARERR, "CIM mailbox uP parity error" },
4405		{ F_IBQTP0PARERR, "CIM IBQ TP0 parity error" },
4406		{ F_IBQTP1PARERR, "CIM IBQ TP1 parity error" },
4407		{ F_IBQULPPARERR, "CIM IBQ ULP parity error" },
4408		{ F_IBQSGELOPARERR, "CIM IBQ SGE_LO parity error" },
4409		{ F_IBQSGEHIPARERR | F_IBQPCIEPARERR,	/* same bit */
4410			"CIM IBQ PCIe/SGE_HI parity error" },
4411		{ F_IBQNCSIPARERR, "CIM IBQ NC-SI parity error" },
4412		{ F_OBQULP0PARERR, "CIM OBQ ULP0 parity error" },
4413		{ F_OBQULP1PARERR, "CIM OBQ ULP1 parity error" },
4414		{ F_OBQULP2PARERR, "CIM OBQ ULP2 parity error" },
4415		{ F_OBQULP3PARERR, "CIM OBQ ULP3 parity error" },
4416		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error" },
4417		{ F_OBQNCSIPARERR, "CIM OBQ NC-SI parity error" },
4418		{ F_TIMER1INT, "CIM TIMER0 interrupt" },
4419		{ F_TIMER0INT, "CIM TIMER0 interrupt" },
4420		{ F_PREFDROPINT, "CIM control register prefetch drop" },
4421		{ 0}
4422	};
4423	static const struct intr_info cim_host_intr_info = {
4424		.name = "CIM_HOST_INT_CAUSE",
4425		.cause_reg = A_CIM_HOST_INT_CAUSE,
4426		.enable_reg = A_CIM_HOST_INT_ENABLE,
4427		.fatal = 0x007fffe6,
4428		.flags = NONFATAL_IF_DISABLED,
4429		.details = cim_host_intr_details,
4430		.actions = cim_host_intr_actions,
4431	};
4432	static const struct intr_details cim_host_upacc_intr_details[] = {
4433		{ F_EEPROMWRINT, "CIM EEPROM came out of busy state" },
4434		{ F_TIMEOUTMAINT, "CIM PIF MA timeout" },
4435		{ F_TIMEOUTINT, "CIM PIF timeout" },
4436		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite" },
4437		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite" },
4438		{ F_BLKWRPLINT, "CIM block write to PL space" },
4439		{ F_BLKRDPLINT, "CIM block read from PL space" },
4440		{ F_SGLWRPLINT,
4441			"CIM single write to PL space with illegal BEs" },
4442		{ F_SGLRDPLINT,
4443			"CIM single read from PL space with illegal BEs" },
4444		{ F_BLKWRCTLINT, "CIM block write to CTL space" },
4445		{ F_BLKRDCTLINT, "CIM block read from CTL space" },
4446		{ F_SGLWRCTLINT,
4447			"CIM single write to CTL space with illegal BEs" },
4448		{ F_SGLRDCTLINT,
4449			"CIM single read from CTL space with illegal BEs" },
4450		{ F_BLKWREEPROMINT, "CIM block write to EEPROM space" },
4451		{ F_BLKRDEEPROMINT, "CIM block read from EEPROM space" },
4452		{ F_SGLWREEPROMINT,
4453			"CIM single write to EEPROM space with illegal BEs" },
4454		{ F_SGLRDEEPROMINT,
4455			"CIM single read from EEPROM space with illegal BEs" },
4456		{ F_BLKWRFLASHINT, "CIM block write to flash space" },
4457		{ F_BLKRDFLASHINT, "CIM block read from flash space" },
4458		{ F_SGLWRFLASHINT, "CIM single write to flash space" },
4459		{ F_SGLRDFLASHINT,
4460			"CIM single read from flash space with illegal BEs" },
4461		{ F_BLKWRBOOTINT, "CIM block write to boot space" },
4462		{ F_BLKRDBOOTINT, "CIM block read from boot space" },
4463		{ F_SGLWRBOOTINT, "CIM single write to boot space" },
4464		{ F_SGLRDBOOTINT,
4465			"CIM single read from boot space with illegal BEs" },
4466		{ F_ILLWRBEINT, "CIM illegal write BEs" },
4467		{ F_ILLRDBEINT, "CIM illegal read BEs" },
4468		{ F_ILLRDINT, "CIM illegal read" },
4469		{ F_ILLWRINT, "CIM illegal write" },
4470		{ F_ILLTRANSINT, "CIM illegal transaction" },
4471		{ F_RSVDSPACEINT, "CIM reserved space access" },
4472		{0}
4473	};
4474	static const struct intr_info cim_host_upacc_intr_info = {
4475		.name = "CIM_HOST_UPACC_INT_CAUSE",
4476		.cause_reg = A_CIM_HOST_UPACC_INT_CAUSE,
4477		.enable_reg = A_CIM_HOST_UPACC_INT_ENABLE,
4478		.fatal = 0x3fffeeff,
4479		.flags = NONFATAL_IF_DISABLED,
4480		.details = cim_host_upacc_intr_details,
4481		.actions = NULL,
4482	};
4483	static const struct intr_info cim_pf_host_intr_info = {
4484		.name = "CIM_PF_HOST_INT_CAUSE",
4485		.cause_reg = MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4486		.enable_reg = MYPF_REG(A_CIM_PF_HOST_INT_ENABLE),
4487		.fatal = 0,
4488		.flags = 0,
4489		.details = NULL,
4490		.actions = NULL,
4491	};
4492	u32 val, fw_err;
4493	bool fatal;
4494
4495	fw_err = t4_read_reg(adap, A_PCIE_FW);
4496	if (fw_err & F_PCIE_FW_ERR)
4497		t4_report_fw_error(adap);
4498
4499	/*
4500	 * When the Firmware detects an internal error which normally wouldn't
4501	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4502	 * to make sure the Host sees the Firmware Crash.  So if we have a
4503	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4504	 * interrupt.
4505	 */
4506	val = t4_read_reg(adap, A_CIM_HOST_INT_CAUSE);
4507	if (val & F_TIMER0INT && (!(fw_err & F_PCIE_FW_ERR) ||
4508	    G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH)) {
4509		t4_write_reg(adap, A_CIM_HOST_INT_CAUSE, F_TIMER0INT);
4510	}
4511
4512	fatal = false;
4513	fatal |= t4_handle_intr(adap, &cim_host_intr_info, 0, verbose);
4514	fatal |= t4_handle_intr(adap, &cim_host_upacc_intr_info, 0, verbose);
4515	fatal |= t4_handle_intr(adap, &cim_pf_host_intr_info, 0, verbose);
4516
4517	return (fatal);
4518}
4519
4520/*
4521 * ULP RX interrupt handler.
4522 */
static bool ulprx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulprx_intr_details[] = {
		/* T5+ */
		{ F_SE_CNT_MISMATCH_1, "ULPRX SE count mismatch in channel 1" },
		{ F_SE_CNT_MISMATCH_0, "ULPRX SE count mismatch in channel 0" },

		/* T4+ */
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error" },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error" },
		{ 0x007fffff, "ULPRX parity error" },
		{ 0 }
	};
	static const struct intr_info ulprx_intr_info = {
		.name = "ULP_RX_INT_CAUSE",
		.cause_reg = A_ULP_RX_INT_CAUSE,
		.enable_reg = A_ULP_RX_INT_ENABLE,
		/*
		 * NB: the fatal mask (0x07ffffff) is wider than the decoded
		 * parity mask above (0x007fffff); the extra bits have no
		 * per-bit description.
		 */
		.fatal = 0x07ffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulprx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; nothing in it is treated as fatal. */
	static const struct intr_info ulprx_intr2_info = {
		.name = "ULP_RX_INT_CAUSE_2",
		.cause_reg = A_ULP_RX_INT_CAUSE_2,
		.enable_reg = A_ULP_RX_INT_ENABLE_2,
		.fatal = 0,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	/* Service both ULP_RX cause registers. */
	fatal |= t4_handle_intr(adap, &ulprx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulprx_intr2_info, 0, verbose);

	return (fatal);
}
4561
4562/*
4563 * ULP TX interrupt handler.
4564 */
static bool ulptx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ulptx_intr_details[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds" },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds" },
		{ 0x0fffffff, "ULPTX parity error" },
		{ 0 }
	};
	static const struct intr_info ulptx_intr_info = {
		.name = "ULP_TX_INT_CAUSE",
		.cause_reg = A_ULP_TX_INT_CAUSE,
		.enable_reg = A_ULP_TX_INT_ENABLE,
		.fatal = 0x0fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = ulptx_intr_details,
		.actions = NULL,
	};
	/* Secondary cause register; only bits 0xf0 are fatal, no decode. */
	static const struct intr_info ulptx_intr2_info = {
		.name = "ULP_TX_INT_CAUSE_2",
		.cause_reg = A_ULP_TX_INT_CAUSE_2,
		.enable_reg = A_ULP_TX_INT_ENABLE_2,
		.fatal = 0xf0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal = false;

	/* Service both ULP_TX cause registers. */
	fatal |= t4_handle_intr(adap, &ulptx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ulptx_intr2_info, 0, verbose);

	return (fatal);
}
4600
4601static bool pmtx_dump_dbg_stats(struct adapter *adap, int arg, bool verbose)
4602{
4603	int i;
4604	u32 data[17];
4605
4606	t4_read_indirect(adap, A_PM_TX_DBG_CTRL, A_PM_TX_DBG_DATA, &data[0],
4607	    ARRAY_SIZE(data), A_PM_TX_DBG_STAT0);
4608	for (i = 0; i < ARRAY_SIZE(data); i++) {
4609		CH_ALERT(adap, "  - PM_TX_DBG_STAT%u (0x%x) = 0x%08x\n", i,
4610		    A_PM_TX_DBG_STAT0 + i, data[i]);
4611	}
4612
4613	return (false);
4614}
4615
4616/*
4617 * PM TX interrupt handler.
4618 */
static bool pmtx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Any PM_TX cause bit triggers a dump of the PM_TX debug stats. */
	static const struct intr_action pmtx_intr_actions[] = {
		{ 0xffffffff, 0, pmtx_dump_dbg_stats },
		{ 0 },
	};
	static const struct intr_details pmtx_intr_details[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large" },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large" },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large" },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd" },
		{ 0x0f000000, "PMTX icspi FIFO2X Rx framing error" },
		{ 0x00f00000, "PMTX icspi FIFO Rx framing error" },
		{ 0x000f0000, "PMTX icspi FIFO Tx framing error" },
		{ 0x0000f000, "PMTX oespi FIFO Rx framing error" },
		{ 0x00000f00, "PMTX oespi FIFO Tx framing error" },
		{ 0x000000f0, "PMTX oespi FIFO2X Tx framing error" },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error" },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error" },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error" },
		{ 0 }
	};
	static const struct intr_info pmtx_intr_info = {
		.name = "PM_TX_INT_CAUSE",
		.cause_reg = A_PM_TX_INT_CAUSE,
		.enable_reg = A_PM_TX_INT_ENABLE,
		.fatal = 0xffffffff,	/* every PM_TX cause bit is fatal */
		.flags = 0,
		.details = pmtx_intr_details,
		.actions = pmtx_intr_actions,
	};

	return (t4_handle_intr(adap, &pmtx_intr_info, 0, verbose));
}
4654
4655/*
4656 * PM RX interrupt handler.
4657 */
static bool pmrx_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details pmrx_intr_details[] = {
		/* T6+ */
		{ 0x18000000, "PMRX ospi overflow" },
		{ F_MA_INTF_SDC_ERR, "PMRX MA interface SDC parity error" },
		{ F_BUNDLE_LEN_PARERR, "PMRX bundle len FIFO parity error" },
		{ F_BUNDLE_LEN_OVFL, "PMRX bundle len FIFO overflow" },
		{ F_SDC_ERR, "PMRX SDC error" },

		/* T4+ */
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd" },
		{ 0x003c0000, "PMRX iespi FIFO2X Rx framing error" },
		{ 0x0003c000, "PMRX iespi Rx framing error" },
		{ 0x00003c00, "PMRX iespi Tx framing error" },
		{ 0x00000300, "PMRX ocspi Rx framing error" },
		{ 0x000000c0, "PMRX ocspi Tx framing error" },
		{ 0x00000030, "PMRX ocspi FIFO2X Tx framing error" },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error" },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error" },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error" },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error"},
		{ 0 }
	};
	static const struct intr_info pmrx_intr_info = {
		.name = "PM_RX_INT_CAUSE",
		.cause_reg = A_PM_RX_INT_CAUSE,
		.enable_reg = A_PM_RX_INT_ENABLE,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = pmrx_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &pmrx_intr_info, 0, verbose));
}
4694
4695/*
4696 * CPL switch interrupt handler.
4697 */
static bool cplsw_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details cplsw_intr_details[] = {
		/* T5+ */
		{ F_PERR_CPL_128TO128_1, "CPLSW 128TO128 FIFO1 parity error" },
		{ F_PERR_CPL_128TO128_0, "CPLSW 128TO128 FIFO0 parity error" },

		/* T4+ */
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error" },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow" },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error" },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error" },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error" },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error" },
		{ 0 }
	};
	static const struct intr_info cplsw_intr_info = {
		.name = "CPL_INTR_CAUSE",
		.cause_reg = A_CPL_INTR_CAUSE,
		.enable_reg = A_CPL_INTR_ENABLE,
		.fatal = 0xff,	/* covers the T4 bits and the T5+ FIFO bits */
		.flags = NONFATAL_IF_DISABLED,
		.details = cplsw_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &cplsw_intr_info, 0, verbose));
}
4726
/* Fatal LE_DB_INT_CAUSE bits by chip generation (see le_intr_handler). */
#define T4_LE_FATAL_MASK (F_PARITYERR | F_UNKNOWNCMD | F_REQQPARERR)
#define T5_LE_FATAL_MASK (T4_LE_FATAL_MASK | F_VFPARERR)
/* T6 parity/CRC bits, reported collectively as "LE parity/CRC error". */
#define T6_LE_PERRCRC_MASK (F_PIPELINEERR | F_CLIPTCAMACCFAIL | \
    F_SRVSRAMACCFAIL | F_CLCAMCRCPARERR | F_CLCAMINTPERR | F_SSRAMINTPERR | \
    F_SRVSRAMPERR | F_VFSRAMPERR | F_TCAMINTPERR | F_TCAMCRCERR | \
    F_HASHTBLMEMACCERR | F_MAIFWRINTPERR | F_HASHTBLMEMCRCERR)
#define T6_LE_FATAL_MASK (T6_LE_PERRCRC_MASK | F_T6_UNKNOWNCMD | \
    F_TCAMACCFAIL | F_HASHTBLACCFAIL | F_CMDTIDERR | F_CMDPRSRINTERR | \
    F_TOTCNTERR | F_CLCAMFIFOERR | F_CLIPSUBERR)
4736
4737/*
4738 * LE interrupt handler.
4739 */
static bool le_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* Per-bit decode for T4/T5 LE_DB_INT_CAUSE. */
	static const struct intr_details le_intr_details[] = {
		{ F_REQQPARERR, "LE request queue parity error" },
		{ F_UNKNOWNCMD, "LE unknown command" },
		{ F_ACTRGNFULL, "LE active region full" },
		{ F_PARITYERR, "LE parity error" },
		{ F_LIPMISS, "LE LIP miss" },
		{ F_LIP0, "LE 0 LIP error" },
		{ 0 }
	};
	/* T6 redefined most of the register; separate decode table. */
	static const struct intr_details t6_le_intr_details[] = {
		{ F_CLIPSUBERR, "LE CLIP CAM reverse substitution error" },
		{ F_CLCAMFIFOERR, "LE CLIP CAM internal FIFO error" },
		{ F_CTCAMINVLDENT, "Invalid IPv6 CLIP TCAM entry" },
		{ F_TCAMINVLDENT, "Invalid IPv6 TCAM entry" },
		{ F_TOTCNTERR, "LE total active < TCAM count" },
		{ F_CMDPRSRINTERR, "LE internal error in parser" },
		{ F_CMDTIDERR, "Incorrect tid in LE command" },
		{ F_T6_ACTRGNFULL, "LE active region full" },
		{ F_T6_ACTCNTIPV6TZERO, "LE IPv6 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV4TZERO, "LE IPv4 active open TCAM counter -ve" },
		{ F_T6_ACTCNTIPV6ZERO, "LE IPv6 active open counter -ve" },
		{ F_T6_ACTCNTIPV4ZERO, "LE IPv4 active open counter -ve" },
		{ F_HASHTBLACCFAIL, "Hash table read error (proto conflict)" },
		{ F_TCAMACCFAIL, "LE TCAM access failure" },
		{ F_T6_UNKNOWNCMD, "LE unknown command" },
		{ F_T6_LIP0, "LE found 0 LIP during CLIP substitution" },
		{ F_T6_LIPMISS, "LE CLIP lookup miss" },
		{ T6_LE_PERRCRC_MASK, "LE parity/CRC error" },
		{ 0 }
	};
	/* Not const: .details and .fatal are chip-dependent, set below. */
	struct intr_info le_intr_info = {
		.name = "LE_DB_INT_CAUSE",
		.cause_reg = A_LE_DB_INT_CAUSE,
		.enable_reg = A_LE_DB_INT_ENABLE,
		.fatal = 0,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = NULL,
	};

	/* Select the decode table and fatal mask for this chip generation. */
	if (chip_id(adap) <= CHELSIO_T5) {
		le_intr_info.details = le_intr_details;
		le_intr_info.fatal = T5_LE_FATAL_MASK;
	} else {
		le_intr_info.details = t6_le_intr_details;
		le_intr_info.fatal = T6_LE_FATAL_MASK;
	}

	return (t4_handle_intr(adap, &le_intr_info, 0, verbose));
}
4792
4793/*
4794 * MPS interrupt handler.
4795 */
static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* MPS Rx path: every bit is a parity error. */
	static const struct intr_details mps_rx_perr_intr_details[] = {
		{ 0xffffffff, "MPS Rx parity error" },
		{ 0 }
	};
	static const struct intr_info mps_rx_perr_intr_info = {
		.name = "MPS_RX_PERR_INT_CAUSE",
		.cause_reg = A_MPS_RX_PERR_INT_CAUSE,
		.enable_reg = A_MPS_RX_PERR_INT_ENABLE,
		.fatal = 0xffffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_rx_perr_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_tx_intr_details[] = {
		{ F_PORTERR, "MPS Tx destination port is disabled" },
		{ F_FRMERR, "MPS Tx framing error" },
		{ F_SECNTERR, "MPS Tx SOP/EOP error" },
		{ F_BUBBLE, "MPS Tx underflow" },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error" },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error" },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error" },
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info = {
		.name = "MPS_TX_INT_CAUSE",
		.cause_reg = A_MPS_TX_INT_CAUSE,
		.enable_reg = A_MPS_TX_INT_ENABLE,
		.fatal = 0x1ffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_tx_intr_details,
		.actions = NULL,
	};
	/* Trace engine errors. */
	static const struct intr_details mps_trc_intr_details[] = {
		{ F_MISCPERR, "MPS TRC misc parity error" },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error" },
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error" },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info = {
		.name = "MPS_TRC_INT_CAUSE",
		.cause_reg = A_MPS_TRC_INT_CAUSE,
		.enable_reg = A_MPS_TRC_INT_ENABLE,
		.fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
		.flags = 0,
		.details = mps_trc_intr_details,
		.actions = NULL,
	};
	/* Statistics block parity errors: SRAM, Tx FIFO, Rx FIFO, SRAM1. */
	static const struct intr_details mps_stat_sram_intr_details[] = {
		{ 0xffffffff, "MPS statistics SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM,
		.fatal = 0x1fffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_sram_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_tx_intr_details[] = {
		{ 0xffffff, "MPS statistics Tx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_TX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO,
		.fatal =  0xffffff,
		.flags = NONFATAL_IF_DISABLED,
		.details = mps_stat_tx_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_rx_intr_details[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_RX_FIFO",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO,
		.fatal =  0xffffff,
		.flags = 0,
		.details = mps_stat_rx_intr_details,
		.actions = NULL,
	};
	/* Classification (match TCAM/SRAM) parity errors. */
	static const struct intr_details mps_cls_intr_details[] = {
		{ F_HASHSRAM, "MPS hash SRAM parity error" },
		{ F_MATCHTCAM, "MPS match TCAM parity error" },
		{ F_MATCHSRAM, "MPS match SRAM parity error" },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info = {
		.name = "MPS_CLS_INT_CAUSE",
		.cause_reg = A_MPS_CLS_INT_CAUSE,
		.enable_reg = A_MPS_CLS_INT_ENABLE,
		.fatal =  F_MATCHSRAM | F_MATCHTCAM | F_HASHSRAM,
		.flags = 0,
		.details = mps_cls_intr_details,
		.actions = NULL,
	};
	static const struct intr_details mps_stat_sram1_intr_details[] = {
		{ 0xff, "MPS statistics SRAM1 parity error" },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram1_intr_info = {
		.name = "MPS_STAT_PERR_INT_CAUSE_SRAM1",
		.cause_reg = A_MPS_STAT_PERR_INT_CAUSE_SRAM1,
		.enable_reg = A_MPS_STAT_PERR_INT_ENABLE_SRAM1,
		.fatal = 0xff,
		.flags = 0,
		.details = mps_stat_sram1_intr_details,
		.actions = NULL,
	};

	bool fatal;

	/* Service every MPS sub-block cause register in turn. */
	fatal = false;
	fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &mps_cls_intr_info, 0, verbose);
	/* The SRAM1 cause register does not exist on T4. */
	if (chip_id(adap) > CHELSIO_T4) {
		fatal |= t4_handle_intr(adap, &mps_stat_sram1_intr_info, 0,
		    verbose);
	}

	/*
	 * Clear the top-level MPS cause.  T4 takes a write of 0 while later
	 * chips take all-ones (presumably write-1-to-clear on T5+ — see the
	 * is_t4() special case).
	 */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	t4_read_reg(adap, A_MPS_INT_CAUSE);	/* flush */

	return (fatal);

}
4935
4936/*
4937 * EDC/MC interrupt handler.
4938 */
4939static bool mem_intr_handler(struct adapter *adap, int idx, bool verbose)
4940{
4941	static const char name[4][5] = { "EDC0", "EDC1", "MC0", "MC1" };
4942	unsigned int count_reg, v;
4943	static const struct intr_details mem_intr_details[] = {
4944		{ F_ECC_UE_INT_CAUSE, "Uncorrectable ECC data error(s)" },
4945		{ F_ECC_CE_INT_CAUSE, "Correctable ECC data error(s)" },
4946		{ F_PERR_INT_CAUSE, "FIFO parity error" },
4947		{ 0 }
4948	};
4949	struct intr_info ii = {
4950		.fatal = F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE,
4951		.details = mem_intr_details,
4952		.flags = 0,
4953		.actions = NULL,
4954	};
4955	bool fatal;
4956
4957	switch (idx) {
4958	case MEM_EDC0:
4959		ii.name = "EDC0_INT_CAUSE";
4960		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 0);
4961		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 0);
4962		count_reg = EDC_REG(A_EDC_ECC_STATUS, 0);
4963		break;
4964	case MEM_EDC1:
4965		ii.name = "EDC1_INT_CAUSE";
4966		ii.cause_reg = EDC_REG(A_EDC_INT_CAUSE, 1);
4967		ii.enable_reg = EDC_REG(A_EDC_INT_ENABLE, 1);
4968		count_reg = EDC_REG(A_EDC_ECC_STATUS, 1);
4969		break;
4970	case MEM_MC0:
4971		ii.name = "MC0_INT_CAUSE";
4972		if (is_t4(adap)) {
4973			ii.cause_reg = A_MC_INT_CAUSE;
4974			ii.enable_reg = A_MC_INT_ENABLE;
4975			count_reg = A_MC_ECC_STATUS;
4976		} else {
4977			ii.cause_reg = A_MC_P_INT_CAUSE;
4978			ii.enable_reg = A_MC_P_INT_ENABLE;
4979			count_reg = A_MC_P_ECC_STATUS;
4980		}
4981		break;
4982	case MEM_MC1:
4983		ii.name = "MC1_INT_CAUSE";
4984		ii.cause_reg = MC_REG(A_MC_P_INT_CAUSE, 1);
4985		ii.enable_reg = MC_REG(A_MC_P_INT_ENABLE, 1);
4986		count_reg = MC_REG(A_MC_P_ECC_STATUS, 1);
4987		break;
4988	}
4989
4990	fatal = t4_handle_intr(adap, &ii, 0, verbose);
4991
4992	v = t4_read_reg(adap, count_reg);
4993	if (v != 0) {
4994		if (G_ECC_UECNT(v) != 0) {
4995			CH_ALERT(adap,
4996			    "%s: %u uncorrectable ECC data error(s)\n",
4997			    name[idx], G_ECC_UECNT(v));
4998		}
4999		if (G_ECC_CECNT(v) != 0) {
5000			if (idx <= MEM_EDC1)
5001				t4_edc_err_read(adap, idx);
5002			CH_WARN_RATELIMIT(adap,
5003			    "%s: %u correctable ECC data error(s)\n",
5004			    name[idx], G_ECC_CECNT(v));
5005		}
5006		t4_write_reg(adap, count_reg, 0xffffffff);
5007	}
5008
5009	return (fatal);
5010}
5011
5012static bool ma_wrap_status(struct adapter *adap, int arg, bool verbose)
5013{
5014	u32 v;
5015
5016	v = t4_read_reg(adap, A_MA_INT_WRAP_STATUS);
5017	CH_ALERT(adap,
5018	    "MA address wrap-around error by client %u to address %#x\n",
5019	    G_MEM_WRAP_CLIENT_NUM(v), G_MEM_WRAP_ADDRESS(v) << 4);
5020	t4_write_reg(adap, A_MA_INT_WRAP_STATUS, v);
5021
5022	return (false);
5023}
5024
5025
5026/*
5027 * MA interrupt handler.
5028 */
static bool ma_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	/* On an address wrap, dump the offending client/address. */
	static const struct intr_action ma_intr_actions[] = {
		{ F_MEM_WRAP_INT_CAUSE, 0, ma_wrap_status },
		{ 0 },
	};
	static const struct intr_info ma_intr_info = {
		.name = "MA_INT_CAUSE",
		.cause_reg = A_MA_INT_CAUSE,
		.enable_reg = A_MA_INT_ENABLE,
		.fatal = F_MEM_PERR_INT_CAUSE | F_MEM_TO_INT_CAUSE,
		.flags = NONFATAL_IF_DISABLED,
		.details = NULL,
		.actions = ma_intr_actions,
	};
	/* Parity error status registers: every bit fatal, no decode. */
	static const struct intr_info ma_perr_status1 = {
		.name = "MA_PARITY_ERROR_STATUS1",
		.cause_reg = A_MA_PARITY_ERROR_STATUS1,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE1,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	static const struct intr_info ma_perr_status2 = {
		.name = "MA_PARITY_ERROR_STATUS2",
		.cause_reg = A_MA_PARITY_ERROR_STATUS2,
		.enable_reg = A_MA_PARITY_ERROR_ENABLE2,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = NULL,
		.actions = NULL,
	};
	bool fatal;

	fatal = false;
	fatal |= t4_handle_intr(adap, &ma_intr_info, 0, verbose);
	fatal |= t4_handle_intr(adap, &ma_perr_status1, 0, verbose);
	/* STATUS2 does not exist on T4. */
	if (chip_id(adap) > CHELSIO_T4)
		fatal |= t4_handle_intr(adap, &ma_perr_status2, 0, verbose);

	return (fatal);
}
5072
5073/*
5074 * SMB interrupt handler.
5075 */
static bool smb_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details smb_intr_details[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error" },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error" },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info smb_intr_info = {
		.name = "SMB_INT_CAUSE",
		.cause_reg = A_SMB_INT_CAUSE,
		.enable_reg = A_SMB_INT_ENABLE,
		/* All three FIFO parity errors are fatal. */
		.fatal = F_SLVFIFOPARINT | F_MSTRXFIFOPARINT | F_MSTTXFIFOPARINT,
		.flags = 0,
		.details = smb_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &smb_intr_info, 0, verbose));
}
5096
5097/*
5098 * NC-SI interrupt handler.
5099 */
static bool ncsi_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details ncsi_intr_details[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error" },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error" },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error" },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error" },
		{ 0 }
	};
	static const struct intr_info ncsi_intr_info = {
		.name = "NCSI_INT_CAUSE",
		.cause_reg = A_NCSI_INT_CAUSE,
		.enable_reg = A_NCSI_INT_ENABLE,
		/* All four parity errors are fatal. */
		.fatal = F_RXFIFO_PRTY_ERR | F_TXFIFO_PRTY_ERR |
		    F_MPS_DM_PRTY_ERR | F_CIM_DM_PRTY_ERR,
		.flags = 0,
		.details = ncsi_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &ncsi_intr_info, 0, verbose));
}
5122
5123/*
5124 * MAC interrupt handler.
5125 */
5126static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
5127{
5128	static const struct intr_details mac_intr_details[] = {
5129		{ F_TXFIFO_PRTY_ERR, "MAC Tx FIFO parity error" },
5130		{ F_RXFIFO_PRTY_ERR, "MAC Rx FIFO parity error" },
5131		{ 0 }
5132	};
5133	char name[32];
5134	struct intr_info ii;
5135	bool fatal = false;
5136
5137	if (is_t4(adap)) {
5138		snprintf(name, sizeof(name), "XGMAC_PORT%u_INT_CAUSE", port);
5139		ii.name = &name[0];
5140		ii.cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5141		ii.enable_reg = PORT_REG(port, A_XGMAC_PORT_INT_EN);
5142		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
5143		ii.flags = 0;
5144		ii.details = mac_intr_details;
5145		ii.actions = NULL;
5146	} else {
5147		snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
5148		ii.name = &name[0];
5149		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5150		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_INT_EN);
5151		ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
5152		ii.flags = 0;
5153		ii.details = mac_intr_details;
5154		ii.actions = NULL;
5155	}
5156	fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5157
5158	if (chip_id(adap) >= CHELSIO_T5) {
5159		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
5160		ii.name = &name[0];
5161		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
5162		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN);
5163		ii.fatal = 0;
5164		ii.flags = 0;
5165		ii.details = NULL;
5166		ii.actions = NULL;
5167		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5168	}
5169
5170	if (chip_id(adap) >= CHELSIO_T6) {
5171		snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
5172		ii.name = &name[0];
5173		ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
5174		ii.enable_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_EN_100G);
5175		ii.fatal = 0;
5176		ii.flags = 0;
5177		ii.details = NULL;
5178		ii.actions = NULL;
5179		fatal |= t4_handle_intr(adap, &ii, 0, verbose);
5180	}
5181
5182	return (fatal);
5183}
5184
/* PL (top-level) internal interrupt handler. */
static bool plpl_intr_handler(struct adapter *adap, int arg, bool verbose)
{
	static const struct intr_details plpl_intr_details[] = {
		{ F_FATALPERR, "Fatal parity error" },
		{ F_PERRVFID, "VFID_MAP parity error" },
		{ 0 }
	};
	static const struct intr_info plpl_intr_info = {
		.name = "PL_PL_INT_CAUSE",
		.cause_reg = A_PL_PL_INT_CAUSE,
		.enable_reg = A_PL_PL_INT_ENABLE,
		.fatal = F_FATALPERR | F_PERRVFID,
		.flags = NONFATAL_IF_DISABLED,
		.details = plpl_intr_details,
		.actions = NULL,
	};

	return (t4_handle_intr(adap, &plpl_intr_info, 0, verbose));
}
5204
5205/**
5206 *	t4_slow_intr_handler - control path interrupt handler
5207 *	@adap: the adapter
5208 *	@verbose: increased verbosity, for debug
5209 *
5210 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
5211 *	The designation 'slow' is because it involves register reads, while
5212 *	data interrupts typically don't involve any MMIOs.
5213 */
int t4_slow_intr_handler(struct adapter *adap, bool verbose)
{
	/* Decode of PL_INT_CAUSE / PL_PERR_CAUSE bits to module names. */
	static const struct intr_details pl_intr_details[] = {
		{ F_MC1, "MC1" },
		{ F_UART, "UART" },
		{ F_ULP_TX, "ULP TX" },
		{ F_SGE, "SGE" },
		{ F_HMA, "HMA" },
		{ F_CPL_SWITCH, "CPL Switch" },
		{ F_ULP_RX, "ULP RX" },
		{ F_PM_RX, "PM RX" },
		{ F_PM_TX, "PM TX" },
		{ F_MA, "MA" },
		{ F_TP, "TP" },
		{ F_LE, "LE" },
		{ F_EDC1, "EDC1" },
		{ F_EDC0, "EDC0" },
		{ F_MC, "MC0" },
		{ F_PCIE, "PCIE" },
		{ F_PMU, "PMU" },
		{ F_MAC3, "MAC3" },
		{ F_MAC2, "MAC2" },
		{ F_MAC1, "MAC1" },
		{ F_MAC0, "MAC0" },
		{ F_SMB, "SMB" },
		{ F_SF, "SF" },
		{ F_PL, "PL" },
		{ F_NCSI, "NC-SI" },
		{ F_MPS, "MPS" },
		{ F_MI, "MI" },
		{ F_DBG, "DBG" },
		{ F_I2CM, "I2CM" },
		{ F_CIM, "CIM" },
		{ 0 }
	};
	/* Every bit set in PL_PERR_CAUSE is treated as fatal (.fatal = ~0). */
	static const struct intr_info pl_perr_cause = {
		.name = "PL_PERR_CAUSE",
		.cause_reg = A_PL_PERR_CAUSE,
		.enable_reg = A_PL_PERR_ENABLE,
		.fatal = 0xffffffff,
		.flags = 0,
		.details = pl_intr_details,
		.actions = NULL,
	};
	/* Module-specific handlers dispatched for bits set in PL_INT_CAUSE. */
	static const struct intr_action pl_intr_action[] = {
		{ F_MC1, MEM_MC1, mem_intr_handler },
		{ F_ULP_TX, -1, ulptx_intr_handler },
		{ F_SGE, -1, sge_intr_handler },
		{ F_CPL_SWITCH, -1, cplsw_intr_handler },
		{ F_ULP_RX, -1, ulprx_intr_handler },
		{ F_PM_RX, -1, pmrx_intr_handler},
		{ F_PM_TX, -1, pmtx_intr_handler},
		{ F_MA, -1, ma_intr_handler },
		{ F_TP, -1, tp_intr_handler },
		{ F_LE, -1, le_intr_handler },
		{ F_EDC1, MEM_EDC1, mem_intr_handler },
		{ F_EDC0, MEM_EDC0, mem_intr_handler },
		{ F_MC0, MEM_MC0, mem_intr_handler },
		{ F_PCIE, -1, pcie_intr_handler },
		{ F_MAC3, 3, mac_intr_handler},
		{ F_MAC2, 2, mac_intr_handler},
		{ F_MAC1, 1, mac_intr_handler},
		{ F_MAC0, 0, mac_intr_handler},
		{ F_SMB, -1, smb_intr_handler},
		{ F_PL, -1, plpl_intr_handler },
		{ F_NCSI, -1, ncsi_intr_handler},
		{ F_MPS, -1, mps_intr_handler },
		{ F_CIM, -1, cim_intr_handler },
		{ 0 }
	};
	static const struct intr_info pl_intr_info = {
		.name = "PL_INT_CAUSE",
		.cause_reg = A_PL_INT_CAUSE,
		.enable_reg = A_PL_INT_ENABLE,
		.fatal = 0,
		.flags = 0,
		.details = pl_intr_details,
		.actions = pl_intr_action,
	};
	bool fatal;
	u32 perr;

	/*
	 * Read and clear PL_PERR_CAUSE first; its bits are folded into the
	 * additional-cause set passed to the PL_INT_CAUSE handling below.
	 * In verbose mode the enabled PL interrupt bits are folded in too,
	 * so all enabled modules get reported.
	 */
	perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
	if (verbose || perr != 0) {
		t4_show_intr_info(adap, &pl_perr_cause, perr);
		if (perr != 0)
			t4_write_reg(adap, pl_perr_cause.cause_reg, perr);
		if (verbose)
			perr |= t4_read_reg(adap, pl_intr_info.enable_reg);
	}
	fatal = t4_handle_intr(adap, &pl_intr_info, perr, verbose);
	/* Escalate if any handler in the hierarchy reported a fatal error. */
	if (fatal)
		t4_fatal_err(adap, false);

	return (0);
}
5310
5311#define PF_INTR_MASK (F_PFSW | F_PFCIM)
5312
5313/**
5314 *	t4_intr_enable - enable interrupts
5315 *	@adapter: the adapter whose interrupts should be enabled
5316 *
5317 *	Enable PF-specific interrupts for the calling function and the top-level
5318 *	interrupt concentrator for global interrupts.  Interrupts are already
5319 *	enabled at each module,	here we just enable the roots of the interrupt
5320 *	hierarchies.
5321 *
5322 *	Note: this function should be called only when the driver manages
5323 *	non PF-specific interrupts from the various HW modules.  Only one PCI
5324 *	function at a time should be doing this.
5325 */
5326void t4_intr_enable(struct adapter *adap)
5327{
5328	u32 val = 0;
5329
5330	if (chip_id(adap) <= CHELSIO_T5)
5331		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
5332	else
5333		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
5334	val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
5335	    F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
5336	    F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
5337	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
5338	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
5339	    F_EGRESS_SIZE_ERR;
5340	t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
5341	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
5342	t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
5343	t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
5344}
5345
5346/**
5347 *	t4_intr_disable - disable interrupts
5348 *	@adap: the adapter whose interrupts should be disabled
5349 *
5350 *	Disable interrupts.  We only disable the top-level interrupt
5351 *	concentrators.  The caller must be a PCI function managing global
5352 *	interrupts.
5353 */
void t4_intr_disable(struct adapter *adap)
{

	/* Mask all PF-specific interrupt sources for this function. */
	t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	/* Clear this PF's bit in PL_INT_MAP0 (set by t4_intr_enable). */
	t4_set_reg_field(adap, A_PL_INT_MAP0, 1 << adap->pf, 0);
}
5360
5361/**
5362 *	t4_intr_clear - clear all interrupts
5363 *	@adap: the adapter whose interrupts should be cleared
5364 *
5365 *	Clears all interrupts.  The caller must be a PCI function managing
5366 *	global interrupts.
5367 */
void t4_intr_clear(struct adapter *adap)
{
	/* INT_CAUSE-style registers written on every supported chip. */
	static const u32 cause_reg[] = {
		A_CIM_HOST_INT_CAUSE,
		A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_CPL_INTR_CAUSE,
		EDC_REG(A_EDC_INT_CAUSE, 0), EDC_REG(A_EDC_INT_CAUSE, 1),
		A_LE_DB_INT_CAUSE,
		A_MA_INT_WRAP_STATUS,
		A_MA_PARITY_ERROR_STATUS1,
		A_MA_INT_CAUSE,
		A_MPS_CLS_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
		A_MPS_STAT_PERR_INT_CAUSE_SRAM,
		A_MPS_TRC_INT_CAUSE,
		A_MPS_TX_INT_CAUSE,
		A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
		A_NCSI_INT_CAUSE,
		A_PCIE_INT_CAUSE,
		A_PCIE_NONFAT_ERR,
		A_PL_PL_INT_CAUSE,
		A_PM_RX_INT_CAUSE,
		A_PM_TX_INT_CAUSE,
		A_SGE_INT_CAUSE1,
		A_SGE_INT_CAUSE2,
		A_SGE_INT_CAUSE3,
		A_SGE_INT_CAUSE4,
		A_SMB_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE,
		A_ULP_RX_INT_CAUSE_2,
		A_ULP_TX_INT_CAUSE,
		A_ULP_TX_INT_CAUSE_2,

		MYPF_REG(A_PL_PF_INT_CAUSE),
	};
	int i;
	const int nchan = adap->chip_params->nchan;

	/* Write-1-to-clear: all-ones clears every pending bit. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); i++)
		t4_write_reg(adap, cause_reg[i], 0xffffffff);

	/* Registers that exist on T4 only. */
	if (is_t4(adap)) {
		t4_write_reg(adap, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		    0xffffffff);
		t4_write_reg(adap, A_MC_INT_CAUSE, 0xffffffff);
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, PORT_REG(i, A_XGMAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	/* Registers added in T5 (with some T6-only additions inside). */
	if (chip_id(adap) >= CHELSIO_T5) {
		t4_write_reg(adap, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
		t4_write_reg(adap, A_MPS_STAT_PERR_INT_CAUSE_SRAM1, 0xffffffff);
		t4_write_reg(adap, A_SGE_INT_CAUSE5, 0xffffffff);
		t4_write_reg(adap, A_MC_P_INT_CAUSE, 0xffffffff);
		/* T5 has a second memory controller. */
		if (is_t5(adap)) {
			t4_write_reg(adap, MC_REG(A_MC_P_INT_CAUSE, 1),
			    0xffffffff);
		}
		for (i = 0; i < nchan; i++) {
			t4_write_reg(adap, T5_PORT_REG(i,
			    A_MAC_PORT_PERR_INT_CAUSE), 0xffffffff);
			if (chip_id(adap) > CHELSIO_T5) {
				t4_write_reg(adap, T5_PORT_REG(i,
				    A_MAC_PORT_PERR_INT_CAUSE_100G),
				    0xffffffff);
			}
			t4_write_reg(adap, T5_PORT_REG(i, A_MAC_PORT_INT_CAUSE),
			    0xffffffff);
		}
	}
	if (chip_id(adap) >= CHELSIO_T6) {
		t4_write_reg(adap, A_SGE_INT_CAUSE6, 0xffffffff);
	}

	/* NOTE: T4 gets 0 written to MPS_INT_CAUSE instead of all-ones. */
	t4_write_reg(adap, A_MPS_INT_CAUSE, is_t4(adap) ? 0 : 0xffffffff);
	/* Finally clear the top-level cause registers. */
	t4_write_reg(adap, A_PL_PERR_CAUSE, 0xffffffff);
	t4_write_reg(adap, A_PL_INT_CAUSE, 0xffffffff);
	(void) t4_read_reg(adap, A_PL_INT_CAUSE);          /* flush */
}
5453
5454/**
5455 *	hash_mac_addr - return the hash value of a MAC address
5456 *	@addr: the 48-bit Ethernet MAC address
5457 *
5458 *	Hashes a MAC address according to the hash function used by HW inexact
5459 *	(hash) address matching.
5460 */
5461static int hash_mac_addr(const u8 *addr)
5462{
5463	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
5464	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
5465	a ^= b;
5466	a ^= (a >> 12);
5467	a ^= (a >> 6);
5468	return a & 0x3f;
5469}
5470
5471/**
5472 *	t4_config_rss_range - configure a portion of the RSS mapping table
5473 *	@adapter: the adapter
5474 *	@mbox: mbox to use for the FW command
5475 *	@viid: virtual interface whose RSS subtable is to be written
5476 *	@start: start entry in the table to write
5477 *	@n: how many table entries to write
5478 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
5479 *	@nrspq: number of values in @rspq
5480 *
5481 *	Programs the selected part of the VI's RSS mapping table with the
5482 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5483 *	until the full table range is populated.
5484 *
5485 *	The caller must ensure the values in @rspq are in the range allowed for
5486 *	@viid.
5487 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for @rspq reuse */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			/* Unused slots in the final tuple stay zero. */
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
5566
5567/**
5568 *	t4_config_glbl_rss - configure the global RSS mode
5569 *	@adapter: the adapter
5570 *	@mbox: mbox to use for the FW command
5571 *	@mode: global RSS mode
5572 *	@flags: mode-specific flags
5573 *
5574 *	Sets the global RSS mode.
5575 */
5576int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5577		       unsigned int flags)
5578{
5579	struct fw_rss_glb_config_cmd c;
5580
5581	memset(&c, 0, sizeof(c));
5582	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5583				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5584	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5585	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5586		c.u.manual.mode_pkd =
5587			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5588	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5589		c.u.basicvirtual.mode_keymode =
5590			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5591		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5592	} else
5593		return -EINVAL;
5594	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5595}
5596
5597/**
5598 *	t4_config_vi_rss - configure per VI RSS settings
5599 *	@adapter: the adapter
5600 *	@mbox: mbox to use for the FW command
5601 *	@viid: the VI id
5602 *	@flags: RSS flags
5603 *	@defq: id of the default RSS queue for the VI.
5604 *	@skeyidx: RSS secret key table index for non-global mode
5605 *	@skey: RSS vf_scramble key for VI.
5606 *
5607 *	Configures VI-specific RSS properties.
5608 */
5609int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5610		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
5611		     unsigned int skey)
5612{
5613	struct fw_rss_vi_config_cmd c;
5614
5615	memset(&c, 0, sizeof(c));
5616	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5617				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5618				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5619	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5620	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5621					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5622	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5623					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5624	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5625
5626	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5627}
5628
5629/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/* Request the row, then wait for F_LKPTBLROWVLD and read it back. */
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5636
5637/**
5638 *	t4_read_rss - read the contents of the RSS mapping table
5639 *	@adapter: the adapter
5640 *	@map: holds the contents of the RSS mapping table
5641 *
5642 *	Reads the contents of the RSS hash->queue mapping table.
5643 */
5644int t4_read_rss(struct adapter *adapter, u16 *map)
5645{
5646	u32 val;
5647	int i, ret;
5648
5649	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5650		ret = rd_rss_row(adapter, i, &val);
5651		if (ret)
5652			return ret;
5653		*map++ = G_LKPTBLQUEUE0(val);
5654		*map++ = G_LKPTBLQUEUE1(val);
5655	}
5656	return 0;
5657}
5658
5659/**
5660 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5661 * @adap: the adapter
5662 * @cmd: TP fw ldst address space type
5663 * @vals: where the indirect register values are stored/written
5664 * @nregs: how many indirect registers to read/write
5665 * @start_idx: index of first indirect register to read/write
5666 * @rw: Read (1) or Write (0)
5667 * @sleep_ok: if true we may sleep while awaiting command completion
5668 *
5669 * Access TP indirect registers through LDST
5670 **/
5671static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5672			    unsigned int nregs, unsigned int start_index,
5673			    unsigned int rw, bool sleep_ok)
5674{
5675	int ret = 0;
5676	unsigned int i;
5677	struct fw_ldst_cmd c;
5678
5679	for (i = 0; i < nregs; i++) {
5680		memset(&c, 0, sizeof(c));
5681		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5682						F_FW_CMD_REQUEST |
5683						(rw ? F_FW_CMD_READ :
5684						      F_FW_CMD_WRITE) |
5685						V_FW_LDST_CMD_ADDRSPACE(cmd));
5686		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5687
5688		c.u.addrval.addr = cpu_to_be32(start_index + i);
5689		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5690		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5691				      sleep_ok);
5692		if (ret)
5693			return ret;
5694
5695		if (rw)
5696			vals[i] = be32_to_cpu(c.u.addrval.val);
5697	}
5698	return 0;
5699}
5700
5701/**
5702 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5703 * @adap: the adapter
5704 * @reg_addr: Address Register
5705 * @reg_data: Data register
5706 * @buff: where the indirect register values are stored/written
5707 * @nregs: how many indirect registers to read/write
5708 * @start_index: index of first indirect register to read/write
5709 * @rw: READ(1) or WRITE(0)
5710 * @sleep_ok: if true we may sleep while awaiting command completion
5711 *
5712 * Read/Write TP indirect registers through LDST if possible.
5713 * Else, use backdoor access
5714 **/
5715static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5716			      u32 *buff, u32 nregs, u32 start_index, int rw,
5717			      bool sleep_ok)
5718{
5719	int rc = -EINVAL;
5720	int cmd;
5721
5722	switch (reg_addr) {
5723	case A_TP_PIO_ADDR:
5724		cmd = FW_LDST_ADDRSPC_TP_PIO;
5725		break;
5726	case A_TP_TM_PIO_ADDR:
5727		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5728		break;
5729	case A_TP_MIB_INDEX:
5730		cmd = FW_LDST_ADDRSPC_TP_MIB;
5731		break;
5732	default:
5733		goto indirect_access;
5734	}
5735
5736	if (t4_use_ldst(adap))
5737		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5738				      sleep_ok);
5739
5740indirect_access:
5741
5742	if (rc) {
5743		if (rw)
5744			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5745					 start_index);
5746		else
5747			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5748					  start_index);
5749	}
5750}
5751
5752/**
5753 * t4_tp_pio_read - Read TP PIO registers
5754 * @adap: the adapter
5755 * @buff: where the indirect register values are written
5756 * @nregs: how many indirect registers to read
5757 * @start_index: index of first indirect register to read
5758 * @sleep_ok: if true we may sleep while awaiting command completion
5759 *
5760 * Read TP PIO Registers
5761 **/
5762void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5763		    u32 start_index, bool sleep_ok)
5764{
5765	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
5766			  start_index, 1, sleep_ok);
5767}
5768
5769/**
5770 * t4_tp_pio_write - Write TP PIO registers
5771 * @adap: the adapter
5772 * @buff: where the indirect register values are stored
5773 * @nregs: how many indirect registers to write
5774 * @start_index: index of first indirect register to write
5775 * @sleep_ok: if true we may sleep while awaiting command completion
5776 *
5777 * Write TP PIO Registers
5778 **/
5779void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
5780		     u32 start_index, bool sleep_ok)
5781{
5782	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5783	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
5784}
5785
5786/**
5787 * t4_tp_tm_pio_read - Read TP TM PIO registers
5788 * @adap: the adapter
5789 * @buff: where the indirect register values are written
5790 * @nregs: how many indirect registers to read
5791 * @start_index: index of first indirect register to read
5792 * @sleep_ok: if true we may sleep while awaiting command completion
5793 *
5794 * Read TP TM PIO Registers
5795 **/
5796void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5797		       u32 start_index, bool sleep_ok)
5798{
5799	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
5800			  nregs, start_index, 1, sleep_ok);
5801}
5802
5803/**
5804 * t4_tp_mib_read - Read TP MIB registers
5805 * @adap: the adapter
5806 * @buff: where the indirect register values are written
5807 * @nregs: how many indirect registers to read
5808 * @start_index: index of first indirect register to read
5809 * @sleep_ok: if true we may sleep while awaiting command completion
5810 *
5811 * Read TP MIB Registers
5812 **/
5813void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5814		    bool sleep_ok)
5815{
5816	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5817			  start_index, 1, sleep_ok);
5818}
5819
5820/**
5821 *	t4_read_rss_key - read the global RSS key
5822 *	@adap: the adapter
5823 *	@key: 10-entry array holding the 320-bit RSS key
5824 * 	@sleep_ok: if true we may sleep while awaiting command completion
5825 *
5826 *	Reads the global 320-bit RSS key.
5827 */
5828void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5829{
5830	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5831}
5832
5833/**
5834 *	t4_write_rss_key - program one of the RSS keys
5835 *	@adap: the adapter
5836 *	@key: 10-entry array holding the 320-bit RSS key
5837 *	@idx: which RSS key to write
5838 * 	@sleep_ok: if true we may sleep while awaiting command completion
5839 *
5840 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5841 *	0..15 the corresponding entry in the RSS key table is written,
5842 *	otherwise the global RSS key is written.
5843 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/*
	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((chip_id(adap) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Load the 320-bit key into the TP RSS secret-key registers. */
	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* If a valid table index was given, latch the key into that entry. */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			/* Extended mode: idx[5:4] via KEYWRADDRX, rest via
			 * the T6 VFWRADDR field. */
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}
5871
5872/**
5873 *	t4_read_rss_pf_config - read PF RSS Configuration Table
5874 *	@adapter: the adapter
5875 *	@index: the entry in the PF RSS table to read
5876 *	@valp: where to store the returned value
5877 * 	@sleep_ok: if true we may sleep while awaiting command completion
5878 *
5879 *	Reads the PF RSS Configuration Table at the specified index and returns
5880 *	the value found there.
5881 */
5882void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5883			   u32 *valp, bool sleep_ok)
5884{
5885	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5886}
5887
5888/**
5889 *	t4_write_rss_pf_config - write PF RSS Configuration Table
5890 *	@adapter: the adapter
5891 *	@index: the entry in the VF RSS table to read
5892 *	@val: the value to store
5893 * 	@sleep_ok: if true we may sleep while awaiting command completion
5894 *
5895 *	Writes the PF RSS Configuration Table at the specified index with the
5896 *	specified value.
5897 */
5898void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5899			    u32 val, bool sleep_ok)
5900{
5901	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5902			sleep_ok);
5903}
5904
5905/**
5906 *	t4_read_rss_vf_config - read VF RSS Configuration Table
5907 *	@adapter: the adapter
5908 *	@index: the entry in the VF RSS table to read
5909 *	@vfl: where to store the returned VFL
5910 *	@vfh: where to store the returned VFH
5911 * 	@sleep_ok: if true we may sleep while awaiting command completion
5912 *
5913 *	Reads the VF RSS Configuration Table at the specified index and returns
5914 *	the (VFL, VFH) values found there.
5915 */
5916void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5917			   u32 *vfl, u32 *vfh, bool sleep_ok)
5918{
5919	u32 vrt, mask, data;
5920
5921	if (chip_id(adapter) <= CHELSIO_T5) {
5922		mask = V_VFWRADDR(M_VFWRADDR);
5923		data = V_VFWRADDR(index);
5924	} else {
5925		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5926		 data = V_T6_VFWRADDR(index);
5927	}
5928	/*
5929	 * Request that the index'th VF Table values be read into VFL/VFH.
5930	 */
5931	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5932	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5933	vrt |= data | F_VFRDEN;
5934	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5935
5936	/*
5937	 * Grab the VFL/VFH values ...
5938	 */
5939	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5940	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5941}
5942
5943/**
5944 *	t4_write_rss_vf_config - write VF RSS Configuration Table
5945 *
5946 *	@adapter: the adapter
5947 *	@index: the entry in the VF RSS table to write
5948 *	@vfl: the VFL to store
5949 *	@vfh: the VFH to store
5950 *
5951 *	Writes the VF RSS Configuration Table at the specified index with the
5952 *	specified (VFL, VFH) values.
5953 */
5954void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5955			    u32 vfl, u32 vfh, bool sleep_ok)
5956{
5957	u32 vrt, mask, data;
5958
5959	if (chip_id(adapter) <= CHELSIO_T5) {
5960		mask = V_VFWRADDR(M_VFWRADDR);
5961		data = V_VFWRADDR(index);
5962	} else {
5963		mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5964		data = V_T6_VFWRADDR(index);
5965	}
5966
5967	/*
5968	 * Load up VFL/VFH with the values to be written ...
5969	 */
5970	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5971	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5972
5973	/*
5974	 * Write the VFL/VFH into the VF Table at index'th location.
5975	 */
5976	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5977	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5978	vrt |= data | F_VFRDEN;
5979	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5980}
5981
5982/**
5983 *	t4_read_rss_pf_map - read PF RSS Map
5984 *	@adapter: the adapter
5985 * 	@sleep_ok: if true we may sleep while awaiting command completion
5986 *
5987 *	Reads the PF RSS Map register and returns its value.
5988 */
5989u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5990{
5991	u32 pfmap;
5992
5993	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5994
5995	return pfmap;
5996}
5997
5998/**
5999 *	t4_write_rss_pf_map - write PF RSS Map
6000 *	@adapter: the adapter
6001 *	@pfmap: PF RSS Map value
6002 *
6003 *	Writes the specified value to the PF RSS Map register.
6004 */
6005void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
6006{
6007	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6008}
6009
6010/**
6011 *	t4_read_rss_pf_mask - read PF RSS Mask
6012 *	@adapter: the adapter
6013 * 	@sleep_ok: if true we may sleep while awaiting command completion
6014 *
6015 *	Reads the PF RSS Mask register and returns its value.
6016 */
6017u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6018{
6019	u32 pfmask;
6020
6021	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6022
6023	return pfmask;
6024}
6025
6026/**
6027 *	t4_write_rss_pf_mask - write PF RSS Mask
6028 *	@adapter: the adapter
6029 *	@pfmask: PF RSS Mask value
6030 *
6031 *	Writes the specified value to the PF RSS Mask register.
6032 */
6033void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
6034{
6035	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6036}
6037
6038/**
6039 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
6040 *	@adap: the adapter
6041 *	@v4: holds the TCP/IP counter values
6042 *	@v6: holds the TCP/IPv6 counter values
6043 * 	@sleep_ok: if true we may sleep while awaiting command completion
6044 *
6045 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6046 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6047 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer covering the OUT_RST..RXT_SEG_LO MIB range. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Offset of counter x within the MIB range read into val[] below. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* Combine a HI/LO counter register pair into one 64-bit value. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/*
		 * The v6 block starts at V6OUT_RST; the same STAT offsets
		 * are reused, relying on it having the same layout as the
		 * v4 block.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
6077
6078/**
6079 *	t4_tp_get_err_stats - read TP's error MIB counters
6080 *	@adap: the adapter
6081 *	@st: holds the counter values
6082 * 	@sleep_ok: if true we may sleep while awaiting command completion
6083 *
6084 *	Returns the values of TP's error counters.
6085 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* Most error counters are per-channel arrays of nchan entries. */
	int nchan = adap->chip_params->nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and the word after it are read as a pair. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}
6118
6119/**
6120 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
6121 *	@adap: the adapter
6122 *	@st: holds the counter values
6123 *
6124 *	Returns the values of TP's proxy counters.
6125 */
6126void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
6127    bool sleep_ok)
6128{
6129	int nchan = adap->chip_params->nchan;
6130
6131	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
6132}
6133
6134/**
6135 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
6136 *	@adap: the adapter
6137 *	@st: holds the counter values
6138 * 	@sleep_ok: if true we may sleep while awaiting command completion
6139 *
6140 *	Returns the values of TP's CPL counters.
6141 */
6142void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6143			 bool sleep_ok)
6144{
6145	int nchan = adap->chip_params->nchan;
6146
6147	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6148
6149	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6150}
6151
6152/**
6153 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6154 *	@adap: the adapter
6155 *	@st: holds the counter values
6156 *
6157 *	Returns the values of TP's RDMA counters.
6158 */
6159void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6160			  bool sleep_ok)
6161{
6162	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6163		       sleep_ok);
6164}
6165
6166/**
6167 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6168 *	@adap: the adapter
6169 *	@idx: the port index
6170 *	@st: holds the counter values
6171 * 	@sleep_ok: if true we may sleep while awaiting command completion
6172 *
6173 *	Returns the values of TP's FCoE counters for the selected port.
6174 */
6175void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6176		       struct tp_fcoe_stats *st, bool sleep_ok)
6177{
6178	u32 val[2];
6179
6180	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6181		       sleep_ok);
6182
6183	t4_tp_mib_read(adap, &st->frames_drop, 1,
6184		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6185
6186	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6187		       sleep_ok);
6188
6189	st->octets_ddp = ((u64)val[0] << 32) | val[1];
6190}
6191
6192/**
6193 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6194 *	@adap: the adapter
6195 *	@st: holds the counter values
6196 * 	@sleep_ok: if true we may sleep while awaiting command completion
6197 *
6198 *	Returns the values of TP's counters for non-TCP directly-placed packets.
6199 */
6200void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6201		      bool sleep_ok)
6202{
6203	u32 val[4];
6204
6205	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6206
6207	st->frames = val[0];
6208	st->drops = val[1];
6209	st->octets = ((u64)val[2] << 32) | val[3];
6210}
6211
6212/**
6213 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
6214 *	@adap: the adapter
6215 *	@mtus: where to store the MTU values
6216 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
6217 *
6218 *	Reads the HW path MTU table.
6219 */
6220void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6221{
6222	u32 v;
6223	int i;
6224
6225	for (i = 0; i < NMTUS; ++i) {
6226		t4_write_reg(adap, A_TP_MTU_TABLE,
6227			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
6228		v = t4_read_reg(adap, A_TP_MTU_TABLE);
6229		mtus[i] = G_MTUVALUE(v);
6230		if (mtu_log)
6231			mtu_log[i] = G_MTUWIDTH(v);
6232	}
6233}
6234
6235/**
6236 *	t4_read_cong_tbl - reads the congestion control table
6237 *	@adap: the adapter
6238 *	@incr: where to store the alpha values
6239 *
6240 *	Reads the additive increments programmed into the HW congestion
6241 *	control table.
6242 */
6243void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6244{
6245	unsigned int mtu, w;
6246
6247	for (mtu = 0; mtu < NMTUS; ++mtu)
6248		for (w = 0; w < NCCTRL_WIN; ++w) {
6249			t4_write_reg(adap, A_TP_CCTRL_TABLE,
6250				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
6251			incr[mtu][w] = (u16)t4_read_reg(adap,
6252						A_TP_CCTRL_TABLE) & 0x1fff;
6253		}
6254}
6255
6256/**
6257 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6258 *	@adap: the adapter
6259 *	@addr: the indirect TP register address
6260 *	@mask: specifies the field within the register to modify
6261 *	@val: new value for the field
6262 *
6263 *	Sets a field of an indirect TP register to the given value.
6264 */
6265void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6266			    unsigned int mask, unsigned int val)
6267{
6268	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6269	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6270	t4_write_reg(adap, A_TP_PIO_DATA, val);
6271}
6272
6273/**
6274 *	init_cong_ctrl - initialize congestion control parameters
6275 *	@a: the alpha values for congestion control
6276 *	@b: the beta values for congestion control
6277 *
6278 *	Initialize the congestion control parameters.
6279 */
6280static void init_cong_ctrl(unsigned short *a, unsigned short *b)
6281{
6282	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
6283	a[9] = 2;
6284	a[10] = 3;
6285	a[11] = 4;
6286	a[12] = 5;
6287	a[13] = 6;
6288	a[14] = 7;
6289	a[15] = 8;
6290	a[16] = 9;
6291	a[17] = 10;
6292	a[18] = 14;
6293	a[19] = 17;
6294	a[20] = 21;
6295	a[21] = 25;
6296	a[22] = 30;
6297	a[23] = 35;
6298	a[24] = 45;
6299	a[25] = 60;
6300	a[26] = 80;
6301	a[27] = 100;
6302	a[28] = 200;
6303	a[29] = 300;
6304	a[30] = 400;
6305	a[31] = 500;
6306
6307	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
6308	b[9] = b[10] = 1;
6309	b[11] = b[12] = 2;
6310	b[13] = b[14] = b[15] = b[16] = 3;
6311	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
6312	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
6313	b[28] = b[29] = 6;
6314	b[30] = b[31] = 7;
6315}
6316
/*
 * The minimum additive increment value that may be programmed into any
 * entry of the HW congestion control table (see t4_load_mtus()).
 */
#define CC_MIN_INCR 2U
6319
6320/**
6321 *	t4_load_mtus - write the MTU and congestion control HW tables
6322 *	@adap: the adapter
6323 *	@mtus: the values for the MTU table
6324 *	@alpha: the values for the congestion control alpha parameter
6325 *	@beta: the values for the congestion control beta parameter
6326 *
6327 *	Write the HW MTU table with the supplied MTUs and the high-speed
6328 *	congestion control table with the supplied alpha, beta, and MTUs.
6329 *	We write the two tables together because the additive increments
6330 *	depend on the MTUs.
6331 */
6332void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6333		  const unsigned short *alpha, const unsigned short *beta)
6334{
6335	static const unsigned int avg_pkts[NCCTRL_WIN] = {
6336		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
6337		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
6338		28672, 40960, 57344, 81920, 114688, 163840, 229376
6339	};
6340
6341	unsigned int i, w;
6342
6343	for (i = 0; i < NMTUS; ++i) {
6344		unsigned int mtu = mtus[i];
6345		unsigned int log2 = fls(mtu);
6346
6347		if (!(mtu & ((1 << log2) >> 2)))     /* round */
6348			log2--;
6349		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6350			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
6351
6352		for (w = 0; w < NCCTRL_WIN; ++w) {
6353			unsigned int inc;
6354
6355			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
6356				  CC_MIN_INCR);
6357
6358			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6359				     (w << 16) | (beta[w] << 13) | inc);
6360		}
6361	}
6362}
6363
6364/**
6365 *	t4_set_pace_tbl - set the pace table
6366 *	@adap: the adapter
6367 *	@pace_vals: the pace values in microseconds
6368 *	@start: index of the first entry in the HW pace table to set
6369 *	@n: how many entries to set
6370 *
6371 *	Sets (a subset of the) HW pace table.
6372 */
6373int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
6374		     unsigned int start, unsigned int n)
6375{
6376	unsigned int vals[NTX_SCHED], i;
6377	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
6378
6379	if (n > NTX_SCHED)
6380	    return -ERANGE;
6381
6382	/* convert values from us to dack ticks, rounding to closest value */
6383	for (i = 0; i < n; i++, pace_vals++) {
6384		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
6385		if (vals[i] > 0x7ff)
6386			return -ERANGE;
6387		if (*pace_vals && vals[i] == 0)
6388			return -ERANGE;
6389	}
6390	for (i = 0; i < n; i++, start++)
6391		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
6392	return 0;
6393}
6394
6395/**
6396 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
6397 *	@adap: the adapter
6398 *	@kbps: target rate in Kbps
6399 *	@sched: the scheduler index
6400 *
6401 *	Configure a Tx HW scheduler for the target rate.
6402 */
6403int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
6404{
6405	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
6406	unsigned int clk = adap->params.vpd.cclk * 1000;
6407	unsigned int selected_cpt = 0, selected_bpt = 0;
6408
6409	if (kbps > 0) {
6410		kbps *= 125;     /* -> bytes */
6411		for (cpt = 1; cpt <= 255; cpt++) {
6412			tps = clk / cpt;
6413			bpt = (kbps + tps / 2) / tps;
6414			if (bpt > 0 && bpt <= 255) {
6415				v = bpt * tps;
6416				delta = v >= kbps ? v - kbps : kbps - v;
6417				if (delta < mindelta) {
6418					mindelta = delta;
6419					selected_cpt = cpt;
6420					selected_bpt = bpt;
6421				}
6422			} else if (selected_cpt)
6423				break;
6424		}
6425		if (!selected_cpt)
6426			return -EINVAL;
6427	}
6428	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
6429		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
6430	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6431	if (sched & 1)
6432		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
6433	else
6434		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
6435	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6436	return 0;
6437}
6438
6439/**
6440 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
6441 *	@adap: the adapter
6442 *	@sched: the scheduler index
6443 *	@ipg: the interpacket delay in tenths of nanoseconds
6444 *
6445 *	Set the interpacket delay for a HW packet rate scheduler.
6446 */
6447int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
6448{
6449	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
6450
6451	/* convert ipg to nearest number of core clocks */
6452	ipg *= core_ticks_per_usec(adap);
6453	ipg = (ipg + 5000) / 10000;
6454	if (ipg > M_TXTIMERSEPQ0)
6455		return -EINVAL;
6456
6457	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
6458	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
6459	if (sched & 1)
6460		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
6461	else
6462		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
6463	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
6464	t4_read_reg(adap, A_TP_TM_PIO_DATA);
6465	return 0;
6466}
6467
6468/*
6469 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6470 * clocks.  The formula is
6471 *
6472 * bytes/s = bytes256 * 256 * ClkFreq / 4096
6473 *
6474 * which is equivalent to
6475 *
6476 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6477 */
6478static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6479{
6480	u64 v = (u64)bytes256 * adap->params.vpd.cclk;
6481
6482	return v * 62 + v / 2;
6483}
6484
6485/**
6486 *	t4_get_chan_txrate - get the current per channel Tx rates
6487 *	@adap: the adapter
6488 *	@nic_rate: rates for NIC traffic
6489 *	@ofld_rate: rates for offloaded traffic
6490 *
6491 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
6492 *	for each channel.
6493 */
6494void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6495{
6496	u32 v;
6497
6498	v = t4_read_reg(adap, A_TP_TX_TRATE);
6499	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6500	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6501	if (adap->chip_params->nchan > 2) {
6502		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6503		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6504	}
6505
6506	v = t4_read_reg(adap, A_TP_TX_ORATE);
6507	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6508	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6509	if (adap->chip_params->nchan > 2) {
6510		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6511		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6512	}
6513}
6514
6515/**
6516 *	t4_set_trace_filter - configure one of the tracing filters
6517 *	@adap: the adapter
6518 *	@tp: the desired trace filter parameters
6519 *	@idx: which filter to configure
6520 *	@enable: whether to enable or disable the filter
6521 *
6522 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
6523 *	it indicates that the filter is already written in the register and it
6524 *	just needs to be enabled or disabled.
6525 */
6526int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
6527    int idx, int enable)
6528{
6529	int i, ofst = idx * 4;
6530	u32 data_reg, mask_reg, cfg;
6531	u32 multitrc = F_TRCMULTIFILTER;
6532	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
6533
6534	if (idx < 0 || idx >= NTRACE)
6535		return -EINVAL;
6536
6537	if (tp == NULL || !enable) {
6538		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
6539		    enable ? en : 0);
6540		return 0;
6541	}
6542
6543	/*
6544	 * TODO - After T4 data book is updated, specify the exact
6545	 * section below.
6546	 *
6547	 * See T4 data book - MPS section for a complete description
6548	 * of the below if..else handling of A_MPS_TRC_CFG register
6549	 * value.
6550	 */
6551	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6552	if (cfg & F_TRCMULTIFILTER) {
6553		/*
6554		 * If multiple tracers are enabled, then maximum
6555		 * capture size is 2.5KB (FIFO size of a single channel)
6556		 * minus 2 flits for CPL_TRACE_PKT header.
6557		 */
6558		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
6559			return -EINVAL;
6560	} else {
6561		/*
6562		 * If multiple tracers are disabled, to avoid deadlocks
6563		 * maximum packet capture size of 9600 bytes is recommended.
6564		 * Also in this mode, only trace0 can be enabled and running.
6565		 */
6566		multitrc = 0;
6567		if (tp->snap_len > 9600 || idx)
6568			return -EINVAL;
6569	}
6570
6571	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
6572	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
6573	    tp->min_len > M_TFMINPKTSIZE)
6574		return -EINVAL;
6575
6576	/* stop the tracer we'll be changing */
6577	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
6578
6579	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
6580	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
6581	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
6582
6583	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6584		t4_write_reg(adap, data_reg, tp->data[i]);
6585		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6586	}
6587	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6588		     V_TFCAPTUREMAX(tp->snap_len) |
6589		     V_TFMINPKTSIZE(tp->min_len));
6590	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6591		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
6592		     (is_t4(adap) ?
6593		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
6594		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
6595
6596	return 0;
6597}
6598
6599/**
6600 *	t4_get_trace_filter - query one of the tracing filters
6601 *	@adap: the adapter
6602 *	@tp: the current trace filter parameters
6603 *	@idx: which trace filter to query
6604 *	@enabled: non-zero if the filter is enabled
6605 *
6606 *	Returns the current settings of one of the HW tracing filters.
6607 */
6608void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6609			 int *enabled)
6610{
6611	u32 ctla, ctlb;
6612	int i, ofst = idx * 4;
6613	u32 data_reg, mask_reg;
6614
6615	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6616	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6617
6618	if (is_t4(adap)) {
6619		*enabled = !!(ctla & F_TFEN);
6620		tp->port =  G_TFPORT(ctla);
6621		tp->invert = !!(ctla & F_TFINVERTMATCH);
6622	} else {
6623		*enabled = !!(ctla & F_T5_TFEN);
6624		tp->port = G_T5_TFPORT(ctla);
6625		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6626	}
6627	tp->snap_len = G_TFCAPTUREMAX(ctlb);
6628	tp->min_len = G_TFMINPKTSIZE(ctlb);
6629	tp->skip_ofst = G_TFOFFSET(ctla);
6630	tp->skip_len = G_TFLENGTH(ctla);
6631
6632	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6633	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6634	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6635
6636	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6637		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6638		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6639	}
6640}
6641
6642/**
6643 *	t4_pmtx_get_stats - returns the HW stats from PMTX
6644 *	@adap: the adapter
6645 *	@cnt: where to store the count statistics
6646 *	@cycles: where to store the cycle statistics
6647 *
6648 *	Returns performance statistics from PMTX.
6649 */
6650void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6651{
6652	int i;
6653	u32 data[2];
6654
6655	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6656		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6657		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6658		if (is_t4(adap))
6659			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6660		else {
6661			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6662					 A_PM_TX_DBG_DATA, data, 2,
6663					 A_PM_TX_DBG_STAT_MSB);
6664			cycles[i] = (((u64)data[0] << 32) | data[1]);
6665		}
6666	}
6667}
6668
6669/**
6670 *	t4_pmrx_get_stats - returns the HW stats from PMRX
6671 *	@adap: the adapter
6672 *	@cnt: where to store the count statistics
6673 *	@cycles: where to store the cycle statistics
6674 *
6675 *	Returns performance statistics from PMRX.
6676 */
6677void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6678{
6679	int i;
6680	u32 data[2];
6681
6682	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
6683		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6684		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6685		if (is_t4(adap)) {
6686			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6687		} else {
6688			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6689					 A_PM_RX_DBG_DATA, data, 2,
6690					 A_PM_RX_DBG_STAT_MSB);
6691			cycles[i] = (((u64)data[0] << 32) | data[1]);
6692		}
6693	}
6694}
6695
6696/**
6697 *	t4_get_mps_bg_map - return the buffer groups associated with a port
6698 *	@adap: the adapter
6699 *	@idx: the port index
6700 *
6701 *	Returns a bitmap indicating which MPS buffer groups are associated
6702 *	with the given port.  Bit i is set if buffer group i is used by the
6703 *	port.
6704 */
6705static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
6706{
6707	u32 n;
6708
6709	if (adap->params.mps_bg_map)
6710		return ((adap->params.mps_bg_map >> (idx << 3)) & 0xff);
6711
6712	n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6713	if (n == 0)
6714		return idx == 0 ? 0xf : 0;
6715	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6716		return idx < 2 ? (3 << (2 * idx)) : 0;
6717	return 1 << idx;
6718}
6719
6720/*
6721 * TP RX e-channels associated with the port.
6722 */
6723static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
6724{
6725	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
6726
6727	if (n == 0)
6728		return idx == 0 ? 0xf : 0;
6729	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
6730		return idx < 2 ? (3 << (2 * idx)) : 0;
6731	return 1 << idx;
6732}
6733
6734/**
6735 *      t4_get_port_type_description - return Port Type string description
6736 *      @port_type: firmware Port Type enumeration
6737 */
6738const char *t4_get_port_type_description(enum fw_port_type port_type)
6739{
6740	static const char *const port_type_description[] = {
6741		"Fiber_XFI",
6742		"Fiber_XAUI",
6743		"BT_SGMII",
6744		"BT_XFI",
6745		"BT_XAUI",
6746		"KX4",
6747		"CX4",
6748		"KX",
6749		"KR",
6750		"SFP",
6751		"BP_AP",
6752		"BP4_AP",
6753		"QSFP_10G",
6754		"QSA",
6755		"QSFP",
6756		"BP40_BA",
6757		"KR4_100G",
6758		"CR4_QSFP",
6759		"CR_QSFP",
6760		"CR2_QSFP",
6761		"SFP28",
6762		"KR_SFP28",
6763	};
6764
6765	if (port_type < ARRAY_SIZE(port_type_description))
6766		return port_type_description[port_type];
6767	return "UNKNOWN";
6768}
6769
6770/**
6771 *      t4_get_port_stats_offset - collect port stats relative to a previous
6772 *				   snapshot
6773 *      @adap: The adapter
6774 *      @idx: The port
6775 *      @stats: Current stats to fill
6776 *      @offset: Previous stats snapshot
6777 */
6778void t4_get_port_stats_offset(struct adapter *adap, int idx,
6779		struct port_stats *stats,
6780		struct port_stats *offset)
6781{
6782	u64 *s, *o;
6783	int i;
6784
6785	t4_get_port_stats(adap, idx, stats);
6786	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6787			i < (sizeof(struct port_stats)/sizeof(u64)) ;
6788			i++, s++, o++)
6789		*s -= *o;
6790}
6791
6792/**
6793 *	t4_get_port_stats - collect port statistics
6794 *	@adap: the adapter
6795 *	@idx: the port index
6796 *	@p: the stats structure to fill
6797 *
6798 *	Collect statistics related to the given port from HW.
6799 */
6800void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6801{
6802	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6803	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6804
6805#define GET_STAT(name) \
6806	t4_read_reg64(adap, \
6807	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6808	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6809#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6810
6811	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
6812	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
6813	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
6814	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
6815	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
6816	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
6817	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
6818	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
6819	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
6820	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
6821	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
6822	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
6823	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
6824	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
6825	p->tx_drop		= GET_STAT(TX_PORT_DROP);
6826	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
6827	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
6828	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
6829	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
6830	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
6831	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
6832	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
6833	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
6834
6835	if (chip_id(adap) >= CHELSIO_T5) {
6836		if (stat_ctl & F_COUNTPAUSESTATTX) {
6837			p->tx_frames -= p->tx_pause;
6838			p->tx_octets -= p->tx_pause * 64;
6839		}
6840		if (stat_ctl & F_COUNTPAUSEMCTX)
6841			p->tx_mcast_frames -= p->tx_pause;
6842	}
6843
6844	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
6845	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
6846	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
6847	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
6848	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
6849	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
6850	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
6851	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
6852	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
6853	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
6854	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
6855	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
6856	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
6857	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
6858	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
6859	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
6860	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
6861	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
6862	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
6863	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
6864	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
6865	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
6866	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
6867	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
6868	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
6869	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
6870	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
6871
6872	if (chip_id(adap) >= CHELSIO_T5) {
6873		if (stat_ctl & F_COUNTPAUSESTATRX) {
6874			p->rx_frames -= p->rx_pause;
6875			p->rx_octets -= p->rx_pause * 64;
6876		}
6877		if (stat_ctl & F_COUNTPAUSEMCRX)
6878			p->rx_mcast_frames -= p->rx_pause;
6879	}
6880
6881	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6882	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6883	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6884	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6885	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6886	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6887	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6888	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6889
6890#undef GET_STAT
6891#undef GET_STAT_COM
6892}
6893
6894/**
6895 *	t4_get_lb_stats - collect loopback port statistics
6896 *	@adap: the adapter
6897 *	@idx: the loopback port index
6898 *	@p: the stats structure to fill
6899 *
6900 *	Return HW statistics for the given loopback port.
6901 */
6902void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6903{
6904	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
6905
6906#define GET_STAT(name) \
6907	t4_read_reg64(adap, \
6908	(is_t4(adap) ? \
6909	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6910	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6911#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6912
6913	p->octets	= GET_STAT(BYTES);
6914	p->frames	= GET_STAT(FRAMES);
6915	p->bcast_frames	= GET_STAT(BCAST);
6916	p->mcast_frames	= GET_STAT(MCAST);
6917	p->ucast_frames	= GET_STAT(UCAST);
6918	p->error_frames	= GET_STAT(ERROR);
6919
6920	p->frames_64		= GET_STAT(64B);
6921	p->frames_65_127	= GET_STAT(65B_127B);
6922	p->frames_128_255	= GET_STAT(128B_255B);
6923	p->frames_256_511	= GET_STAT(256B_511B);
6924	p->frames_512_1023	= GET_STAT(512B_1023B);
6925	p->frames_1024_1518	= GET_STAT(1024B_1518B);
6926	p->frames_1519_max	= GET_STAT(1519B_MAX);
6927	p->drop			= GET_STAT(DROP_FRAMES);
6928
6929	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6930	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6931	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6932	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6933	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6934	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6935	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6936	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6937
6938#undef GET_STAT
6939#undef GET_STAT_COM
6940}
6941
6942/**
6943 *	t4_wol_magic_enable - enable/disable magic packet WoL
6944 *	@adap: the adapter
6945 *	@port: the physical port index
6946 *	@addr: MAC address expected in magic packets, %NULL to disable
6947 *
6948 *	Enables/disables magic packet wake-on-LAN for the selected port.
6949 */
6950void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6951			 const u8 *addr)
6952{
6953	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6954
6955	if (is_t4(adap)) {
6956		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6957		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6958		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6959	} else {
6960		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6961		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6962		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6963	}
6964
6965	if (addr) {
6966		t4_write_reg(adap, mag_id_reg_l,
6967			     (addr[2] << 24) | (addr[3] << 16) |
6968			     (addr[4] << 8) | addr[5]);
6969		t4_write_reg(adap, mag_id_reg_h,
6970			     (addr[0] << 8) | addr[1]);
6971	}
6972	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6973			 V_MAGICEN(addr != NULL));
6974}
6975
6976/**
6977 *	t4_wol_pat_enable - enable/disable pattern-based WoL
6978 *	@adap: the adapter
6979 *	@port: the physical port index
6980 *	@map: bitmap of which HW pattern filters to set
6981 *	@mask0: byte mask for bytes 0-63 of a packet
6982 *	@mask1: byte mask for bytes 64-127 of a packet
6983 *	@crc: Ethernet CRC for selected bytes
6984 *	@enable: enable/disable switch
6985 *
6986 *	Sets the pattern filters indicated in @map to mask out the bytes
6987 *	specified in @mask0/@mask1 in received packets and compare the CRC of
6988 *	the resulting packet against @crc.  If @enable is %true pattern-based
6989 *	WoL is enabled, otherwise disabled.
6990 */
6991int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6992		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
6993{
6994	int i;
6995	u32 port_cfg_reg;
6996
6997	if (is_t4(adap))
6998		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6999	else
7000		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
7001
7002	if (!enable) {
7003		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
7004		return 0;
7005	}
7006	if (map > 0xff)
7007		return -EINVAL;
7008
7009#define EPIO_REG(name) \
7010	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
7011	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
7012
7013	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
7014	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
7015	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
7016
7017	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
7018		if (!(map & 1))
7019			continue;
7020
7021		/* write byte masks */
7022		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
7023		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
7024		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
7025		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7026			return -ETIMEDOUT;
7027
7028		/* write CRC */
7029		t4_write_reg(adap, EPIO_REG(DATA0), crc);
7030		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
7031		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
7032		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
7033			return -ETIMEDOUT;
7034	}
7035#undef EPIO_REG
7036
7037	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
7038	return 0;
7039}
7040
7041/*     t4_mk_filtdelwr - create a delete filter WR
7042 *     @ftid: the filter ID
7043 *     @wr: the filter work request to populate
7044 *     @qid: ingress queue to receive the delete notification
7045 *
7046 *     Creates a filter work request to delete the supplied filter.  If @qid is
7047 *     negative the delete notification is suppressed.
7048 */
7049void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
7050{
7051	memset(wr, 0, sizeof(*wr));
7052	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
7053	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
7054	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
7055				    V_FW_FILTER_WR_NOREPLY(qid < 0));
7056	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
7057	if (qid >= 0)
7058		wr->rx_chan_rx_rpl_iq =
7059				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
7060}
7061
/*
 * INIT_CMD - initialize the common header of a FW command structure
 * @var: the command structure variable
 * @cmd: the command name (the FW_ prefix and _CMD suffix are appended)
 * @rd_wr: READ or WRITE
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
7068
7069int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
7070			  u32 addr, u32 val)
7071{
7072	u32 ldst_addrspace;
7073	struct fw_ldst_cmd c;
7074
7075	memset(&c, 0, sizeof(c));
7076	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
7077	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7078					F_FW_CMD_REQUEST |
7079					F_FW_CMD_WRITE |
7080					ldst_addrspace);
7081	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7082	c.u.addrval.addr = cpu_to_be32(addr);
7083	c.u.addrval.val = cpu_to_be32(val);
7084
7085	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7086}
7087
7088/**
7089 *	t4_mdio_rd - read a PHY register through MDIO
7090 *	@adap: the adapter
7091 *	@mbox: mailbox to use for the FW command
7092 *	@phy_addr: the PHY address
7093 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7094 *	@reg: the register to read
7095 *	@valp: where to store the value
7096 *
7097 *	Issues a FW command through the given mailbox to read a PHY register.
7098 */
7099int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7100	       unsigned int mmd, unsigned int reg, unsigned int *valp)
7101{
7102	int ret;
7103	u32 ldst_addrspace;
7104	struct fw_ldst_cmd c;
7105
7106	memset(&c, 0, sizeof(c));
7107	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7108	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7109					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7110					ldst_addrspace);
7111	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7112	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7113					 V_FW_LDST_CMD_MMD(mmd));
7114	c.u.mdio.raddr = cpu_to_be16(reg);
7115
7116	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7117	if (ret == 0)
7118		*valp = be16_to_cpu(c.u.mdio.rval);
7119	return ret;
7120}
7121
7122/**
7123 *	t4_mdio_wr - write a PHY register through MDIO
7124 *	@adap: the adapter
7125 *	@mbox: mailbox to use for the FW command
7126 *	@phy_addr: the PHY address
7127 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7128 *	@reg: the register to write
7129 *	@valp: value to write
7130 *
7131 *	Issues a FW command through the given mailbox to write a PHY register.
7132 */
7133int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7134	       unsigned int mmd, unsigned int reg, unsigned int val)
7135{
7136	u32 ldst_addrspace;
7137	struct fw_ldst_cmd c;
7138
7139	memset(&c, 0, sizeof(c));
7140	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7141	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7142					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7143					ldst_addrspace);
7144	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7145	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7146					 V_FW_LDST_CMD_MMD(mmd));
7147	c.u.mdio.raddr = cpu_to_be16(reg);
7148	c.u.mdio.rval = cpu_to_be16(val);
7149
7150	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7151}
7152
7153/**
7154 *
7155 *	t4_sge_decode_idma_state - decode the idma state
7156 *	@adap: the adapter
7157 *	@state: the state idma is stuck in
7158 */
7159void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7160{
7161	static const char * const t4_decode[] = {
7162		"IDMA_IDLE",
7163		"IDMA_PUSH_MORE_CPL_FIFO",
7164		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7165		"Not used",
7166		"IDMA_PHYSADDR_SEND_PCIEHDR",
7167		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7168		"IDMA_PHYSADDR_SEND_PAYLOAD",
7169		"IDMA_SEND_FIFO_TO_IMSG",
7170		"IDMA_FL_REQ_DATA_FL_PREP",
7171		"IDMA_FL_REQ_DATA_FL",
7172		"IDMA_FL_DROP",
7173		"IDMA_FL_H_REQ_HEADER_FL",
7174		"IDMA_FL_H_SEND_PCIEHDR",
7175		"IDMA_FL_H_PUSH_CPL_FIFO",
7176		"IDMA_FL_H_SEND_CPL",
7177		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7178		"IDMA_FL_H_SEND_IP_HDR",
7179		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7180		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7181		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7182		"IDMA_FL_D_SEND_PCIEHDR",
7183		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7184		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7185		"IDMA_FL_SEND_PCIEHDR",
7186		"IDMA_FL_PUSH_CPL_FIFO",
7187		"IDMA_FL_SEND_CPL",
7188		"IDMA_FL_SEND_PAYLOAD_FIRST",
7189		"IDMA_FL_SEND_PAYLOAD",
7190		"IDMA_FL_REQ_NEXT_DATA_FL",
7191		"IDMA_FL_SEND_NEXT_PCIEHDR",
7192		"IDMA_FL_SEND_PADDING",
7193		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7194		"IDMA_FL_SEND_FIFO_TO_IMSG",
7195		"IDMA_FL_REQ_DATAFL_DONE",
7196		"IDMA_FL_REQ_HEADERFL_DONE",
7197	};
7198	static const char * const t5_decode[] = {
7199		"IDMA_IDLE",
7200		"IDMA_ALMOST_IDLE",
7201		"IDMA_PUSH_MORE_CPL_FIFO",
7202		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7203		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7204		"IDMA_PHYSADDR_SEND_PCIEHDR",
7205		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7206		"IDMA_PHYSADDR_SEND_PAYLOAD",
7207		"IDMA_SEND_FIFO_TO_IMSG",
7208		"IDMA_FL_REQ_DATA_FL",
7209		"IDMA_FL_DROP",
7210		"IDMA_FL_DROP_SEND_INC",
7211		"IDMA_FL_H_REQ_HEADER_FL",
7212		"IDMA_FL_H_SEND_PCIEHDR",
7213		"IDMA_FL_H_PUSH_CPL_FIFO",
7214		"IDMA_FL_H_SEND_CPL",
7215		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7216		"IDMA_FL_H_SEND_IP_HDR",
7217		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7218		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7219		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7220		"IDMA_FL_D_SEND_PCIEHDR",
7221		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7222		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7223		"IDMA_FL_SEND_PCIEHDR",
7224		"IDMA_FL_PUSH_CPL_FIFO",
7225		"IDMA_FL_SEND_CPL",
7226		"IDMA_FL_SEND_PAYLOAD_FIRST",
7227		"IDMA_FL_SEND_PAYLOAD",
7228		"IDMA_FL_REQ_NEXT_DATA_FL",
7229		"IDMA_FL_SEND_NEXT_PCIEHDR",
7230		"IDMA_FL_SEND_PADDING",
7231		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7232	};
7233	static const char * const t6_decode[] = {
7234		"IDMA_IDLE",
7235		"IDMA_PUSH_MORE_CPL_FIFO",
7236		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7237		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7238		"IDMA_PHYSADDR_SEND_PCIEHDR",
7239		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7240		"IDMA_PHYSADDR_SEND_PAYLOAD",
7241		"IDMA_FL_REQ_DATA_FL",
7242		"IDMA_FL_DROP",
7243		"IDMA_FL_DROP_SEND_INC",
7244		"IDMA_FL_H_REQ_HEADER_FL",
7245		"IDMA_FL_H_SEND_PCIEHDR",
7246		"IDMA_FL_H_PUSH_CPL_FIFO",
7247		"IDMA_FL_H_SEND_CPL",
7248		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7249		"IDMA_FL_H_SEND_IP_HDR",
7250		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7251		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7252		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7253		"IDMA_FL_D_SEND_PCIEHDR",
7254		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7255		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7256		"IDMA_FL_SEND_PCIEHDR",
7257		"IDMA_FL_PUSH_CPL_FIFO",
7258		"IDMA_FL_SEND_CPL",
7259		"IDMA_FL_SEND_PAYLOAD_FIRST",
7260		"IDMA_FL_SEND_PAYLOAD",
7261		"IDMA_FL_REQ_NEXT_DATA_FL",
7262		"IDMA_FL_SEND_NEXT_PCIEHDR",
7263		"IDMA_FL_SEND_PADDING",
7264		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7265	};
7266	static const u32 sge_regs[] = {
7267		A_SGE_DEBUG_DATA_LOW_INDEX_2,
7268		A_SGE_DEBUG_DATA_LOW_INDEX_3,
7269		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7270	};
7271	const char * const *sge_idma_decode;
7272	int sge_idma_decode_nstates;
7273	int i;
7274	unsigned int chip_version = chip_id(adapter);
7275
7276	/* Select the right set of decode strings to dump depending on the
7277	 * adapter chip type.
7278	 */
7279	switch (chip_version) {
7280	case CHELSIO_T4:
7281		sge_idma_decode = (const char * const *)t4_decode;
7282		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7283		break;
7284
7285	case CHELSIO_T5:
7286		sge_idma_decode = (const char * const *)t5_decode;
7287		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7288		break;
7289
7290	case CHELSIO_T6:
7291		sge_idma_decode = (const char * const *)t6_decode;
7292		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7293		break;
7294
7295	default:
7296		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
7297		return;
7298	}
7299
7300	if (state < sge_idma_decode_nstates)
7301		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7302	else
7303		CH_WARN(adapter, "idma state %d unknown\n", state);
7304
7305	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7306		CH_WARN(adapter, "SGE register %#x value %#x\n",
7307			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7308}
7309
7310/**
7311 *      t4_sge_ctxt_flush - flush the SGE context cache
7312 *      @adap: the adapter
7313 *      @mbox: mailbox to use for the FW command
7314 *
7315 *      Issues a FW command through the given mailbox to flush the
7316 *      SGE context cache.
7317 */
7318int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7319{
7320	int ret;
7321	u32 ldst_addrspace;
7322	struct fw_ldst_cmd c;
7323
7324	memset(&c, 0, sizeof(c));
7325	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
7326	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7327					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7328					ldst_addrspace);
7329	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7330	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7331
7332	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7333	return ret;
7334}
7335
7336/**
7337 *      t4_fw_hello - establish communication with FW
7338 *      @adap: the adapter
7339 *      @mbox: mailbox to use for the FW command
7340 *      @evt_mbox: mailbox to receive async FW events
7341 *      @master: specifies the caller's willingness to be the device master
7342 *	@state: returns the current device state (if non-NULL)
7343 *
7344 *	Issues a command to establish communication with FW.  Returns either
7345 *	an error (negative integer) or the mailbox of the Master PF.
7346 */
7347int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7348		enum dev_master master, enum dev_state *state)
7349{
7350	int ret;
7351	struct fw_hello_cmd c;
7352	u32 v;
7353	unsigned int master_mbox;
7354	int retries = FW_CMD_HELLO_RETRIES;
7355
7356retry:
7357	memset(&c, 0, sizeof(c));
7358	INIT_CMD(c, HELLO, WRITE);
7359	c.err_to_clearinit = cpu_to_be32(
7360		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7361		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7362		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7363					mbox : M_FW_HELLO_CMD_MBMASTER) |
7364		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7365		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7366		F_FW_HELLO_CMD_CLEARINIT);
7367
7368	/*
7369	 * Issue the HELLO command to the firmware.  If it's not successful
7370	 * but indicates that we got a "busy" or "timeout" condition, retry
7371	 * the HELLO until we exhaust our retry limit.  If we do exceed our
7372	 * retry limit, check to see if the firmware left us any error
7373	 * information and report that if so ...
7374	 */
7375	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7376	if (ret != FW_SUCCESS) {
7377		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7378			goto retry;
7379		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7380			t4_report_fw_error(adap);
7381		return ret;
7382	}
7383
7384	v = be32_to_cpu(c.err_to_clearinit);
7385	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7386	if (state) {
7387		if (v & F_FW_HELLO_CMD_ERR)
7388			*state = DEV_STATE_ERR;
7389		else if (v & F_FW_HELLO_CMD_INIT)
7390			*state = DEV_STATE_INIT;
7391		else
7392			*state = DEV_STATE_UNINIT;
7393	}
7394
7395	/*
7396	 * If we're not the Master PF then we need to wait around for the
7397	 * Master PF Driver to finish setting up the adapter.
7398	 *
7399	 * Note that we also do this wait if we're a non-Master-capable PF and
7400	 * there is no current Master PF; a Master PF may show up momentarily
7401	 * and we wouldn't want to fail pointlessly.  (This can happen when an
7402	 * OS loads lots of different drivers rapidly at the same time).  In
7403	 * this case, the Master PF returned by the firmware will be
7404	 * M_PCIE_FW_MASTER so the test below will work ...
7405	 */
7406	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7407	    master_mbox != mbox) {
7408		int waiting = FW_CMD_HELLO_TIMEOUT;
7409
7410		/*
7411		 * Wait for the firmware to either indicate an error or
7412		 * initialized state.  If we see either of these we bail out
7413		 * and report the issue to the caller.  If we exhaust the
7414		 * "hello timeout" and we haven't exhausted our retries, try
7415		 * again.  Otherwise bail with a timeout error.
7416		 */
7417		for (;;) {
7418			u32 pcie_fw;
7419
7420			msleep(50);
7421			waiting -= 50;
7422
7423			/*
7424			 * If neither Error nor Initialialized are indicated
7425			 * by the firmware keep waiting till we exhaust our
7426			 * timeout ... and then retry if we haven't exhausted
7427			 * our retries ...
7428			 */
7429			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7430			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7431				if (waiting <= 0) {
7432					if (retries-- > 0)
7433						goto retry;
7434
7435					return -ETIMEDOUT;
7436				}
7437				continue;
7438			}
7439
7440			/*
7441			 * We either have an Error or Initialized condition
7442			 * report errors preferentially.
7443			 */
7444			if (state) {
7445				if (pcie_fw & F_PCIE_FW_ERR)
7446					*state = DEV_STATE_ERR;
7447				else if (pcie_fw & F_PCIE_FW_INIT)
7448					*state = DEV_STATE_INIT;
7449			}
7450
7451			/*
7452			 * If we arrived before a Master PF was selected and
7453			 * there's not a valid Master PF, grab its identity
7454			 * for our caller.
7455			 */
7456			if (master_mbox == M_PCIE_FW_MASTER &&
7457			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
7458				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7459			break;
7460		}
7461	}
7462
7463	return master_mbox;
7464}
7465
7466/**
7467 *	t4_fw_bye - end communication with FW
7468 *	@adap: the adapter
7469 *	@mbox: mailbox to use for the FW command
7470 *
7471 *	Issues a command to terminate communication with FW.
7472 */
7473int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7474{
7475	struct fw_bye_cmd c;
7476
7477	memset(&c, 0, sizeof(c));
7478	INIT_CMD(c, BYE, WRITE);
7479	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7480}
7481
7482/**
7483 *	t4_fw_reset - issue a reset to FW
7484 *	@adap: the adapter
7485 *	@mbox: mailbox to use for the FW command
7486 *	@reset: specifies the type of reset to perform
7487 *
7488 *	Issues a reset command of the specified type to FW.
7489 */
7490int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7491{
7492	struct fw_reset_cmd c;
7493
7494	memset(&c, 0, sizeof(c));
7495	INIT_CMD(c, RESET, WRITE);
7496	c.val = cpu_to_be32(reset);
7497	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7498}
7499
7500/**
7501 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7502 *	@adap: the adapter
7503 *	@mbox: mailbox to use for the FW RESET command (if desired)
7504 *	@force: force uP into RESET even if FW RESET command fails
7505 *
7506 *	Issues a RESET command to firmware (if desired) with a HALT indication
7507 *	and then puts the microprocessor into RESET state.  The RESET command
7508 *	will only be issued if a legitimate mailbox is provided (mbox <=
7509 *	M_PCIE_FW_MASTER).
7510 *
7511 *	This is generally used in order for the host to safely manipulate the
7512 *	adapter without fear of conflicting with whatever the firmware might
7513 *	be doing.  The only way out of this state is to RESTART the firmware
7514 *	...
7515 */
7516int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7517{
7518	int ret = 0;
7519
7520	/*
7521	 * If a legitimate mailbox is provided, issue a RESET command
7522	 * with a HALT indication.
7523	 */
7524	if (adap->flags & FW_OK && mbox <= M_PCIE_FW_MASTER) {
7525		struct fw_reset_cmd c;
7526
7527		memset(&c, 0, sizeof(c));
7528		INIT_CMD(c, RESET, WRITE);
7529		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7530		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7531		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7532	}
7533
7534	/*
7535	 * Normally we won't complete the operation if the firmware RESET
7536	 * command fails but if our caller insists we'll go ahead and put the
7537	 * uP into RESET.  This can be useful if the firmware is hung or even
7538	 * missing ...  We'll have to take the risk of putting the uP into
7539	 * RESET without the cooperation of firmware in that case.
7540	 *
7541	 * We also force the firmware's HALT flag to be on in case we bypassed
7542	 * the firmware RESET command above or we're dealing with old firmware
7543	 * which doesn't have the HALT capability.  This will serve as a flag
7544	 * for the incoming firmware to know that it's coming out of a HALT
7545	 * rather than a RESET ... if it's new enough to understand that ...
7546	 */
7547	if (ret == 0 || force) {
7548		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7549		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7550				 F_PCIE_FW_HALT);
7551	}
7552
7553	/*
7554	 * And we always return the result of the firmware RESET command
7555	 * even when we force the uP into RESET ...
7556	 */
7557	return ret;
7558}
7559
7560/**
7561 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7562 *	@adap: the adapter
7563 *
7564 *	Restart firmware previously halted by t4_fw_halt().  On successful
7565 *	return the previous PF Master remains as the new PF Master and there
7566 *	is no need to issue a new HELLO command, etc.
7567 */
7568int t4_fw_restart(struct adapter *adap, unsigned int mbox)
7569{
7570	int ms;
7571
7572	t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7573	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7574		if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7575			return FW_SUCCESS;
7576		msleep(100);
7577		ms += 100;
7578	}
7579
7580	return -ETIMEDOUT;
7581}
7582
7583/**
7584 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7585 *	@adap: the adapter
7586 *	@mbox: mailbox to use for the FW RESET command (if desired)
7587 *	@fw_data: the firmware image to write
7588 *	@size: image size
7589 *	@force: force upgrade even if firmware doesn't cooperate
7590 *
7591 *	Perform all of the steps necessary for upgrading an adapter's
7592 *	firmware image.  Normally this requires the cooperation of the
7593 *	existing firmware in order to halt all existing activities
7594 *	but if an invalid mailbox token is passed in we skip that step
7595 *	(though we'll still put the adapter microprocessor into RESET in
7596 *	that case).
7597 *
7598 *	On successful return the new firmware will have been loaded and
7599 *	the adapter will have been fully RESET losing all previous setup
7600 *	state.  On unsuccessful return the adapter may be completely hosed ...
7601 *	positive errno indicates that the adapter is ~probably~ intact, a
7602 *	negative errno indicates that things are looking bad ...
7603 */
7604int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7605		  const u8 *fw_data, unsigned int size, int force)
7606{
7607	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7608	unsigned int bootstrap =
7609	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7610	int ret;
7611
7612	if (!t4_fw_matches_chip(adap, fw_hdr))
7613		return -EINVAL;
7614
7615	if (!bootstrap) {
7616		ret = t4_fw_halt(adap, mbox, force);
7617		if (ret < 0 && !force)
7618			return ret;
7619	}
7620
7621	ret = t4_load_fw(adap, fw_data, size);
7622	if (ret < 0 || bootstrap)
7623		return ret;
7624
7625	return t4_fw_restart(adap, mbox);
7626}
7627
7628/**
7629 *	t4_fw_initialize - ask FW to initialize the device
7630 *	@adap: the adapter
7631 *	@mbox: mailbox to use for the FW command
7632 *
7633 *	Issues a command to FW to partially initialize the device.  This
7634 *	performs initialization that generally doesn't depend on user input.
7635 */
7636int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7637{
7638	struct fw_initialize_cmd c;
7639
7640	memset(&c, 0, sizeof(c));
7641	INIT_CMD(c, INITIALIZE, WRITE);
7642	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7643}
7644
7645/**
7646 *	t4_query_params_rw - query FW or device parameters
7647 *	@adap: the adapter
7648 *	@mbox: mailbox to use for the FW command
7649 *	@pf: the PF
7650 *	@vf: the VF
7651 *	@nparams: the number of parameters
7652 *	@params: the parameter names
7653 *	@val: the parameter values
7654 *	@rw: Write and read flag
7655 *
7656 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7657 *	queried at once.
7658 */
7659int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7660		       unsigned int vf, unsigned int nparams, const u32 *params,
7661		       u32 *val, int rw)
7662{
7663	int i, ret;
7664	struct fw_params_cmd c;
7665	__be32 *p = &c.param[0].mnem;
7666
7667	if (nparams > 7)
7668		return -EINVAL;
7669
7670	memset(&c, 0, sizeof(c));
7671	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7672				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
7673				  V_FW_PARAMS_CMD_PFN(pf) |
7674				  V_FW_PARAMS_CMD_VFN(vf));
7675	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7676
7677	for (i = 0; i < nparams; i++) {
7678		*p++ = cpu_to_be32(*params++);
7679		if (rw)
7680			*p = cpu_to_be32(*(val + i));
7681		p++;
7682	}
7683
7684	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7685	if (ret == 0)
7686		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7687			*val++ = be32_to_cpu(*p);
7688	return ret;
7689}
7690
/*
 * t4_query_params - query FW or device parameters (read-only convenience
 * wrapper around t4_query_params_rw with rw == 0).
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
7697
7698/**
7699 *      t4_set_params_timeout - sets FW or device parameters
7700 *      @adap: the adapter
7701 *      @mbox: mailbox to use for the FW command
7702 *      @pf: the PF
7703 *      @vf: the VF
7704 *      @nparams: the number of parameters
7705 *      @params: the parameter names
7706 *      @val: the parameter values
7707 *      @timeout: the timeout time
7708 *
7709 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7710 *      specified at once.
7711 */
7712int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7713			  unsigned int pf, unsigned int vf,
7714			  unsigned int nparams, const u32 *params,
7715			  const u32 *val, int timeout)
7716{
7717	struct fw_params_cmd c;
7718	__be32 *p = &c.param[0].mnem;
7719
7720	if (nparams > 7)
7721		return -EINVAL;
7722
7723	memset(&c, 0, sizeof(c));
7724	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7725				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7726				  V_FW_PARAMS_CMD_PFN(pf) |
7727				  V_FW_PARAMS_CMD_VFN(vf));
7728	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7729
7730	while (nparams--) {
7731		*p++ = cpu_to_be32(*params++);
7732		*p++ = cpu_to_be32(*val++);
7733	}
7734
7735	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7736}
7737
7738/**
7739 *	t4_set_params - sets FW or device parameters
7740 *	@adap: the adapter
7741 *	@mbox: mailbox to use for the FW command
7742 *	@pf: the PF
7743 *	@vf: the VF
7744 *	@nparams: the number of parameters
7745 *	@params: the parameter names
7746 *	@val: the parameter values
7747 *
7748 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7749 *	specified at once.
7750 */
7751int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7752		  unsigned int vf, unsigned int nparams, const u32 *params,
7753		  const u32 *val)
7754{
7755	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7756				     FW_CMD_MAX_TIMEOUT);
7757}
7758
7759/**
7760 *	t4_cfg_pfvf - configure PF/VF resource limits
7761 *	@adap: the adapter
7762 *	@mbox: mailbox to use for the FW command
7763 *	@pf: the PF being configured
7764 *	@vf: the VF being configured
7765 *	@txq: the max number of egress queues
7766 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7767 *	@rxqi: the max number of interrupt-capable ingress queues
7768 *	@rxq: the max number of interruptless ingress queues
7769 *	@tc: the PCI traffic class
7770 *	@vi: the max number of virtual interfaces
7771 *	@cmask: the channel access rights mask for the PF/VF
7772 *	@pmask: the port access rights mask for the PF/VF
7773 *	@nexact: the maximum number of exact MPS filters
7774 *	@rcaps: read capabilities
7775 *	@wxcaps: write/execute capabilities
7776 *
7777 *	Configures resource limits and capabilities for a physical or virtual
7778 *	function.
7779 */
7780int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7781		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7782		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7783		unsigned int vi, unsigned int cmask, unsigned int pmask,
7784		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7785{
7786	struct fw_pfvf_cmd c;
7787
7788	memset(&c, 0, sizeof(c));
7789	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7790				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7791				  V_FW_PFVF_CMD_VFN(vf));
7792	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7793	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7794				     V_FW_PFVF_CMD_NIQ(rxq));
7795	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7796				    V_FW_PFVF_CMD_PMASK(pmask) |
7797				    V_FW_PFVF_CMD_NEQ(txq));
7798	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7799				      V_FW_PFVF_CMD_NVI(vi) |
7800				      V_FW_PFVF_CMD_NEXACTF(nexact));
7801	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7802				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7803				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7804	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7805}
7806
7807/**
7808 *	t4_alloc_vi_func - allocate a virtual interface
7809 *	@adap: the adapter
7810 *	@mbox: mailbox to use for the FW command
7811 *	@port: physical port associated with the VI
7812 *	@pf: the PF owning the VI
7813 *	@vf: the VF owning the VI
7814 *	@nmac: number of MAC addresses needed (1 to 5)
7815 *	@mac: the MAC addresses of the VI
7816 *	@rss_size: size of RSS table slice associated with this VI
7817 *	@portfunc: which Port Application Function MAC Address is desired
7818 *	@idstype: Intrusion Detection Type
7819 *
7820 *	Allocates a virtual interface for the given physical port.  If @mac is
7821 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7822 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7823 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7824 *	stored consecutively so the space needed is @nmac * 6 bytes.
7825 *	Returns a negative error number or the non-negative VI id.
7826 */
7827int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7828		     unsigned int port, unsigned int pf, unsigned int vf,
7829		     unsigned int nmac, u8 *mac, u16 *rss_size,
7830		     uint8_t *vfvld, uint16_t *vin,
7831		     unsigned int portfunc, unsigned int idstype)
7832{
7833	int ret;
7834	struct fw_vi_cmd c;
7835
7836	memset(&c, 0, sizeof(c));
7837	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7838				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7839				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7840	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7841	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7842				     V_FW_VI_CMD_FUNC(portfunc));
7843	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7844	c.nmac = nmac - 1;
7845	if(!rss_size)
7846		c.norss_rsssize = F_FW_VI_CMD_NORSS;
7847
7848	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7849	if (ret)
7850		return ret;
7851	ret = G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7852
7853	if (mac) {
7854		memcpy(mac, c.mac, sizeof(c.mac));
7855		switch (nmac) {
7856		case 5:
7857			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7858		case 4:
7859			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7860		case 3:
7861			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7862		case 2:
7863			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
7864		}
7865	}
7866	if (rss_size)
7867		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7868	if (vfvld) {
7869		*vfvld = adap->params.viid_smt_extn_support ?
7870		    G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16)) :
7871		    G_FW_VIID_VIVLD(ret);
7872	}
7873	if (vin) {
7874		*vin = adap->params.viid_smt_extn_support ?
7875		    G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16)) :
7876		    G_FW_VIID_VIN(ret);
7877	}
7878
7879	return ret;
7880}
7881
7882/**
7883 *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7884 *      @adap: the adapter
7885 *      @mbox: mailbox to use for the FW command
7886 *      @port: physical port associated with the VI
7887 *      @pf: the PF owning the VI
7888 *      @vf: the VF owning the VI
7889 *      @nmac: number of MAC addresses needed (1 to 5)
7890 *      @mac: the MAC addresses of the VI
7891 *      @rss_size: size of RSS table slice associated with this VI
7892 *
7893 *	backwards compatible and convieniance routine to allocate a Virtual
7894 *	Interface with a Ethernet Port Application Function and Intrustion
7895 *	Detection System disabled.
7896 */
7897int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7898		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7899		u16 *rss_size, uint8_t *vfvld, uint16_t *vin)
7900{
7901	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7902				vfvld, vin, FW_VI_FUNC_ETH, 0);
7903}
7904
7905/**
7906 * 	t4_free_vi - free a virtual interface
7907 * 	@adap: the adapter
7908 * 	@mbox: mailbox to use for the FW command
7909 * 	@pf: the PF owning the VI
7910 * 	@vf: the VF owning the VI
7911 * 	@viid: virtual interface identifiler
7912 *
7913 * 	Free a previously allocated virtual interface.
7914 */
7915int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7916	       unsigned int vf, unsigned int viid)
7917{
7918	struct fw_vi_cmd c;
7919
7920	memset(&c, 0, sizeof(c));
7921	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7922				  F_FW_CMD_REQUEST |
7923				  F_FW_CMD_EXEC |
7924				  V_FW_VI_CMD_PFN(pf) |
7925				  V_FW_VI_CMD_VFN(vf));
7926	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7927	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7928
7929	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7930}
7931
7932/**
7933 *	t4_set_rxmode - set Rx properties of a virtual interface
7934 *	@adap: the adapter
7935 *	@mbox: mailbox to use for the FW command
7936 *	@viid: the VI id
7937 *	@mtu: the new MTU or -1
7938 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7939 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7940 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7941 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7942 *	@sleep_ok: if true we may sleep while awaiting command completion
7943 *
7944 *	Sets Rx properties of a virtual interface.
7945 */
7946int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7947		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
7948		  bool sleep_ok)
7949{
7950	struct fw_vi_rxmode_cmd c;
7951
7952	/* convert to FW values */
7953	if (mtu < 0)
7954		mtu = M_FW_VI_RXMODE_CMD_MTU;
7955	if (promisc < 0)
7956		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7957	if (all_multi < 0)
7958		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7959	if (bcast < 0)
7960		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7961	if (vlanex < 0)
7962		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7963
7964	memset(&c, 0, sizeof(c));
7965	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7966				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7967				   V_FW_VI_RXMODE_CMD_VIID(viid));
7968	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7969	c.mtu_to_vlanexen =
7970		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7971			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7972			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7973			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7974			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7975	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7976}
7977
7978/**
7979 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7980 *	@adap: the adapter
7981 *	@mbox: mailbox to use for the FW command
7982 *	@viid: the VI id
7983 *	@free: if true any existing filters for this VI id are first removed
7984 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
7985 *	@addr: the MAC address(es)
7986 *	@idx: where to store the index of each allocated filter
7987 *	@hash: pointer to hash address filter bitmap
7988 *	@sleep_ok: call is allowed to sleep
7989 *
7990 *	Allocates an exact-match filter for each of the supplied addresses and
7991 *	sets it to the corresponding address.  If @idx is not %NULL it should
7992 *	have at least @naddr entries, each of which will be set to the index of
7993 *	the filter allocated for the corresponding MAC address.  If a filter
7994 *	could not be allocated for an address its index is set to 0xffff.
7995 *	If @hash is not %NULL addresses that fail to allocate an exact filter
7996 *	are hashed and update the hash filter bitmap pointed at by @hash.
7997 *
7998 *	Returns a negative error number or the number of filters allocated.
7999 */
8000int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8001		      unsigned int viid, bool free, unsigned int naddr,
8002		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8003{
8004	int offset, ret = 0;
8005	struct fw_vi_mac_cmd c;
8006	unsigned int nfilters = 0;
8007	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
8008	unsigned int rem = naddr;
8009
8010	if (naddr > max_naddr)
8011		return -EINVAL;
8012
8013	for (offset = 0; offset < naddr ; /**/) {
8014		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8015					 ? rem
8016					 : ARRAY_SIZE(c.u.exact));
8017		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8018						     u.exact[fw_naddr]), 16);
8019		struct fw_vi_mac_exact *p;
8020		int i;
8021
8022		memset(&c, 0, sizeof(c));
8023		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8024					   F_FW_CMD_REQUEST |
8025					   F_FW_CMD_WRITE |
8026					   V_FW_CMD_EXEC(free) |
8027					   V_FW_VI_MAC_CMD_VIID(viid));
8028		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
8029						  V_FW_CMD_LEN16(len16));
8030
8031		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8032			p->valid_to_idx =
8033				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8034					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8035			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8036		}
8037
8038		/*
8039		 * It's okay if we run out of space in our MAC address arena.
8040		 * Some of the addresses we submit may get stored so we need
8041		 * to run through the reply to see what the results were ...
8042		 */
8043		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8044		if (ret && ret != -FW_ENOMEM)
8045			break;
8046
8047		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8048			u16 index = G_FW_VI_MAC_CMD_IDX(
8049						be16_to_cpu(p->valid_to_idx));
8050
8051			if (idx)
8052				idx[offset+i] = (index >=  max_naddr
8053						 ? 0xffff
8054						 : index);
8055			if (index < max_naddr)
8056				nfilters++;
8057			else if (hash)
8058				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
8059		}
8060
8061		free = false;
8062		offset += fw_naddr;
8063		rem -= fw_naddr;
8064	}
8065
8066	if (ret == 0 || ret == -FW_ENOMEM)
8067		ret = nfilters;
8068	return ret;
8069}
8070
/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: add MAC to SMT and return its index, or NULL
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, uint16_t *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Also program the SMT when the caller wants the SMT index back. */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The reply echoes the TCAM index that was actually used. */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		/* An out-of-range index means no TCAM entry was available. */
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			if (adap->params.viid_smt_extn_support)
				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
			else {
				/*
				 * Without the VIID/SMT extension, derive the
				 * SMT index from the VIN (shifted left by one
				 * on T4/T5).
				 */
				if (chip_id(adap) <= CHELSIO_T5)
					*smt_idx = (viid & M_FW_VIID_VIN) << 1;
				else
					*smt_idx = viid & M_FW_VIID_VIN;
			}
		}
	}
	return ret;
}
8133
8134/**
8135 *	t4_set_addr_hash - program the MAC inexact-match hash filter
8136 *	@adap: the adapter
8137 *	@mbox: mailbox to use for the FW command
8138 *	@viid: the VI id
8139 *	@ucast: whether the hash filter should also match unicast addresses
8140 *	@vec: the value to be written to the hash filter
8141 *	@sleep_ok: call is allowed to sleep
8142 *
8143 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
8144 */
8145int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8146		     bool ucast, u64 vec, bool sleep_ok)
8147{
8148	struct fw_vi_mac_cmd c;
8149	u32 val;
8150
8151	memset(&c, 0, sizeof(c));
8152	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8153				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8154				   V_FW_VI_ENABLE_CMD_VIID(viid));
8155	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8156	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8157	c.freemacs_to_len16 = cpu_to_be32(val);
8158	c.u.hash.hashvec = cpu_to_be64(vec);
8159	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8160}
8161
8162/**
8163 *      t4_enable_vi_params - enable/disable a virtual interface
8164 *      @adap: the adapter
8165 *      @mbox: mailbox to use for the FW command
8166 *      @viid: the VI id
8167 *      @rx_en: 1=enable Rx, 0=disable Rx
8168 *      @tx_en: 1=enable Tx, 0=disable Tx
8169 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8170 *
8171 *      Enables/disables a virtual interface.  Note that setting DCB Enable
8172 *      only makes sense when enabling a Virtual Interface ...
8173 */
8174int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8175			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8176{
8177	struct fw_vi_enable_cmd c;
8178
8179	memset(&c, 0, sizeof(c));
8180	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8181				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8182				   V_FW_VI_ENABLE_CMD_VIID(viid));
8183	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8184				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8185				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8186				     FW_LEN16(c));
8187	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8188}
8189
8190/**
8191 *	t4_enable_vi - enable/disable a virtual interface
8192 *	@adap: the adapter
8193 *	@mbox: mailbox to use for the FW command
8194 *	@viid: the VI id
8195 *	@rx_en: 1=enable Rx, 0=disable Rx
8196 *	@tx_en: 1=enable Tx, 0=disable Tx
8197 *
8198 *	Enables/disables a virtual interface.  Note that setting DCB Enable
8199 *	only makes sense when enabling a Virtual Interface ...
8200 */
8201int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8202		 bool rx_en, bool tx_en)
8203{
8204	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8205}
8206
8207/**
8208 *	t4_identify_port - identify a VI's port by blinking its LED
8209 *	@adap: the adapter
8210 *	@mbox: mailbox to use for the FW command
8211 *	@viid: the VI id
8212 *	@nblinks: how many times to blink LED at 2.5 Hz
8213 *
8214 *	Identifies a VI's port by blinking its LED.
8215 */
8216int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8217		     unsigned int nblinks)
8218{
8219	struct fw_vi_enable_cmd c;
8220
8221	memset(&c, 0, sizeof(c));
8222	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8223				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8224				   V_FW_VI_ENABLE_CMD_VIID(viid));
8225	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8226	c.blinkdur = cpu_to_be16(nblinks);
8227	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8228}
8229
8230/**
8231 *	t4_iq_stop - stop an ingress queue and its FLs
8232 *	@adap: the adapter
8233 *	@mbox: mailbox to use for the FW command
8234 *	@pf: the PF owning the queues
8235 *	@vf: the VF owning the queues
8236 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8237 *	@iqid: ingress queue id
8238 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8239 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8240 *
8241 *	Stops an ingress queue and its associated FLs, if any.  This causes
8242 *	any current or future data/messages destined for these queues to be
8243 *	tossed.
8244 */
8245int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8246	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8247	       unsigned int fl0id, unsigned int fl1id)
8248{
8249	struct fw_iq_cmd c;
8250
8251	memset(&c, 0, sizeof(c));
8252	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8253				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8254				  V_FW_IQ_CMD_VFN(vf));
8255	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8256	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8257	c.iqid = cpu_to_be16(iqid);
8258	c.fl0id = cpu_to_be16(fl0id);
8259	c.fl1id = cpu_to_be16(fl1id);
8260	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8261}
8262
8263/**
8264 *	t4_iq_free - free an ingress queue and its FLs
8265 *	@adap: the adapter
8266 *	@mbox: mailbox to use for the FW command
8267 *	@pf: the PF owning the queues
8268 *	@vf: the VF owning the queues
8269 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8270 *	@iqid: ingress queue id
8271 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8272 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8273 *
8274 *	Frees an ingress queue and its associated FLs, if any.
8275 */
8276int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8277	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8278	       unsigned int fl0id, unsigned int fl1id)
8279{
8280	struct fw_iq_cmd c;
8281
8282	memset(&c, 0, sizeof(c));
8283	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8284				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8285				  V_FW_IQ_CMD_VFN(vf));
8286	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8287	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8288	c.iqid = cpu_to_be16(iqid);
8289	c.fl0id = cpu_to_be16(fl0id);
8290	c.fl1id = cpu_to_be16(fl1id);
8291	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8292}
8293
8294/**
8295 *	t4_eth_eq_free - free an Ethernet egress queue
8296 *	@adap: the adapter
8297 *	@mbox: mailbox to use for the FW command
8298 *	@pf: the PF owning the queue
8299 *	@vf: the VF owning the queue
8300 *	@eqid: egress queue id
8301 *
8302 *	Frees an Ethernet egress queue.
8303 */
8304int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8305		   unsigned int vf, unsigned int eqid)
8306{
8307	struct fw_eq_eth_cmd c;
8308
8309	memset(&c, 0, sizeof(c));
8310	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8311				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8312				  V_FW_EQ_ETH_CMD_PFN(pf) |
8313				  V_FW_EQ_ETH_CMD_VFN(vf));
8314	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8315	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8316	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8317}
8318
8319/**
8320 *	t4_ctrl_eq_free - free a control egress queue
8321 *	@adap: the adapter
8322 *	@mbox: mailbox to use for the FW command
8323 *	@pf: the PF owning the queue
8324 *	@vf: the VF owning the queue
8325 *	@eqid: egress queue id
8326 *
8327 *	Frees a control egress queue.
8328 */
8329int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8330		    unsigned int vf, unsigned int eqid)
8331{
8332	struct fw_eq_ctrl_cmd c;
8333
8334	memset(&c, 0, sizeof(c));
8335	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8336				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8337				  V_FW_EQ_CTRL_CMD_PFN(pf) |
8338				  V_FW_EQ_CTRL_CMD_VFN(vf));
8339	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8340	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8341	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8342}
8343
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_OFLD_CMD_PFN(pf) |
				  V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
8368
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const dn_reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};
	const size_t nreasons = sizeof(dn_reason) / sizeof(dn_reason[0]);

	return (link_down_rc < nreasons ? dn_reason[link_down_rc] :
	    "Bad Reason Code");
}
8393
8394/*
8395 * Return the highest speed set in the port capabilities, in Mb/s.
8396 */
8397unsigned int fwcap_to_speed(uint32_t caps)
8398{
8399	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
8400		do { \
8401			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8402				return __speed; \
8403		} while (0)
8404
8405	TEST_SPEED_RETURN(400G, 400000);
8406	TEST_SPEED_RETURN(200G, 200000);
8407	TEST_SPEED_RETURN(100G, 100000);
8408	TEST_SPEED_RETURN(50G,   50000);
8409	TEST_SPEED_RETURN(40G,   40000);
8410	TEST_SPEED_RETURN(25G,   25000);
8411	TEST_SPEED_RETURN(10G,   10000);
8412	TEST_SPEED_RETURN(1G,     1000);
8413	TEST_SPEED_RETURN(100M,    100);
8414
8415	#undef TEST_SPEED_RETURN
8416
8417	return 0;
8418}
8419
8420/*
8421 * Return the port capabilities bit for the given speed, which is in Mb/s.
8422 */
8423uint32_t speed_to_fwcap(unsigned int speed)
8424{
8425	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
8426		do { \
8427			if (speed == __speed) \
8428				return FW_PORT_CAP32_SPEED_##__caps_speed; \
8429		} while (0)
8430
8431	TEST_SPEED_RETURN(400G, 400000);
8432	TEST_SPEED_RETURN(200G, 200000);
8433	TEST_SPEED_RETURN(100G, 100000);
8434	TEST_SPEED_RETURN(50G,   50000);
8435	TEST_SPEED_RETURN(40G,   40000);
8436	TEST_SPEED_RETURN(25G,   25000);
8437	TEST_SPEED_RETURN(10G,   10000);
8438	TEST_SPEED_RETURN(1G,     1000);
8439	TEST_SPEED_RETURN(100M,    100);
8440
8441	#undef TEST_SPEED_RETURN
8442
8443	return 0;
8444}
8445
8446/*
8447 * Return the port capabilities bit for the highest speed in the capabilities.
8448 */
8449uint32_t fwcap_top_speed(uint32_t caps)
8450{
8451	#define TEST_SPEED_RETURN(__caps_speed) \
8452		do { \
8453			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8454				return FW_PORT_CAP32_SPEED_##__caps_speed; \
8455		} while (0)
8456
8457	TEST_SPEED_RETURN(400G);
8458	TEST_SPEED_RETURN(200G);
8459	TEST_SPEED_RETURN(100G);
8460	TEST_SPEED_RETURN(50G);
8461	TEST_SPEED_RETURN(40G);
8462	TEST_SPEED_RETURN(25G);
8463	TEST_SPEED_RETURN(10G);
8464	TEST_SPEED_RETURN(1G);
8465	TEST_SPEED_RETURN(100M);
8466
8467	#undef TEST_SPEED_RETURN
8468
8469	return 0;
8470}
8471
8472
8473/**
8474 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8475 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8476 *
8477 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8478 *	32-bit Port Capabilities value.
8479 */
8480static uint32_t lstatus_to_fwcap(u32 lstatus)
8481{
8482	uint32_t linkattr = 0;
8483
8484	/*
8485	 * Unfortunately the format of the Link Status in the old
8486	 * 16-bit Port Information message isn't the same as the
8487	 * 16-bit Port Capabilities bitfield used everywhere else ...
8488	 */
8489	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
8490		linkattr |= FW_PORT_CAP32_FC_RX;
8491	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
8492		linkattr |= FW_PORT_CAP32_FC_TX;
8493	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
8494		linkattr |= FW_PORT_CAP32_SPEED_100M;
8495	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
8496		linkattr |= FW_PORT_CAP32_SPEED_1G;
8497	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
8498		linkattr |= FW_PORT_CAP32_SPEED_10G;
8499	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
8500		linkattr |= FW_PORT_CAP32_SPEED_25G;
8501	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
8502		linkattr |= FW_PORT_CAP32_SPEED_40G;
8503	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
8504		linkattr |= FW_PORT_CAP32_SPEED_100G;
8505
8506	return linkattr;
8507}
8508
8509/*
8510 * Updates all fields owned by the common code in port_info and link_config
8511 * based on information provided by the firmware.  Does not touch any
8512 * requested_* field.
8513 */
8514static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
8515    enum fw_port_action action, bool *mod_changed, bool *link_changed)
8516{
8517	struct link_config old_lc, *lc = &pi->link_cfg;
8518	unsigned char fc, fec;
8519	u32 stat, linkattr;
8520	int old_ptype, old_mtype;
8521
8522	old_ptype = pi->port_type;
8523	old_mtype = pi->mod_type;
8524	old_lc = *lc;
8525	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8526		stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
8527
8528		pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
8529		pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
8530		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
8531		    G_FW_PORT_CMD_MDIOADDR(stat) : -1;
8532
8533		lc->supported = fwcaps16_to_caps32(be16_to_cpu(p->u.info.pcap));
8534		lc->advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.acap));
8535		lc->lp_advertising = fwcaps16_to_caps32(be16_to_cpu(p->u.info.lpacap));
8536		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
8537		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
8538
8539		linkattr = lstatus_to_fwcap(stat);
8540	} else if (action == FW_PORT_ACTION_GET_PORT_INFO32) {
8541		stat = be32_to_cpu(p->u.info32.lstatus32_to_cbllen32);
8542
8543		pi->port_type = G_FW_PORT_CMD_PORTTYPE32(stat);
8544		pi->mod_type = G_FW_PORT_CMD_MODTYPE32(stat);
8545		pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP32 ?
8546		    G_FW_PORT_CMD_MDIOADDR32(stat) : -1;
8547
8548		lc->supported = be32_to_cpu(p->u.info32.pcaps32);
8549		lc->advertising = be32_to_cpu(p->u.info32.acaps32);
8550		lc->lp_advertising = be16_to_cpu(p->u.info32.lpacaps32);
8551		lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS32) != 0;
8552		lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC32(stat);
8553
8554		linkattr = be32_to_cpu(p->u.info32.linkattr32);
8555	} else {
8556		CH_ERR(pi->adapter, "bad port_info action 0x%x\n", action);
8557		return;
8558	}
8559
8560	lc->speed = fwcap_to_speed(linkattr);
8561
8562	fc = 0;
8563	if (linkattr & FW_PORT_CAP32_FC_RX)
8564		fc |= PAUSE_RX;
8565	if (linkattr & FW_PORT_CAP32_FC_TX)
8566		fc |= PAUSE_TX;
8567	lc->fc = fc;
8568
8569	fec = FEC_NONE;
8570	if (linkattr & FW_PORT_CAP32_FEC_RS)
8571		fec |= FEC_RS;
8572	if (linkattr & FW_PORT_CAP32_FEC_BASER_RS)
8573		fec |= FEC_BASER_RS;
8574	lc->fec = fec;
8575
8576	if (mod_changed != NULL)
8577		*mod_changed = false;
8578	if (link_changed != NULL)
8579		*link_changed = false;
8580	if (old_ptype != pi->port_type || old_mtype != pi->mod_type ||
8581	    old_lc.supported != lc->supported) {
8582		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
8583			lc->fec_hint = lc->advertising &
8584			    V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
8585		}
8586		if (mod_changed != NULL)
8587			*mod_changed = true;
8588	}
8589	if (old_lc.link_ok != lc->link_ok || old_lc.speed != lc->speed ||
8590	    old_lc.fec != lc->fec || old_lc.fc != lc->fc) {
8591		if (link_changed != NULL)
8592			*link_changed = true;
8593	}
8594}
8595
8596/**
8597 *	t4_update_port_info - retrieve and update port information if changed
8598 *	@pi: the port_info
8599 *
8600 *	We issue a Get Port Information Command to the Firmware and, if
8601 *	successful, we check to see if anything is different from what we
8602 *	last recorded and update things accordingly.
8603 */
8604 int t4_update_port_info(struct port_info *pi)
8605 {
8606	struct adapter *sc = pi->adapter;
8607	struct fw_port_cmd cmd;
8608	enum fw_port_action action;
8609	int ret;
8610
8611	memset(&cmd, 0, sizeof(cmd));
8612	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8613	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
8614	    V_FW_PORT_CMD_PORTID(pi->tx_chan));
8615	action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
8616	    FW_PORT_ACTION_GET_PORT_INFO;
8617	cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
8618	    FW_LEN16(cmd));
8619	ret = t4_wr_mbox_ns(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
8620	if (ret)
8621		return ret;
8622
8623	handle_port_info(pi, &cmd, action, NULL, NULL);
8624	return 0;
8625}
8626
8627/**
8628 *	t4_handle_fw_rpl - process a FW reply message
8629 *	@adap: the adapter
8630 *	@rpl: start of the FW message
8631 *
8632 *	Processes a FW message, such as link state change messages.
8633 */
8634int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8635{
8636	u8 opcode = *(const u8 *)rpl;
8637	const struct fw_port_cmd *p = (const void *)rpl;
8638	enum fw_port_action action =
8639	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8640	bool mod_changed, link_changed;
8641
8642	if (opcode == FW_PORT_CMD &&
8643	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
8644	    action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8645		/* link/module state change message */
8646		int i;
8647		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8648		struct port_info *pi = NULL;
8649		struct link_config *lc;
8650
8651		for_each_port(adap, i) {
8652			pi = adap2pinfo(adap, i);
8653			if (pi->tx_chan == chan)
8654				break;
8655		}
8656
8657		lc = &pi->link_cfg;
8658		PORT_LOCK(pi);
8659		handle_port_info(pi, p, action, &mod_changed, &link_changed);
8660		PORT_UNLOCK(pi);
8661		if (mod_changed)
8662			t4_os_portmod_changed(pi);
8663		if (link_changed) {
8664			PORT_LOCK(pi);
8665			t4_os_link_changed(pi);
8666			PORT_UNLOCK(pi);
8667		}
8668	} else {
8669		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8670		return -EINVAL;
8671	}
8672	return 0;
8673}
8674
8675/**
8676 *	get_pci_mode - determine a card's PCI mode
8677 *	@adapter: the adapter
8678 *	@p: where to store the PCI settings
8679 *
8680 *	Determines a card's PCI mode and associated parameters, such as speed
8681 *	and width.
8682 */
8683static void get_pci_mode(struct adapter *adapter,
8684				   struct pci_params *p)
8685{
8686	u16 val;
8687	u32 pcie_cap;
8688
8689	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8690	if (pcie_cap) {
8691		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8692		p->speed = val & PCI_EXP_LNKSTA_CLS;
8693		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8694	}
8695}
8696
/* Entry in the table of explicitly-supported (non-JEDEC-decoded) Flash parts. */
struct flash_desc {
	u32 vendor_and_model_id;	/* ID returned by the SF Read ID command */
	u32 size_mb;	/* part size; table entries store bytes (e.g. 4 << 20) */
};
8701
/*
 * Identify the adapter's serial Flash part and record its size and sector
 * count in adapter->params.  Returns 0 on success or a negative error.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
8839
8840static void set_pcie_completion_timeout(struct adapter *adapter,
8841						  u8 range)
8842{
8843	u16 val;
8844	u32 pcie_cap;
8845
8846	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8847	if (pcie_cap) {
8848		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8849		val &= 0xfff0;
8850		val |= range ;
8851		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
8852	}
8853}
8854
/*
 * Return the per-chip constants for the given chip id (CHELSIO_T4 and up),
 * or NULL for an unknown chip.  The returned pointer refers to static
 * storage and must not be modified or freed.
 */
const struct chip_params *t4_get_chip_params(int chipid)
{
	/* Indexed by chipid - CHELSIO_T4; keep in sync with chip ids. */
	static const struct chip_params chip_params[] = {
		{
			/* T4 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 15,
			.cim_num_obq = CIM_NUM_OBQ,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO,
			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T5 */
			.nchan = NCHAN,
			.pm_stats_cnt = PM_NSTATS,
			.cng_ch_bits_log = 2,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 128,
			.vfcount = 128,
			.sge_fl_db = F_DBPRIO | F_DBTYPE,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
		{
			/* T6 */
			.nchan = T6_NCHAN,
			.pm_stats_cnt = T6_PM_NSTATS,
			.cng_ch_bits_log = 3,
			.nsched_cls = 16,
			.cim_num_obq = CIM_NUM_OBQ_T5,
			.mps_rplc_size = 256,
			.vfcount = 256,
			.sge_fl_db = 0,
			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
		},
	};

	/* Rebase so the first supported chip indexes entry 0. */
	chipid -= CHELSIO_T4;
	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
		return NULL;

	return &chip_params[chipid];
}
8902
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter, u32 *buf)
{
	int ret;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Identify the chip generation and revision from PL_REV. */
	pl_rev = t4_read_reg(adapter, A_PL_REV);
	adapter->params.chipid = G_CHIPID(pl_rev);
	adapter->params.rev = G_REV(pl_rev);
	if (adapter->params.chipid == 0) {
		/* T4 did not have chipid in PL_REV (T5 onwards do) */
		adapter->params.chipid = CHELSIO_T4;

		/* T4A1 chip is not supported */
		if (adapter->params.rev == 1) {
			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
			return -EINVAL;
		}
	}

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* Determine the serial Flash size/sector count. */
	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	/* Cards with real ASICs have the chipid in the PCIe device id */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
	if (device_id >> 12 == chip_id(adapter))
		adapter->params.cim_la_size = CIMLA_SIZE;
	else {
		/* FPGA */
		adapter->params.fpga = 1;
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	}

	/* Read serial/part numbers and core clock from the VPD. */
	ret = get_vpd_params(adapter, &adapter->params.vpd, device_id, buf);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
8972
8973/**
8974 *	t4_shutdown_adapter - shut down adapter, host & wire
8975 *	@adapter: the adapter
8976 *
8977 *	Perform an emergency shutdown of the adapter and stop it from
8978 *	continuing any further communication on the ports or DMA to the
8979 *	host.  This is typically used when the adapter and/or firmware
8980 *	have crashed and we want to prevent any further accidental
8981 *	communication with the rest of the world.  This will also force
8982 *	the port Link Status to go down -- if register writes work --
8983 *	which should help our peers figure out that we're down.
8984 */
8985int t4_shutdown_adapter(struct adapter *adapter)
8986{
8987	int port;
8988
8989	t4_intr_disable(adapter);
8990	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
8991	for_each_port(adapter, port) {
8992		u32 a_port_cfg = is_t4(adapter) ?
8993				 PORT_REG(port, A_XGMAC_PORT_CFG) :
8994				 T5_PORT_REG(port, A_MAC_PORT_CFG);
8995
8996		t4_write_reg(adapter, a_port_cfg,
8997			     t4_read_reg(adapter, a_port_cfg)
8998			     & ~V_SIGNAL_DET(1));
8999	}
9000	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
9001
9002	return 0;
9003}
9004
9005/**
9006 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9007 *	@adapter: the adapter
9008 *	@qid: the Queue ID
9009 *	@qtype: the Ingress or Egress type for @qid
9010 *	@user: true if this request is for a user mode queue
9011 *	@pbar2_qoffset: BAR2 Queue Offset
9012 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9013 *
9014 *	Returns the BAR2 SGE Queue Registers information associated with the
9015 *	indicated Absolute Queue ID.  These are passed back in return value
9016 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9017 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9018 *
9019 *	This may return an error which indicates that BAR2 SGE Queue
9020 *	registers aren't available.  If an error is not returned, then the
9021 *	following values are returned:
9022 *
9023 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9024 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9025 *
9026 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9027 *	require the "Inferred Queue ID" ability may be used.  E.g. the
9028 *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9029 *	then these "Inferred Queue ID" register may not be used.
9030 */
9031int t4_bar2_sge_qregs(struct adapter *adapter,
9032		      unsigned int qid,
9033		      enum t4_bar2_qtype qtype,
9034		      int user,
9035		      u64 *pbar2_qoffset,
9036		      unsigned int *pbar2_qid)
9037{
9038	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9039	u64 bar2_page_offset, bar2_qoffset;
9040	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9041
9042	/* T4 doesn't support BAR2 SGE Queue registers for kernel
9043	 * mode queues.
9044	 */
9045	if (!user && is_t4(adapter))
9046		return -EINVAL;
9047
9048	/* Get our SGE Page Size parameters.
9049	 */
9050	page_shift = adapter->params.sge.page_shift;
9051	page_size = 1 << page_shift;
9052
9053	/* Get the right Queues per Page parameters for our Queue.
9054	 */
9055	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9056		     ? adapter->params.sge.eq_s_qpp
9057		     : adapter->params.sge.iq_s_qpp);
9058	qpp_mask = (1 << qpp_shift) - 1;
9059
9060	/* Calculate the basics of the BAR2 SGE Queue register area:
9061	 *  o The BAR2 page the Queue registers will be in.
9062	 *  o The BAR2 Queue ID.
9063	 *  o The BAR2 Queue ID Offset into the BAR2 page.
9064	 */
9065	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9066	bar2_qid = qid & qpp_mask;
9067	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9068
9069	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
9070	 * hardware will infer the Absolute Queue ID simply from the writes to
9071	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9072	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
9073	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9074	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9075	 * from the BAR2 Page and BAR2 Queue ID.
9076	 *
9077	 * One important censequence of this is that some BAR2 SGE registers
9078	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9079	 * there.  But other registers synthesize the SGE Queue ID purely
9080	 * from the writes to the registers -- the Write Combined Doorbell
9081	 * Buffer is a good example.  These BAR2 SGE Registers are only
9082	 * available for those BAR2 SGE Register areas where the SGE Absolute
9083	 * Queue ID can be inferred from simple writes.
9084	 */
9085	bar2_qoffset = bar2_page_offset;
9086	bar2_qinferred = (bar2_qid_offset < page_size);
9087	if (bar2_qinferred) {
9088		bar2_qoffset += bar2_qid_offset;
9089		bar2_qid = 0;
9090	}
9091
9092	*pbar2_qoffset = bar2_qoffset;
9093	*pbar2_qid = bar2_qid;
9094	return 0;
9095}
9096
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		/* The address is stored in 16-byte units. */
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		/* The entry count is stored in units of 128, biased by 1. */
		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	/* As above, the firmware reports the address in 16-byte units. */
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
9163
/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt holdoff packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/*
	 * T6 and later can scale the SGE timers; a raw TSCALE of 0 means no
	 * scaling, otherwise the effective multiplier is TSCALE + 2.
	 */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Holdoff timer values, converted from core-clock ticks to us. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing threshold field moved
	 * between T5 and T6.
	 */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; field value is log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Status page size, packet shift, and pad/pack boundaries.  The pad
	 * boundary encoding uses a different bias on T6+.
	 */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	/* Cache the free-list buffer sizes programmed into the SGE. */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
9253
/*
 * Read and cache the adapter's compressed filter mode and ingress config.
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap,
    bool sleep_ok)
{
	uint32_t v;
	struct tp_params *tpp = &adap->params.tp;

	/* Cache TP_VLAN_PRI_MAP (the filter mode) and TP_INGRESS_CONFIG. */
	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	    sleep_ok);
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
	    sleep_ok);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * T5 and later also have a 64-bit hash-filter mask, assembled here
	 * from two 32-bit LE hash-mask registers.
	 */
	if (chip_id(adap) > CHELSIO_T4) {
		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(3));
		adap->params.tp.hash_filter_mask = v;
		v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
		adap->params.tp.hash_filter_mask |= (u64)v << 32;
	}
}
9291
/**
 *      t4_init_tp_params - initialize adap->params.tp
 *      @adap: the adapter
 *      @sleep_ok: whether it is OK to sleep while waiting for TP access
 *
 *      Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	/* Timer and delayed-ACK resolutions (log2 of core-clock ticks). */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			/* Compressed error vector enabled; mask off the
			 * encapsulation bits.
			 */
			tpp->err_vec_mask =
			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
		}
	}

	return 0;
}
9330
9331/**
9332 *      t4_filter_field_shift - calculate filter field shift
9333 *      @adap: the adapter
9334 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9335 *
9336 *      Return the shift position of a filter field within the Compressed
9337 *      Filter Tuple.  The filter field is specified via its selection bit
9338 *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
9339 */
9340int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9341{
9342	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9343	unsigned int sel;
9344	int field_shift;
9345
9346	if ((filter_mode & filter_sel) == 0)
9347		return -1;
9348
9349	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9350		switch (filter_mode & sel) {
9351		case F_FCOE:
9352			field_shift += W_FT_FCOE;
9353			break;
9354		case F_PORT:
9355			field_shift += W_FT_PORT;
9356			break;
9357		case F_VNIC_ID:
9358			field_shift += W_FT_VNIC_ID;
9359			break;
9360		case F_VLAN:
9361			field_shift += W_FT_VLAN;
9362			break;
9363		case F_TOS:
9364			field_shift += W_FT_TOS;
9365			break;
9366		case F_PROTOCOL:
9367			field_shift += W_FT_PROTOCOL;
9368			break;
9369		case F_ETHERTYPE:
9370			field_shift += W_FT_ETHERTYPE;
9371			break;
9372		case F_MACMATCH:
9373			field_shift += W_FT_MACMATCH;
9374			break;
9375		case F_MPSHITTYPE:
9376			field_shift += W_FT_MPSHITTYPE;
9377			break;
9378		case F_FRAGMENTATION:
9379			field_shift += W_FT_FRAGMENTATION;
9380			break;
9381		}
9382	}
9383	return field_shift;
9384}
9385
/**
 *	t4_port_init - initialize a port's SW state
 *	@adap: the adapter
 *	@mbox: mailbox to use for firmware commands
 *	@pf: the PF that will own the port's first virtual interface
 *	@vf: the VF that will own the port's first virtual interface
 *	@port_id: the port's index
 *
 *	Derives the port's channel from the adapter's port vector, refreshes
 *	the port information from firmware where allowed, allocates the
 *	port's first virtual interface (VI), records its MAC address, and
 *	queries the VI's RSS base.  Returns 0 on success or a negative error
 *	from VI allocation.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;
	struct vi_info *vi = &p->vi[0];

	/* Find the (port_id + 1)-th set bit in portvec; its index is this
	 * port's channel.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	p->tx_chan = j;
	p->mps_bg_map = t4_get_mps_bg_map(adap, j);
	p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
	p->lport = j;

	/* A VF may refresh port info only if firmware granted it the PORT
	 * capability.
	 */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
 		t4_update_port_info(p);
	}

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &vi->rss_size,
	    &vi->vfvld, &vi->vin);
	if (ret < 0)
		return ret;

	/* t4_alloc_vi returns the VI id on success. */
	vi->viid = ret;
	t4_os_set_hw_addr(p, addr);

	/* Query the VI's RSS base slice from firmware. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		vi->rss_base = 0xffff;	/* query failed; mark as unknown */
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	return 0;
}
9431
9432/**
9433 *	t4_read_cimq_cfg - read CIM queue configuration
9434 *	@adap: the adapter
9435 *	@base: holds the queue base addresses in bytes
9436 *	@size: holds the queue sizes in bytes
9437 *	@thres: holds the queue full thresholds in bytes
9438 *
9439 *	Returns the current configuration of the CIM queues, starting with
9440 *	the IBQs, then the OBQs.
9441 */
9442void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9443{
9444	unsigned int i, v;
9445	int cim_num_obq = adap->chip_params->cim_num_obq;
9446
9447	for (i = 0; i < CIM_NUM_IBQ; i++) {
9448		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
9449			     V_QUENUMSELECT(i));
9450		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9451		/* value is in 256-byte units */
9452		*base++ = G_CIMQBASE(v) * 256;
9453		*size++ = G_CIMQSIZE(v) * 256;
9454		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
9455	}
9456	for (i = 0; i < cim_num_obq; i++) {
9457		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9458			     V_QUENUMSELECT(i));
9459		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9460		/* value is in 256-byte units */
9461		*base++ = G_CIMQBASE(v) * 256;
9462		*size++ = G_CIMQSIZE(v) * 256;
9463	}
9464}
9465
9466/**
9467 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
9468 *	@adap: the adapter
9469 *	@qid: the queue index
9470 *	@data: where to store the queue contents
9471 *	@n: capacity of @data in 32-bit words
9472 *
9473 *	Reads the contents of the selected CIM queue starting at address 0 up
9474 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9475 *	error and the number of 32-bit words actually read on success.
9476 */
9477int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9478{
9479	int i, err, attempts;
9480	unsigned int addr;
9481	const unsigned int nwords = CIM_IBQ_SIZE * 4;
9482
9483	if (qid > 5 || (n & 3))
9484		return -EINVAL;
9485
9486	addr = qid * nwords;
9487	if (n > nwords)
9488		n = nwords;
9489
9490	/* It might take 3-10ms before the IBQ debug read access is allowed.
9491	 * Wait for 1 Sec with a delay of 1 usec.
9492	 */
9493	attempts = 1000000;
9494
9495	for (i = 0; i < n; i++, addr++) {
9496		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9497			     F_IBQDBGEN);
9498		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9499				      attempts, 1);
9500		if (err)
9501			return err;
9502		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9503	}
9504	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9505	return i;
9506}
9507
9508/**
9509 *	t4_read_cim_obq - read the contents of a CIM outbound queue
9510 *	@adap: the adapter
9511 *	@qid: the queue index
9512 *	@data: where to store the queue contents
9513 *	@n: capacity of @data in 32-bit words
9514 *
9515 *	Reads the contents of the selected CIM queue starting at address 0 up
9516 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9517 *	error and the number of 32-bit words actually read on success.
9518 */
9519int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9520{
9521	int i, err;
9522	unsigned int addr, v, nwords;
9523	int cim_num_obq = adap->chip_params->cim_num_obq;
9524
9525	if ((qid > (cim_num_obq - 1)) || (n & 3))
9526		return -EINVAL;
9527
9528	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
9529		     V_QUENUMSELECT(qid));
9530	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
9531
9532	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
9533	nwords = G_CIMQSIZE(v) * 64;  /* same */
9534	if (n > nwords)
9535		n = nwords;
9536
9537	for (i = 0; i < n; i++, addr++) {
9538		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
9539			     F_OBQDBGEN);
9540		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
9541				      2, 1);
9542		if (err)
9543			return err;
9544		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
9545	}
9546	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
9547	return i;
9548}
9549
/* Base addresses of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
9557
9558/**
9559 *	t4_cim_read - read a block from CIM internal address space
9560 *	@adap: the adapter
9561 *	@addr: the start address within the CIM address space
9562 *	@n: number of words to read
9563 *	@valp: where to store the result
9564 *
9565 *	Reads a block of 4-byte words from the CIM intenal address space.
9566 */
9567int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9568		unsigned int *valp)
9569{
9570	int ret = 0;
9571
9572	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9573		return -EBUSY;
9574
9575	for ( ; !ret && n--; addr += 4) {
9576		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9577		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9578				      0, 5, 2);
9579		if (!ret)
9580			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9581	}
9582	return ret;
9583}
9584
9585/**
9586 *	t4_cim_write - write a block into CIM internal address space
9587 *	@adap: the adapter
9588 *	@addr: the start address within the CIM address space
9589 *	@n: number of words to write
9590 *	@valp: set of values to write
9591 *
9592 *	Writes a block of 4-byte words into the CIM intenal address space.
9593 */
9594int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9595		 const unsigned int *valp)
9596{
9597	int ret = 0;
9598
9599	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9600		return -EBUSY;
9601
9602	for ( ; !ret && n--; addr += 4) {
9603		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9604		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9605		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9606				      0, 5, 2);
9607	}
9608	return ret;
9609}
9610
/* Convenience wrapper: write a single word into CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int v = val;

	return t4_cim_write(adap, addr, 1, &v);
}
9616
9617/**
9618 *	t4_cim_ctl_read - read a block from CIM control region
9619 *	@adap: the adapter
9620 *	@addr: the start address within the CIM control region
9621 *	@n: number of words to read
9622 *	@valp: where to store the result
9623 *
9624 *	Reads a block of 4-byte words from the CIM control region.
9625 */
9626int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
9627		    unsigned int *valp)
9628{
9629	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
9630}
9631
/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA config so we can restore its state at the end. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the HW write pointer so the oldest entry comes
	 * out first.
	 */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Select entry @idx and request a read. */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		/* UPDBGLARDEN still set means the read did not complete. */
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 *
		 * NOTE(review): the wrap below uses '%' while the one above
		 * uses '&'; '%' wraps at 0xfff rather than 0x1000 -- confirm
		 * this asymmetry is intended.
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* Restart the LA if it was running when we were called. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
9700
/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In LA modes >= 2 with DBGLAWHLF clear the entry at the write
	 * pointer is only half-written; skip past it and remember to wipe
	 * it from the output below.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the low config bits but clear the read-pointer field; the
	 * loop below supplies the pointer for each entry read.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
9747
9748/*
9749 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9750 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
9751 * state for more than the Warning Threshold then we'll issue a warning about
9752 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
9753 * appears to be hung every Warning Repeat second till the situation clears.
9754 * If the situation clears, we'll note that as well.
9755 */
9756#define SGE_IDMA_WARN_THRESH 1
9757#define SGE_IDMA_WARN_REPEAT 300
9758
9759/**
9760 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9761 *	@adapter: the adapter
9762 *	@idma: the adapter IDMA Monitor state
9763 *
9764 *	Initialize the state of an SGE Ingress DMA Monitor.
9765 */
9766void t4_idma_monitor_init(struct adapter *adapter,
9767			  struct sge_idma_monitor_state *idma)
9768{
9769	/* Initialize the state variables for detecting an SGE Ingress DMA
9770	 * hang.  The SGE has internal counters which count up on each clock
9771	 * tick whenever the SGE finds its Ingress DMA State Engines in the
9772	 * same state they were on the previous clock tick.  The clock used is
9773	 * the Core Clock so we have a limit on the maximum "time" they can
9774	 * record; typically a very small number of seconds.  For instance,
9775	 * with a 600MHz Core Clock, we can only count up to a bit more than
9776	 * 7s.  So we'll synthesize a larger counter in order to not run the
9777	 * risk of having the "timers" overflow and give us the flexibility to
9778	 * maintain a Hung SGE State Machine of our own which operates across
9779	 * a longer time frame.
9780	 */
9781	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9782	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
9783}
9784
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Checks both SGE Ingress DMA channels for stalls, warning every
 *	SGE_IDMA_WARN_REPEAT seconds while a channel appears hung and
 *	noting when it resumes.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
9874
9875/**
9876 *	t4_read_pace_tbl - read the pace table
9877 *	@adap: the adapter
9878 *	@pace_vals: holds the returned values
9879 *
9880 *	Returns the values of TP's pace table in microseconds.
9881 */
9882void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9883{
9884	unsigned int i, v;
9885
9886	for (i = 0; i < NTX_SCHED; i++) {
9887		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
9888		v = t4_read_reg(adap, A_TP_PACE_TABLE);
9889		pace_vals[i] = dack_ticks_to_usec(adap, v);
9890	}
9891}
9892
9893/**
9894 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9895 *	@adap: the adapter
9896 *	@sched: the scheduler index
9897 *	@kbps: the byte rate in Kbps
9898 *	@ipg: the interpacket delay in tenths of nanoseconds
9899 *
9900 *	Return the current configuration of a HW Tx scheduler.
9901 */
9902void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
9903		     unsigned int *ipg, bool sleep_ok)
9904{
9905	unsigned int v, addr, bpt, cpt;
9906
9907	if (kbps) {
9908		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
9909		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9910		if (sched & 1)
9911			v >>= 16;
9912		bpt = (v >> 8) & 0xff;
9913		cpt = v & 0xff;
9914		if (!cpt)
9915			*kbps = 0;	/* scheduler disabled */
9916		else {
9917			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9918			*kbps = (v * bpt) / 125;
9919		}
9920	}
9921	if (ipg) {
9922		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
9923		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9924		if (sched & 1)
9925			v >>= 16;
9926		v &= 0xffff;
9927		*ipg = (10000 * v) / core_ticks_per_usec(adap);
9928	}
9929}
9930
9931/**
9932 *	t4_load_cfg - download config file
9933 *	@adap: the adapter
9934 *	@cfg_data: the cfg text file to write
9935 *	@size: text file size
9936 *
9937 *	Write the supplied config text file to the card's serial flash.
9938 */
9939int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9940{
9941	int ret, i, n, cfg_addr;
9942	unsigned int addr;
9943	unsigned int flash_cfg_start_sec;
9944	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9945
9946	cfg_addr = t4_flash_cfg_addr(adap);
9947	if (cfg_addr < 0)
9948		return cfg_addr;
9949
9950	addr = cfg_addr;
9951	flash_cfg_start_sec = addr / SF_SEC_SIZE;
9952
9953	if (size > FLASH_CFG_MAX_SIZE) {
9954		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
9955		       FLASH_CFG_MAX_SIZE);
9956		return -EFBIG;
9957	}
9958
9959	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
9960			 sf_sec_size);
9961	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9962				     flash_cfg_start_sec + i - 1);
9963	/*
9964	 * If size == 0 then we're simply erasing the FLASH sectors associated
9965	 * with the on-adapter Firmware Configuration File.
9966	 */
9967	if (ret || size == 0)
9968		goto out;
9969
9970	/* this will write to the flash up to SF_PAGE_SIZE at a time */
9971	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9972		if ( (size - i) <  SF_PAGE_SIZE)
9973			n = size - i;
9974		else
9975			n = SF_PAGE_SIZE;
9976		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
9977		if (ret)
9978			goto out;
9979
9980		addr += SF_PAGE_SIZE;
9981		cfg_data += SF_PAGE_SIZE;
9982	}
9983
9984out:
9985	if (ret)
9986		CH_ERR(adap, "config file %s failed %d\n",
9987		       (size == 0 ? "clear" : "download"), ret);
9988	return ret;
9989}
9990
9991/**
9992 *	t5_fw_init_extern_mem - initialize the external memory
9993 *	@adap: the adapter
9994 *
9995 *	Initializes the external memory on T5.
9996 */
9997int t5_fw_init_extern_mem(struct adapter *adap)
9998{
9999	u32 params[1], val[1];
10000	int ret;
10001
10002	if (!is_t5(adap))
10003		return 0;
10004
10005	val[0] = 0xff; /* Initialize all MCs */
10006	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10007			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10008	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10009			FW_CMD_MAX_TIMEOUT);
10010
10011	return ret;
10012}
10013
/* BIOS boot headers */
/* Generic PCI Expansion ROM header: just enough to find the PCIR struct. */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure (little-endian) */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
10020
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure (little-endian) */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
10030
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
10049
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCIR_DATA_STRUCTURE */
10073
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 chunks of 512B = 512KB max */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" signature as a little-endian u32 */
};
10084
10085/*
10086 *	modify_device_id - Modifies the device ID of the Boot BIOS image
10087 *	@adatper: the device ID to write.
10088 *	@boot_data: the boot image to modify.
10089 *
10090 *	Write the supplied device ID to the boot BIOS image.
10091 */
10092static void modify_device_id(int device_id, u8 *boot_data)
10093{
10094	legacy_pci_exp_rom_header_t *header;
10095	pcir_data_t *pcir_header;
10096	u32 cur_header = 0;
10097
10098	/*
10099	 * Loop through all chained images and change the device ID's
10100	 */
10101	while (1) {
10102		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
10103		pcir_header = (pcir_data_t *) &boot_data[cur_header +
10104			      le16_to_cpu(*(u16*)header->pcir_offset)];
10105
10106		/*
10107		 * Only modify the Device ID if code type is Legacy or HP.
10108		 * 0x00: Okay to modify
10109		 * 0x01: FCODE. Do not be modify
10110		 * 0x03: Okay to modify
10111		 * 0x04-0xFF: Do not modify
10112		 */
10113		if (pcir_header->code_type == 0x00) {
10114			u8 csum = 0;
10115			int i;
10116
10117			/*
10118			 * Modify Device ID to match current adatper
10119			 */
10120			*(u16*) pcir_header->device_id = device_id;
10121
10122			/*
10123			 * Set checksum temporarily to 0.
10124			 * We will recalculate it later.
10125			 */
10126			header->cksum = 0x0;
10127
10128			/*
10129			 * Calculate and update checksum
10130			 */
10131			for (i = 0; i < (header->size512 * 512); i++)
10132				csum += (u8)boot_data[cur_header + i];
10133
10134			/*
10135			 * Invert summed value to create the checksum
10136			 * Writing new checksum value directly to the boot data
10137			 */
10138			boot_data[cur_header + 7] = -csum;
10139
10140		} else if (pcir_header->code_type == 0x03) {
10141
10142			/*
10143			 * Modify Device ID to match current adatper
10144			 */
10145			*(u16*) pcir_header->device_id = device_id;
10146
10147		}
10148
10149
10150		/*
10151		 * Check indicator element to identify if this is the last
10152		 * image in the ROM.
10153		 */
10154		if (pcir_header->indicator & 0x80)
10155			break;
10156
10157		/*
10158		 * Move header pointer up to the next image in the ROM.
10159		 */
10160		cur_header += header->size512 * 512;
10161	}
10162}
10163
10164/*
10165 *	t4_load_boot - download boot flash
10166 *	@adapter: the adapter
10167 *	@boot_data: the boot image to write
10168 *	@boot_addr: offset in flash to write boot_data
10169 *	@size: image size
10170 *
10171 *	Write the supplied boot image to the card's serial flash.
10172 *	The boot image has the following sections: a 28-byte header and the
10173 *	boot image.
10174 */
10175int t4_load_boot(struct adapter *adap, u8 *boot_data,
10176		 unsigned int boot_addr, unsigned int size)
10177{
10178	pci_exp_rom_header_t *header;
10179	int pcir_offset ;
10180	pcir_data_t *pcir_header;
10181	int ret, addr;
10182	uint16_t device_id;
10183	unsigned int i;
10184	unsigned int boot_sector = (boot_addr * 1024 );
10185	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10186
10187	/*
10188	 * Make sure the boot image does not encroach on the firmware region
10189	 */
10190	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10191		CH_ERR(adap, "boot image encroaching on firmware region\n");
10192		return -EFBIG;
10193	}
10194
10195	/*
10196	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10197	 * and Boot configuration data sections. These 3 boot sections span
10198	 * sectors 0 to 7 in flash and live right before the FW image location.
10199	 */
10200	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
10201			sf_sec_size);
10202	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10203				     (boot_sector >> 16) + i - 1);
10204
10205	/*
10206	 * If size == 0 then we're simply erasing the FLASH sectors associated
10207	 * with the on-adapter option ROM file
10208	 */
10209	if (ret || (size == 0))
10210		goto out;
10211
10212	/* Get boot header */
10213	header = (pci_exp_rom_header_t *)boot_data;
10214	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
10215	/* PCIR Data Structure */
10216	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
10217
10218	/*
10219	 * Perform some primitive sanity testing to avoid accidentally
10220	 * writing garbage over the boot sectors.  We ought to check for
10221	 * more but it's not worth it for now ...
10222	 */
10223	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10224		CH_ERR(adap, "boot image too small/large\n");
10225		return -EFBIG;
10226	}
10227
10228#ifndef CHELSIO_T4_DIAGS
10229	/*
10230	 * Check BOOT ROM header signature
10231	 */
10232	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
10233		CH_ERR(adap, "Boot image missing signature\n");
10234		return -EINVAL;
10235	}
10236
10237	/*
10238	 * Check PCI header signature
10239	 */
10240	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
10241		CH_ERR(adap, "PCI header missing signature\n");
10242		return -EINVAL;
10243	}
10244
10245	/*
10246	 * Check Vendor ID matches Chelsio ID
10247	 */
10248	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
10249		CH_ERR(adap, "Vendor ID missing signature\n");
10250		return -EINVAL;
10251	}
10252#endif
10253
10254	/*
10255	 * Retrieve adapter's device ID
10256	 */
10257	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
10258	/* Want to deal with PF 0 so I strip off PF 4 indicator */
10259	device_id = device_id & 0xf0ff;
10260
10261	/*
10262	 * Check PCIE Device ID
10263	 */
10264	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
10265		/*
10266		 * Change the device ID in the Boot BIOS image to match
10267		 * the Device ID of the current adapter.
10268		 */
10269		modify_device_id(device_id, boot_data);
10270	}
10271
10272	/*
10273	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10274	 * we finish copying the rest of the boot image. This will ensure
10275	 * that the BIOS boot header will only be written if the boot image
10276	 * was written in full.
10277	 */
10278	addr = boot_sector;
10279	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10280		addr += SF_PAGE_SIZE;
10281		boot_data += SF_PAGE_SIZE;
10282		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
10283		if (ret)
10284			goto out;
10285	}
10286
10287	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10288			     (const u8 *)header, 0);
10289
10290out:
10291	if (ret)
10292		CH_ERR(adap, "boot image download failed, error %d\n", ret);
10293	return ret;
10294}
10295
10296/*
10297 *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10298 *	@adapter: the adapter
10299 *
10300 *	Return the address within the flash where the OptionROM Configuration
10301 *	is stored, or an error if the device FLASH is too small to contain
10302 *	a OptionROM Configuration.
10303 */
10304static int t4_flash_bootcfg_addr(struct adapter *adapter)
10305{
10306	/*
10307	 * If the device FLASH isn't large enough to hold a Firmware
10308	 * Configuration File, return an error.
10309	 */
10310	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10311		return -ENOSPC;
10312
10313	return FLASH_BOOTCFG_START;
10314}
10315
10316int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10317{
10318	int ret, i, n, cfg_addr;
10319	unsigned int addr;
10320	unsigned int flash_cfg_start_sec;
10321	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10322
10323	cfg_addr = t4_flash_bootcfg_addr(adap);
10324	if (cfg_addr < 0)
10325		return cfg_addr;
10326
10327	addr = cfg_addr;
10328	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10329
10330	if (size > FLASH_BOOTCFG_MAX_SIZE) {
10331		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10332			FLASH_BOOTCFG_MAX_SIZE);
10333		return -EFBIG;
10334	}
10335
10336	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10337			 sf_sec_size);
10338	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10339					flash_cfg_start_sec + i - 1);
10340
10341	/*
10342	 * If size == 0 then we're simply erasing the FLASH sectors associated
10343	 * with the on-adapter OptionROM Configuration File.
10344	 */
10345	if (ret || size == 0)
10346		goto out;
10347
10348	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10349	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10350		if ( (size - i) <  SF_PAGE_SIZE)
10351			n = size - i;
10352		else
10353			n = SF_PAGE_SIZE;
10354		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10355		if (ret)
10356			goto out;
10357
10358		addr += SF_PAGE_SIZE;
10359		cfg_data += SF_PAGE_SIZE;
10360	}
10361
10362out:
10363	if (ret)
10364		CH_ERR(adap, "boot config data %s failed %d\n",
10365				(size == 0 ? "clear" : "download"), ret);
10366	return ret;
10367}
10368
10369/**
10370 *	t4_set_filter_mode - configure the optional components of filter tuples
10371 *	@adap: the adapter
10372 *	@mode_map: a bitmap selcting which optional filter components to enable
10373 * 	@sleep_ok: if true we may sleep while awaiting command completion
10374 *
10375 *	Sets the filter mode by selecting the optional components to enable
10376 *	in filter tuples.  Returns 0 on success and a negative error if the
10377 *	requested mode needs more bits than are available for optional
10378 *	components.
10379 */
10380int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10381		       bool sleep_ok)
10382{
10383	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
10384
10385	int i, nbits = 0;
10386
10387	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10388		if (mode_map & (1 << i))
10389			nbits += width[i];
10390	if (nbits > FILTER_OPT_LEN)
10391		return -EINVAL;
10392	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10393	read_filter_mode_and_ingress_config(adap, sleep_ok);
10394
10395	return 0;
10396}
10397
10398/**
10399 *	t4_clr_port_stats - clear port statistics
10400 *	@adap: the adapter
10401 *	@idx: the port index
10402 *
10403 *	Clear HW statistics for the given port.
10404 */
10405void t4_clr_port_stats(struct adapter *adap, int idx)
10406{
10407	unsigned int i;
10408	u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
10409	u32 port_base_addr;
10410
10411	if (is_t4(adap))
10412		port_base_addr = PORT_BASE(idx);
10413	else
10414		port_base_addr = T5_PORT_BASE(idx);
10415
10416	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10417			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10418		t4_write_reg(adap, port_base_addr + i, 0);
10419	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10420			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10421		t4_write_reg(adap, port_base_addr + i, 0);
10422	for (i = 0; i < 4; i++)
10423		if (bgmap & (1 << i)) {
10424			t4_write_reg(adap,
10425			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10426			t4_write_reg(adap,
10427			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10428		}
10429}
10430
10431/**
10432 *	t4_i2c_rd - read I2C data from adapter
10433 *	@adap: the adapter
10434 *	@port: Port number if per-port device; <0 if not
10435 *	@devid: per-port device ID or absolute device ID
10436 *	@offset: byte offset into device I2C space
10437 *	@len: byte length of I2C space data
10438 *	@buf: buffer in which to return I2C data
10439 *
10440 *	Reads the I2C data from the indicated device and location.
10441 */
10442int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10443	      int port, unsigned int devid,
10444	      unsigned int offset, unsigned int len,
10445	      u8 *buf)
10446{
10447	u32 ldst_addrspace;
10448	struct fw_ldst_cmd ldst;
10449	int ret;
10450
10451	if (port >= 4 ||
10452	    devid >= 256 ||
10453	    offset >= 256 ||
10454	    len > sizeof ldst.u.i2c.data)
10455		return -EINVAL;
10456
10457	memset(&ldst, 0, sizeof ldst);
10458	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10459	ldst.op_to_addrspace =
10460		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10461			    F_FW_CMD_REQUEST |
10462			    F_FW_CMD_READ |
10463			    ldst_addrspace);
10464	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10465	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10466	ldst.u.i2c.did = devid;
10467	ldst.u.i2c.boffset = offset;
10468	ldst.u.i2c.blen = len;
10469	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10470	if (!ret)
10471		memcpy(buf, ldst.u.i2c.data, len);
10472	return ret;
10473}
10474
10475/**
10476 *	t4_i2c_wr - write I2C data to adapter
10477 *	@adap: the adapter
10478 *	@port: Port number if per-port device; <0 if not
10479 *	@devid: per-port device ID or absolute device ID
10480 *	@offset: byte offset into device I2C space
10481 *	@len: byte length of I2C space data
10482 *	@buf: buffer containing new I2C data
10483 *
10484 *	Write the I2C data to the indicated device and location.
10485 */
10486int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10487	      int port, unsigned int devid,
10488	      unsigned int offset, unsigned int len,
10489	      u8 *buf)
10490{
10491	u32 ldst_addrspace;
10492	struct fw_ldst_cmd ldst;
10493
10494	if (port >= 4 ||
10495	    devid >= 256 ||
10496	    offset >= 256 ||
10497	    len > sizeof ldst.u.i2c.data)
10498		return -EINVAL;
10499
10500	memset(&ldst, 0, sizeof ldst);
10501	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10502	ldst.op_to_addrspace =
10503		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10504			    F_FW_CMD_REQUEST |
10505			    F_FW_CMD_WRITE |
10506			    ldst_addrspace);
10507	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10508	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10509	ldst.u.i2c.did = devid;
10510	ldst.u.i2c.boffset = offset;
10511	ldst.u.i2c.blen = len;
10512	memcpy(ldst.u.i2c.data, buf, len);
10513	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10514}
10515
10516/**
10517 * 	t4_sge_ctxt_rd - read an SGE context through FW
10518 * 	@adap: the adapter
10519 * 	@mbox: mailbox to use for the FW command
10520 * 	@cid: the context id
10521 * 	@ctype: the context type
10522 * 	@data: where to store the context data
10523 *
10524 * 	Issues a FW command through the given mailbox to read an SGE context.
10525 */
10526int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10527		   enum ctxt_type ctype, u32 *data)
10528{
10529	int ret;
10530	struct fw_ldst_cmd c;
10531
10532	if (ctype == CTXT_EGRESS)
10533		ret = FW_LDST_ADDRSPC_SGE_EGRC;
10534	else if (ctype == CTXT_INGRESS)
10535		ret = FW_LDST_ADDRSPC_SGE_INGC;
10536	else if (ctype == CTXT_FLM)
10537		ret = FW_LDST_ADDRSPC_SGE_FLMC;
10538	else
10539		ret = FW_LDST_ADDRSPC_SGE_CONMC;
10540
10541	memset(&c, 0, sizeof(c));
10542	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10543					F_FW_CMD_REQUEST | F_FW_CMD_READ |
10544					V_FW_LDST_CMD_ADDRSPACE(ret));
10545	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10546	c.u.idctxt.physid = cpu_to_be32(cid);
10547
10548	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10549	if (ret == 0) {
10550		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10551		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10552		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10553		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10554		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10555		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10556	}
10557	return ret;
10558}
10559
10560/**
10561 * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10562 * 	@adap: the adapter
10563 * 	@cid: the context id
10564 * 	@ctype: the context type
10565 * 	@data: where to store the context data
10566 *
10567 * 	Reads an SGE context directly, bypassing FW.  This is only for
10568 * 	debugging when FW is unavailable.
10569 */
10570int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10571		      u32 *data)
10572{
10573	int i, ret;
10574
10575	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10576	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
10577	if (!ret)
10578		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
10579			*data++ = t4_read_reg(adap, i);
10580	return ret;
10581}
10582
10583int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
10584    int sleep_ok)
10585{
10586	struct fw_sched_cmd cmd;
10587
10588	memset(&cmd, 0, sizeof(cmd));
10589	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10590				      F_FW_CMD_REQUEST |
10591				      F_FW_CMD_WRITE);
10592	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10593
10594	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
10595	cmd.u.config.type = type;
10596	cmd.u.config.minmaxen = minmaxen;
10597
10598	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10599			       NULL, sleep_ok);
10600}
10601
10602int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
10603		    int rateunit, int ratemode, int channel, int cl,
10604		    int minrate, int maxrate, int weight, int pktsize,
10605		    int burstsize, int sleep_ok)
10606{
10607	struct fw_sched_cmd cmd;
10608
10609	memset(&cmd, 0, sizeof(cmd));
10610	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10611				      F_FW_CMD_REQUEST |
10612				      F_FW_CMD_WRITE);
10613	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10614
10615	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10616	cmd.u.params.type = type;
10617	cmd.u.params.level = level;
10618	cmd.u.params.mode = mode;
10619	cmd.u.params.ch = channel;
10620	cmd.u.params.cl = cl;
10621	cmd.u.params.unit = rateunit;
10622	cmd.u.params.rate = ratemode;
10623	cmd.u.params.min = cpu_to_be32(minrate);
10624	cmd.u.params.max = cpu_to_be32(maxrate);
10625	cmd.u.params.weight = cpu_to_be16(weight);
10626	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10627	cmd.u.params.burstsize = cpu_to_be16(burstsize);
10628
10629	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10630			       NULL, sleep_ok);
10631}
10632
10633int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
10634    unsigned int maxrate, int sleep_ok)
10635{
10636	struct fw_sched_cmd cmd;
10637
10638	memset(&cmd, 0, sizeof(cmd));
10639	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10640				      F_FW_CMD_REQUEST |
10641				      F_FW_CMD_WRITE);
10642	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10643
10644	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10645	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10646	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
10647	cmd.u.params.ch = channel;
10648	cmd.u.params.rate = ratemode;		/* REL or ABS */
10649	cmd.u.params.max = cpu_to_be32(maxrate);/*  %  or kbps */
10650
10651	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10652			       NULL, sleep_ok);
10653}
10654
10655int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
10656    int weight, int sleep_ok)
10657{
10658	struct fw_sched_cmd cmd;
10659
10660	if (weight < 0 || weight > 100)
10661		return -EINVAL;
10662
10663	memset(&cmd, 0, sizeof(cmd));
10664	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10665				      F_FW_CMD_REQUEST |
10666				      F_FW_CMD_WRITE);
10667	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10668
10669	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10670	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10671	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
10672	cmd.u.params.ch = channel;
10673	cmd.u.params.cl = cl;
10674	cmd.u.params.weight = cpu_to_be16(weight);
10675
10676	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10677			       NULL, sleep_ok);
10678}
10679
10680int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
10681    int mode, unsigned int maxrate, int pktsize, int sleep_ok)
10682{
10683	struct fw_sched_cmd cmd;
10684
10685	memset(&cmd, 0, sizeof(cmd));
10686	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10687				      F_FW_CMD_REQUEST |
10688				      F_FW_CMD_WRITE);
10689	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10690
10691	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10692	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
10693	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
10694	cmd.u.params.mode = mode;
10695	cmd.u.params.ch = channel;
10696	cmd.u.params.cl = cl;
10697	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
10698	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
10699	cmd.u.params.max = cpu_to_be32(maxrate);
10700	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10701
10702	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10703			       NULL, sleep_ok);
10704}
10705
10706/*
10707 *	t4_config_watchdog - configure (enable/disable) a watchdog timer
10708 *	@adapter: the adapter
10709 * 	@mbox: mailbox to use for the FW command
10710 * 	@pf: the PF owning the queue
10711 * 	@vf: the VF owning the queue
10712 *	@timeout: watchdog timeout in ms
10713 *	@action: watchdog timer / action
10714 *
10715 *	There are separate watchdog timers for each possible watchdog
10716 *	action.  Configure one of the watchdog timers by setting a non-zero
10717 *	timeout.  Disable a watchdog timer by using a timeout of zero.
10718 */
10719int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
10720		       unsigned int pf, unsigned int vf,
10721		       unsigned int timeout, unsigned int action)
10722{
10723	struct fw_watchdog_cmd wdog;
10724	unsigned int ticks;
10725
10726	/*
10727	 * The watchdog command expects a timeout in units of 10ms so we need
10728	 * to convert it here (via rounding) and force a minimum of one 10ms
10729	 * "tick" if the timeout is non-zero but the conversion results in 0
10730	 * ticks.
10731	 */
10732	ticks = (timeout + 5)/10;
10733	if (timeout && !ticks)
10734		ticks = 1;
10735
10736	memset(&wdog, 0, sizeof wdog);
10737	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
10738				     F_FW_CMD_REQUEST |
10739				     F_FW_CMD_WRITE |
10740				     V_FW_PARAMS_CMD_PFN(pf) |
10741				     V_FW_PARAMS_CMD_VFN(vf));
10742	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
10743	wdog.timeout = cpu_to_be32(ticks);
10744	wdog.action = cpu_to_be32(action);
10745
10746	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
10747}
10748
10749int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
10750{
10751	struct fw_devlog_cmd devlog_cmd;
10752	int ret;
10753
10754	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10755	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10756					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
10757	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10758	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10759			 sizeof(devlog_cmd), &devlog_cmd);
10760	if (ret)
10761		return ret;
10762
10763	*level = devlog_cmd.level;
10764	return 0;
10765}
10766
10767int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
10768{
10769	struct fw_devlog_cmd devlog_cmd;
10770
10771	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
10772	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10773					     F_FW_CMD_REQUEST |
10774					     F_FW_CMD_WRITE);
10775	devlog_cmd.level = level;
10776	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10777	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
10778			  sizeof(devlog_cmd), &devlog_cmd);
10779}
10780