1/*-
2 * Copyright (c) 2012 Chelsio Communications, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: releng/10.3/sys/dev/cxgbe/common/t4_hw.c 286897 2015-08-18 19:04:55Z np $");
29
30#include "opt_inet.h"
31
32#include "common.h"
33#include "t4_regs.h"
34#include "t4_regs_values.h"
35#include "firmware/t4fw_interface.h"
36
37#undef msleep
38#define msleep(x) do { \
39	if (cold) \
40		DELAY((x) * 1000); \
41	else \
42		pause("t4hw", (x) * hz / 1000); \
43} while (0)
44
45/**
46 *	t4_wait_op_done_val - wait until an operation is completed
47 *	@adapter: the adapter performing the operation
48 *	@reg: the register to check for completion
49 *	@mask: a single-bit field within @reg that indicates completion
50 *	@polarity: the value of the field when the operation is completed
51 *	@attempts: number of check iterations
52 *	@delay: delay in usecs between iterations
53 *	@valp: where to store the value of the register at completion time
54 *
55 *	Wait until an operation is completed by checking a bit in a register
56 *	up to @attempts times.  If @valp is not NULL the value of the register
57 *	at the time it indicated completion is stored there.  Returns 0 if the
58 *	operation completes and	-EAGAIN	otherwise.
59 */
60int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61		        int polarity, int attempts, int delay, u32 *valp)
62{
63	while (1) {
64		u32 val = t4_read_reg(adapter, reg);
65
66		if (!!(val & mask) == polarity) {
67			if (valp)
68				*valp = val;
69			return 0;
70		}
71		if (--attempts == 0)
72			return -EAGAIN;
73		if (delay)
74			udelay(delay);
75	}
76}
77
78/**
79 *	t4_set_reg_field - set a register field to a value
80 *	@adapter: the adapter to program
81 *	@addr: the register address
82 *	@mask: specifies the portion of the register to modify
83 *	@val: the new value for the register field
84 *
85 *	Sets a register field specified by the supplied mask to the
86 *	given value.
87 */
88void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
89		      u32 val)
90{
91	u32 v = t4_read_reg(adapter, addr) & ~mask;
92
93	t4_write_reg(adapter, addr, v | val);
94	(void) t4_read_reg(adapter, addr);      /* flush */
95}
96
97/**
98 *	t4_read_indirect - read indirectly addressed registers
99 *	@adap: the adapter
100 *	@addr_reg: register holding the indirect address
101 *	@data_reg: register holding the value of the indirect register
102 *	@vals: where the read register values are stored
103 *	@nregs: how many indirect registers to read
104 *	@start_idx: index of first indirect register to read
105 *
106 *	Reads registers that are accessed indirectly through an address/data
107 *	register pair.
108 */
109void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
110		      unsigned int data_reg, u32 *vals, unsigned int nregs,
111		      unsigned int start_idx)
112{
113	while (nregs--) {
114		t4_write_reg(adap, addr_reg, start_idx);
115		*vals++ = t4_read_reg(adap, data_reg);
116		start_idx++;
117	}
118}
119
120/**
121 *	t4_write_indirect - write indirectly addressed registers
122 *	@adap: the adapter
123 *	@addr_reg: register holding the indirect addresses
124 *	@data_reg: register holding the value for the indirect registers
125 *	@vals: values to write
126 *	@nregs: how many indirect registers to write
127 *	@start_idx: address of first indirect register to write
128 *
129 *	Writes a sequential block of registers that are accessed indirectly
130 *	through an address/data register pair.
131 */
132void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
133		       unsigned int data_reg, const u32 *vals,
134		       unsigned int nregs, unsigned int start_idx)
135{
136	while (nregs--) {
137		t4_write_reg(adap, addr_reg, start_idx++);
138		t4_write_reg(adap, data_reg, *vals++);
139	}
140}
141
142/*
143 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
144 * mechanism.  This guarantees that we get the real value even if we're
145 * operating within a Virtual Machine and the Hypervisor is trapping our
146 * Configuration Space accesses.
147 */
148u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
149{
150	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
151		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
152		     V_REGISTER(reg));
153	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
154}
155
156/*
157 *	t4_report_fw_error - report firmware error
158 *	@adap: the adapter
159 *
160 *	The adapter firmware can indicate error conditions to the host.
161 *	This routine prints out the reason for the firmware error (as
162 *	reported by the firmware).
163 */
164static void t4_report_fw_error(struct adapter *adap)
165{
166	static const char *reason[] = {
167		"Crash",			/* PCIE_FW_EVAL_CRASH */
168		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
169		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
170		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
171		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
172		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
173		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
174		"Reserved",			/* reserved */
175	};
176	u32 pcie_fw;
177
178	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
179	if (pcie_fw & F_PCIE_FW_ERR)
180		CH_ERR(adap, "Firmware reports adapter error: %s\n",
181		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
182}
183
184/*
185 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
186 */
187static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
188			 u32 mbox_addr)
189{
190	for ( ; nflit; nflit--, mbox_addr += 8)
191		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
192}
193
194/*
195 * Handle a FW assertion reported in a mailbox.
196 */
197static void fw_asrt(struct adapter *adap, u32 mbox_addr)
198{
199	struct fw_debug_cmd asrt;
200
201	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
202	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
203		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
204		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
205}
206
207#define X_CIM_PF_NOACCESS 0xeeeeeeee
208/**
209 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
210 *	@adap: the adapter
211 *	@mbox: index of the mailbox to use
212 *	@cmd: the command to write
213 *	@size: command length in bytes
214 *	@rpl: where to optionally store the reply
215 *	@sleep_ok: if true we may sleep while awaiting command completion
216 *
217 *	Sends the given command to FW through the selected mailbox and waits
218 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
219 *	store the FW's reply to the command.  The command and its optional
220 *	reply are of the same length.  Some FW commands like RESET and
221 *	INITIALIZE can take a considerable amount of time to execute.
222 *	@sleep_ok determines whether we may sleep while awaiting the response.
223 *	If sleeping is allowed we use progressive backoff otherwise we spin.
224 *
225 *	The return value is 0 on success or a negative errno on failure.  A
226 *	failure can happen either because we are not able to execute the
227 *	command or FW executes it but signals an error.  In the latter case
228 *	the return value is the error code indicated by FW (negated).
229 */
230int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
231		    void *rpl, bool sleep_ok)
232{
233	/*
234	 * We delay in small increments at first in an effort to maintain
235	 * responsiveness for simple, fast executing commands but then back
236	 * off to larger delays to a maximum retry delay.
237	 */
238	static const int delay[] = {
239		1, 1, 3, 5, 10, 10, 20, 50, 100
240	};
241
242	u32 v;
243	u64 res;
244	int i, ms, delay_idx;
245	const __be64 *p = cmd;
246	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
247	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
248
249	if ((size & 15) || size > MBOX_LEN)
250		return -EINVAL;
251
252	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
253	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
254		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
255
256	if (v != X_MBOWNER_PL)
257		return v ? -EBUSY : -ETIMEDOUT;
258
259	for (i = 0; i < size; i += 8, p++)
260		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
261
262	CH_DUMP_MBOX(adap, mbox, data_reg);
263
264	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
265	t4_read_reg(adap, ctl_reg);          /* flush write */
266
267	delay_idx = 0;
268	ms = delay[0];
269
270	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
271		if (sleep_ok) {
272			ms = delay[delay_idx];  /* last element may repeat */
273			if (delay_idx < ARRAY_SIZE(delay) - 1)
274				delay_idx++;
275			msleep(ms);
276		} else
277			mdelay(ms);
278
279		v = t4_read_reg(adap, ctl_reg);
280		if (v == X_CIM_PF_NOACCESS)
281			continue;
282		if (G_MBOWNER(v) == X_MBOWNER_PL) {
283			if (!(v & F_MBMSGVALID)) {
284				t4_write_reg(adap, ctl_reg,
285					     V_MBOWNER(X_MBOWNER_NONE));
286				continue;
287			}
288
289			CH_DUMP_MBOX(adap, mbox, data_reg);
290
291			res = t4_read_reg64(adap, data_reg);
292			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
293				fw_asrt(adap, data_reg);
294				res = V_FW_CMD_RETVAL(EIO);
295			} else if (rpl)
296				get_mbox_rpl(adap, rpl, size / 8, data_reg);
297			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
298			return -G_FW_CMD_RETVAL((int)res);
299		}
300	}
301
302	/*
303	 * We timed out waiting for a reply to our mailbox command.  Report
304	 * the error and also check to see if the firmware reported any
305	 * errors ...
306	 */
307	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
308	       *(const u8 *)cmd, mbox);
309	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
310		t4_report_fw_error(adap);
311	return -ETIMEDOUT;
312}
313
314/**
315 *	t4_mc_read - read from MC through backdoor accesses
316 *	@adap: the adapter
317 *	@idx: which MC to access
318 *	@addr: address of first byte requested
319 *	@data: 64 bytes of data containing the requested address
320 *	@ecc: where to store the corresponding 64-bit ECC word
321 *
322 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
323 *	that covers the requested address @addr.  If @parity is not %NULL it
324 *	is assigned the 64-bit ECC word for the read data.
325 */
326int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
327{
328	int i;
329	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
330	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
331
332	if (is_t4(adap)) {
333		mc_bist_cmd_reg = A_MC_BIST_CMD;
334		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
335		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
336		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
337		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
338	} else {
339		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
340		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
341		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
342		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
343						  idx);
344		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
345						  idx);
346	}
347
348	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
349		return -EBUSY;
350	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
351	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
352	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
353	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
354		     F_START_BIST | V_BIST_CMD_GAP(1));
355	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
356	if (i)
357		return i;
358
359#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
360
361	for (i = 15; i >= 0; i--)
362		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
363	if (ecc)
364		*ecc = t4_read_reg64(adap, MC_DATA(16));
365#undef MC_DATA
366	return 0;
367}
368
369/**
370 *	t4_edc_read - read from EDC through backdoor accesses
371 *	@adap: the adapter
372 *	@idx: which EDC to access
373 *	@addr: address of first byte requested
374 *	@data: 64 bytes of data containing the requested address
375 *	@ecc: where to store the corresponding 64-bit ECC word
376 *
377 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
378 *	that covers the requested address @addr.  If @parity is not %NULL it
379 *	is assigned the 64-bit ECC word for the read data.
380 */
381int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
382{
383	int i;
384	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
385	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
386
387	if (is_t4(adap)) {
388		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
389		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
390		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
391		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
392						    idx);
393		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
394						    idx);
395	} else {
396/*
397 * These macro are missing in t4_regs.h file.
398 * Added temporarily for testing.
399 */
400#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
401#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
402		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
403		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
404		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
405		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
406						    idx);
407		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
408						    idx);
409#undef EDC_REG_T5
410#undef EDC_STRIDE_T5
411	}
412
413	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
414		return -EBUSY;
415	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
416	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
417	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
418	t4_write_reg(adap, edc_bist_cmd_reg,
419		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
420	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
421	if (i)
422		return i;
423
424#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
425
426	for (i = 15; i >= 0; i--)
427		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
428	if (ecc)
429		*ecc = t4_read_reg64(adap, EDC_DATA(16));
430#undef EDC_DATA
431	return 0;
432}
433
434/**
435 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
436 *	@adap: the adapter
437 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
438 *	@addr: address within indicated memory type
439 *	@len: amount of memory to read
440 *	@buf: host memory buffer
441 *
442 *	Reads an [almost] arbitrary memory region in the firmware: the
443 *	firmware memory address, length and host buffer must be aligned on
444 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
445 *	the firmware's memory.  If this memory contains data structures which
446 *	contain multi-byte integers, it's the callers responsibility to
447 *	perform appropriate byte order conversions.
448 */
449int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
450		__be32 *buf)
451{
452	u32 pos, start, end, offset;
453	int ret;
454
455	/*
456	 * Argument sanity checks ...
457	 */
458	if ((addr & 0x3) || (len & 0x3))
459		return -EINVAL;
460
461	/*
462	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
463	 * need to round down the start and round up the end.  We'll start
464	 * copying out of the first line at (addr - start) a word at a time.
465	 */
466	start = addr & ~(64-1);
467	end = (addr + len + 64-1) & ~(64-1);
468	offset = (addr - start)/sizeof(__be32);
469
470	for (pos = start; pos < end; pos += 64, offset = 0) {
471		__be32 data[16];
472
473		/*
474		 * Read the chip's memory block and bail if there's an error.
475		 */
476		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
477			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
478		else
479			ret = t4_edc_read(adap, mtype, pos, data, NULL);
480		if (ret)
481			return ret;
482
483		/*
484		 * Copy the data into the caller's memory buffer.
485		 */
486		while (offset < 16 && len > 0) {
487			*buf++ = data[offset++];
488			len -= sizeof(__be32);
489		}
490	}
491
492	return 0;
493}
494
495/*
496 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
497 * VPD-R header.
498 */
499struct t4_vpd_hdr {
500	u8  id_tag;
501	u8  id_len[2];
502	u8  id_data[ID_LEN];
503	u8  vpdr_tag;
504	u8  vpdr_len[2];
505};
506
507/*
508 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
509 */
510#define EEPROM_MAX_RD_POLL 40
511#define EEPROM_MAX_WR_POLL 6
512#define EEPROM_STAT_ADDR   0x7bfc
513#define VPD_BASE           0x400
514#define VPD_BASE_OLD       0
515#define VPD_LEN            1024
516#define VPD_INFO_FLD_HDR_SIZE	3
517#define CHELSIO_VPD_UNIQUE_ID 0x82
518
519/**
520 *	t4_seeprom_read - read a serial EEPROM location
521 *	@adapter: adapter to read
522 *	@addr: EEPROM virtual address
523 *	@data: where to store the read data
524 *
525 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
526 *	VPD capability.  Note that this function must be called with a virtual
527 *	address.
528 */
529int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
530{
531	u16 val;
532	int attempts = EEPROM_MAX_RD_POLL;
533	unsigned int base = adapter->params.pci.vpd_cap_addr;
534
535	if (addr >= EEPROMVSIZE || (addr & 3))
536		return -EINVAL;
537
538	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
539	do {
540		udelay(10);
541		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
542	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
543
544	if (!(val & PCI_VPD_ADDR_F)) {
545		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
546		return -EIO;
547	}
548	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
549	*data = le32_to_cpu(*data);
550	return 0;
551}
552
553/**
554 *	t4_seeprom_write - write a serial EEPROM location
555 *	@adapter: adapter to write
556 *	@addr: virtual EEPROM address
557 *	@data: value to write
558 *
559 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
560 *	VPD capability.  Note that this function must be called with a virtual
561 *	address.
562 */
563int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
564{
565	u16 val;
566	int attempts = EEPROM_MAX_WR_POLL;
567	unsigned int base = adapter->params.pci.vpd_cap_addr;
568
569	if (addr >= EEPROMVSIZE || (addr & 3))
570		return -EINVAL;
571
572	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
573				 cpu_to_le32(data));
574	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
575				 (u16)addr | PCI_VPD_ADDR_F);
576	do {
577		msleep(1);
578		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
579	} while ((val & PCI_VPD_ADDR_F) && --attempts);
580
581	if (val & PCI_VPD_ADDR_F) {
582		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
583		return -EIO;
584	}
585	return 0;
586}
587
588/**
589 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
590 *	@phys_addr: the physical EEPROM address
591 *	@fn: the PCI function number
592 *	@sz: size of function-specific area
593 *
594 *	Translate a physical EEPROM address to virtual.  The first 1K is
595 *	accessed through virtual addresses starting at 31K, the rest is
596 *	accessed through virtual addresses starting at 0.
597 *
598 *	The mapping is as follows:
599 *	[0..1K) -> [31K..32K)
600 *	[1K..1K+A) -> [ES-A..ES)
601 *	[1K+A..ES) -> [0..ES-A-1K)
602 *
603 *	where A = @fn * @sz, and ES = EEPROM size.
604 */
605int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
606{
607	fn *= sz;
608	if (phys_addr < 1024)
609		return phys_addr + (31 << 10);
610	if (phys_addr < 1024 + fn)
611		return EEPROMSIZE - fn + phys_addr - 1024;
612	if (phys_addr < EEPROMSIZE)
613		return phys_addr - 1024 - fn;
614	return -EINVAL;
615}
616
617/**
618 *	t4_seeprom_wp - enable/disable EEPROM write protection
619 *	@adapter: the adapter
620 *	@enable: whether to enable or disable write protection
621 *
622 *	Enables or disables write protection on the serial EEPROM.
623 */
624int t4_seeprom_wp(struct adapter *adapter, int enable)
625{
626	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
627}
628
629/**
630 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
631 *	@v: Pointer to buffered vpd data structure
632 *	@kw: The keyword to search for
633 *
634 *	Returns the value of the information field keyword or
635 *	-ENOENT otherwise.
636 */
637static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
638{
639         int i;
640	 unsigned int offset , len;
641	 const u8 *buf = &v->id_tag;
642	 const u8 *vpdr_len = &v->vpdr_tag;
643	 offset = sizeof(struct t4_vpd_hdr);
644	 len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
645
646	 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
647		 return -ENOENT;
648	 }
649
650         for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
651		 if(memcmp(buf + i , kw , 2) == 0){
652			 i += VPD_INFO_FLD_HDR_SIZE;
653                         return i;
654		  }
655
656                 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
657         }
658
659         return -ENOENT;
660}
661
662
663/**
664 *	get_vpd_params - read VPD parameters from VPD EEPROM
665 *	@adapter: adapter to read
666 *	@p: where to store the parameters
667 *
668 *	Reads card parameters stored in VPD EEPROM.
669 */
670static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
671{
672	int i, ret, addr;
673	int ec, sn, pn, na;
674	u8 vpd[VPD_LEN], csum;
675	const struct t4_vpd_hdr *v;
676
677	/*
678	 * Card information normally starts at VPD_BASE but early cards had
679	 * it at 0.
680	 */
681	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
682	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
683
684	for (i = 0; i < sizeof(vpd); i += 4) {
685		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
686		if (ret)
687			return ret;
688	}
689 	v = (const struct t4_vpd_hdr *)vpd;
690
691#define FIND_VPD_KW(var,name) do { \
692	var = get_vpd_keyword_val(v , name); \
693	if (var < 0) { \
694		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
695		return -EINVAL; \
696	} \
697} while (0)
698
699	FIND_VPD_KW(i, "RV");
700	for (csum = 0; i >= 0; i--)
701		csum += vpd[i];
702
703	if (csum) {
704		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
705		return -EINVAL;
706	}
707	FIND_VPD_KW(ec, "EC");
708	FIND_VPD_KW(sn, "SN");
709	FIND_VPD_KW(pn, "PN");
710	FIND_VPD_KW(na, "NA");
711#undef FIND_VPD_KW
712
713	memcpy(p->id, v->id_data, ID_LEN);
714	strstrip(p->id);
715	memcpy(p->ec, vpd + ec, EC_LEN);
716	strstrip(p->ec);
717	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
718	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
719	strstrip(p->sn);
720	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
721	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
722	strstrip((char *)p->pn);
723	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
724	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
725	strstrip((char *)p->na);
726
727	return 0;
728}
729
730/* serial flash and firmware constants and flash config file constants */
731enum {
732	SF_ATTEMPTS = 10,             /* max retries for SF operations */
733
734	/* flash command opcodes */
735	SF_PROG_PAGE    = 2,          /* program page */
736	SF_WR_DISABLE   = 4,          /* disable writes */
737	SF_RD_STATUS    = 5,          /* read status register */
738	SF_WR_ENABLE    = 6,          /* enable writes */
739	SF_RD_DATA_FAST = 0xb,        /* read flash */
740	SF_RD_ID        = 0x9f,       /* read ID */
741	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
742};
743
744/**
745 *	sf1_read - read data from the serial flash
746 *	@adapter: the adapter
747 *	@byte_cnt: number of bytes to read
748 *	@cont: whether another operation will be chained
749 *	@lock: whether to lock SF for PL access only
750 *	@valp: where to store the read data
751 *
752 *	Reads up to 4 bytes of data from the serial flash.  The location of
753 *	the read needs to be specified prior to calling this by issuing the
754 *	appropriate commands to the serial flash.
755 */
756static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
757		    int lock, u32 *valp)
758{
759	int ret;
760
761	if (!byte_cnt || byte_cnt > 4)
762		return -EINVAL;
763	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
764		return -EBUSY;
765	t4_write_reg(adapter, A_SF_OP,
766		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
767	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
768	if (!ret)
769		*valp = t4_read_reg(adapter, A_SF_DATA);
770	return ret;
771}
772
773/**
774 *	sf1_write - write data to the serial flash
775 *	@adapter: the adapter
776 *	@byte_cnt: number of bytes to write
777 *	@cont: whether another operation will be chained
778 *	@lock: whether to lock SF for PL access only
779 *	@val: value to write
780 *
781 *	Writes up to 4 bytes of data to the serial flash.  The location of
782 *	the write needs to be specified prior to calling this by issuing the
783 *	appropriate commands to the serial flash.
784 */
785static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
786		     int lock, u32 val)
787{
788	if (!byte_cnt || byte_cnt > 4)
789		return -EINVAL;
790	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
791		return -EBUSY;
792	t4_write_reg(adapter, A_SF_DATA, val);
793	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
794		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
796}
797
798/**
799 *	flash_wait_op - wait for a flash operation to complete
800 *	@adapter: the adapter
801 *	@attempts: max number of polls of the status register
802 *	@delay: delay between polls in ms
803 *
804 *	Wait for a flash operation to complete by polling the status register.
805 */
806static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
807{
808	int ret;
809	u32 status;
810
811	while (1) {
812		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
813		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
814			return ret;
815		if (!(status & 1))
816			return 0;
817		if (--attempts == 0)
818			return -EAGAIN;
819		if (delay)
820			msleep(delay);
821	}
822}
823
824/**
825 *	t4_read_flash - read words from serial flash
826 *	@adapter: the adapter
827 *	@addr: the start address for the read
828 *	@nwords: how many 32-bit words to read
829 *	@data: where to store the read data
830 *	@byte_oriented: whether to store data as bytes or as words
831 *
832 *	Read the specified number of 32-bit words from the serial flash.
833 *	If @byte_oriented is set the read data is stored as a byte array
834 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
835 *	natural endianess.
836 */
837int t4_read_flash(struct adapter *adapter, unsigned int addr,
838		  unsigned int nwords, u32 *data, int byte_oriented)
839{
840	int ret;
841
842	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
843		return -EINVAL;
844
845	addr = swab32(addr) | SF_RD_DATA_FAST;
846
847	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
848	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
849		return ret;
850
851	for ( ; nwords; nwords--, data++) {
852		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
853		if (nwords == 1)
854			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
855		if (ret)
856			return ret;
857		if (byte_oriented)
858			*data = htonl(*data);
859	}
860	return 0;
861}
862
863/**
864 *	t4_write_flash - write up to a page of data to the serial flash
865 *	@adapter: the adapter
866 *	@addr: the start address to write
867 *	@n: length of data to write in bytes
868 *	@data: the data to write
869 *	@byte_oriented: whether to store data as bytes or as words
870 *
871 *	Writes up to a page of data (256 bytes) to the serial flash starting
872 *	at the given address.  All the data must be written to the same page.
873 *	If @byte_oriented is set the write data is stored as byte stream
874 *	(i.e. matches what on disk), otherwise in big-endian.
875 */
876static int t4_write_flash(struct adapter *adapter, unsigned int addr,
877			  unsigned int n, const u8 *data, int byte_oriented)
878{
879	int ret;
880	u32 buf[SF_PAGE_SIZE / 4];
881	unsigned int i, c, left, val, offset = addr & 0xff;
882
883	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
884		return -EINVAL;
885
886	val = swab32(addr) | SF_PROG_PAGE;
887
888	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
889	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
890		goto unlock;
891
892	for (left = n; left; left -= c) {
893		c = min(left, 4U);
894		for (val = 0, i = 0; i < c; ++i)
895			val = (val << 8) + *data++;
896
897		if (!byte_oriented)
898			val = htonl(val);
899
900		ret = sf1_write(adapter, c, c != left, 1, val);
901		if (ret)
902			goto unlock;
903	}
904	ret = flash_wait_op(adapter, 8, 1);
905	if (ret)
906		goto unlock;
907
908	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
909
910	/* Read the page to verify the write succeeded */
911	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
912			    byte_oriented);
913	if (ret)
914		return ret;
915
916	if (memcmp(data - n, (u8 *)buf + offset, n)) {
917		CH_ERR(adapter, "failed to correctly write the flash page "
918		       "at %#x\n", addr);
919		return -EIO;
920	}
921	return 0;
922
923unlock:
924	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
925	return ret;
926}
927
928/**
929 *	t4_get_fw_version - read the firmware version
930 *	@adapter: the adapter
931 *	@vers: where to place the version
932 *
933 *	Reads the FW version from flash.
934 */
935int t4_get_fw_version(struct adapter *adapter, u32 *vers)
936{
937	return t4_read_flash(adapter,
938			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
939			     vers, 0);
940}
941
942/**
943 *	t4_get_tp_version - read the TP microcode version
944 *	@adapter: the adapter
945 *	@vers: where to place the version
946 *
947 *	Reads the TP microcode version from flash.
948 */
949int t4_get_tp_version(struct adapter *adapter, u32 *vers)
950{
951	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
952							      tp_microcode_ver),
953			     1, vers, 0);
954}
955
956/**
957 *	t4_check_fw_version - check if the FW is compatible with this driver
958 *	@adapter: the adapter
959 *
960 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
961 *	if there's exact match, a negative error if the version could not be
962 *	read or there's a major version mismatch, and a positive value if the
963 *	expected major version is found but there's a minor version mismatch.
964 */
965int t4_check_fw_version(struct adapter *adapter)
966{
967	int ret, major, minor, micro;
968	int exp_major, exp_minor, exp_micro;
969
970	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
971	if (!ret)
972		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
973	if (ret)
974		return ret;
975
976	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
977	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
978	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
979
980	switch (chip_id(adapter)) {
981	case CHELSIO_T4:
982		exp_major = T4FW_VERSION_MAJOR;
983		exp_minor = T4FW_VERSION_MINOR;
984		exp_micro = T4FW_VERSION_MICRO;
985		break;
986	case CHELSIO_T5:
987		exp_major = T5FW_VERSION_MAJOR;
988		exp_minor = T5FW_VERSION_MINOR;
989		exp_micro = T5FW_VERSION_MICRO;
990		break;
991	default:
992		CH_ERR(adapter, "Unsupported chip type, %x\n",
993		    chip_id(adapter));
994		return -EINVAL;
995	}
996
997	if (major != exp_major) {            /* major mismatch - fail */
998		CH_ERR(adapter, "card FW has major version %u, driver wants "
999		       "%u\n", major, exp_major);
1000		return -EINVAL;
1001	}
1002
1003	if (minor == exp_minor && micro == exp_micro)
1004		return 0;                                   /* perfect match */
1005
1006	/* Minor/micro version mismatch.  Report it but often it's OK. */
1007	return 1;
1008}
1009
1010/**
1011 *	t4_flash_erase_sectors - erase a range of flash sectors
1012 *	@adapter: the adapter
1013 *	@start: the first sector to erase
1014 *	@end: the last sector to erase
1015 *
1016 *	Erases the sectors in the given inclusive range.
1017 */
1018static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1019{
1020	int ret = 0;
1021
1022	while (start <= end) {
1023		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1024		    (ret = sf1_write(adapter, 4, 0, 1,
1025				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1026		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1027			CH_ERR(adapter, "erase of flash sector %d failed, "
1028			       "error %d\n", start, ret);
1029			break;
1030		}
1031		start++;
1032	}
1033	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
1034	return ret;
1035}
1036
1037/**
1038 *	t4_flash_cfg_addr - return the address of the flash configuration file
1039 *	@adapter: the adapter
1040 *
1041 *	Return the address within the flash where the Firmware Configuration
1042 *	File is stored, or an error if the device FLASH is too small to contain
1043 *	a Firmware Configuration File.
1044 */
1045int t4_flash_cfg_addr(struct adapter *adapter)
1046{
1047	/*
1048	 * If the device FLASH isn't large enough to hold a Firmware
1049	 * Configuration File, return an error.
1050	 */
1051	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
1052		return -ENOSPC;
1053
1054	return FLASH_CFG_START;
1055}
1056
1057/**
1058 *	t4_load_cfg - download config file
1059 *	@adap: the adapter
1060 *	@cfg_data: the cfg text file to write
1061 *	@size: text file size
1062 *
1063 *	Write the supplied config text file to the card's serial flash.
1064 */
1065int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1066{
1067	int ret, i, n, cfg_addr;
1068	unsigned int addr;
1069	unsigned int flash_cfg_start_sec;
1070	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1071
1072	cfg_addr = t4_flash_cfg_addr(adap);
1073	if (cfg_addr < 0)
1074		return cfg_addr;
1075
1076	addr = cfg_addr;
1077	flash_cfg_start_sec = addr / SF_SEC_SIZE;
1078
1079	if (size > FLASH_CFG_MAX_SIZE) {
1080		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
1081		       FLASH_CFG_MAX_SIZE);
1082		return -EFBIG;
1083	}
1084
1085	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
1086			 sf_sec_size);
1087	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
1088				     flash_cfg_start_sec + i - 1);
1089	/*
1090	 * If size == 0 then we're simply erasing the FLASH sectors associated
1091	 * with the on-adapter Firmware Configuration File.
1092	 */
1093	if (ret || size == 0)
1094		goto out;
1095
1096	/* this will write to the flash up to SF_PAGE_SIZE at a time */
1097	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
1098		if ( (size - i) <  SF_PAGE_SIZE)
1099			n = size - i;
1100		else
1101			n = SF_PAGE_SIZE;
1102		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
1103		if (ret)
1104			goto out;
1105
1106		addr += SF_PAGE_SIZE;
1107		cfg_data += SF_PAGE_SIZE;
1108	}
1109
1110out:
1111	if (ret)
1112		CH_ERR(adap, "config file %s failed %d\n",
1113		       (size == 0 ? "clear" : "download"), ret);
1114	return ret;
1115}
1116
1117
1118/**
1119 *	t4_load_fw - download firmware
1120 *	@adap: the adapter
1121 *	@fw_data: the firmware image to write
1122 *	@size: image size
1123 *
1124 *	Write the supplied firmware image to the card's serial flash.
1125 */
1126int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1127{
1128	u32 csum;
1129	int ret, addr;
1130	unsigned int i;
1131	u8 first_page[SF_PAGE_SIZE];
1132	const u32 *p = (const u32 *)fw_data;
1133	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
1134	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1135	unsigned int fw_start_sec;
1136	unsigned int fw_start;
1137	unsigned int fw_size;
1138
1139	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
1140		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
1141		fw_start = FLASH_FWBOOTSTRAP_START;
1142		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
1143	} else {
1144		fw_start_sec = FLASH_FW_START_SEC;
1145 		fw_start = FLASH_FW_START;
1146		fw_size = FLASH_FW_MAX_SIZE;
1147	}
1148	if (!size) {
1149		CH_ERR(adap, "FW image has no data\n");
1150		return -EINVAL;
1151	}
1152	if (size & 511) {
1153		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1154		return -EINVAL;
1155	}
1156	if (ntohs(hdr->len512) * 512 != size) {
1157		CH_ERR(adap, "FW image size differs from size in FW header\n");
1158		return -EINVAL;
1159	}
1160	if (size > fw_size) {
1161		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
1162		return -EFBIG;
1163	}
1164	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
1165	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
1166		CH_ERR(adap,
1167		    "FW image (%d) is not suitable for this adapter (%d)\n",
1168		    hdr->chip, chip_id(adap));
1169		return -EINVAL;
1170	}
1171
1172	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1173		csum += ntohl(p[i]);
1174
1175	if (csum != 0xffffffff) {
1176		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1177		       csum);
1178		return -EINVAL;
1179	}
1180
1181	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1182	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
1183	if (ret)
1184		goto out;
1185
1186	/*
1187	 * We write the correct version at the end so the driver can see a bad
1188	 * version if the FW write fails.  Start by writing a copy of the
1189	 * first page with a bad version.
1190	 */
1191	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1192	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1193	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
1194	if (ret)
1195		goto out;
1196
1197	addr = fw_start;
1198	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1199		addr += SF_PAGE_SIZE;
1200		fw_data += SF_PAGE_SIZE;
1201		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1202		if (ret)
1203			goto out;
1204	}
1205
1206	ret = t4_write_flash(adap,
1207			     fw_start + offsetof(struct fw_hdr, fw_ver),
1208			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1209out:
1210	if (ret)
1211		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1212	return ret;
1213}
1214
1215/* BIOS boot headers */
1216typedef struct pci_expansion_rom_header {
1217	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1218	u8	reserved[22]; /* Reserved per processor Architecture data */
1219	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1220} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
1221
1222/* Legacy PCI Expansion ROM Header */
1223typedef struct legacy_pci_expansion_rom_header {
1224	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
1225	u8	size512; /* Current Image Size in units of 512 bytes */
1226	u8	initentry_point[4];
1227	u8	cksum; /* Checksum computed on the entire Image */
1228	u8	reserved[16]; /* Reserved */
1229	u8	pcir_offset[2]; /* Offset to PCI Data Struture */
1230} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
1231
1232/* EFI PCI Expansion ROM Header */
1233typedef struct efi_pci_expansion_rom_header {
1234	u8	signature[2]; // ROM signature. The value 0xaa55
1235	u8	initialization_size[2]; /* Units 512. Includes this header */
1236	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
1237	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
1238	u8	efi_machine_type[2]; /* Machine type from EFI image header */
1239	u8	compression_type[2]; /* Compression type. */
1240		/*
1241		 * Compression type definition
1242		 * 0x0: uncompressed
1243		 * 0x1: Compressed
1244		 * 0x2-0xFFFF: Reserved
1245		 */
1246	u8	reserved[8]; /* Reserved */
1247	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
1248	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
1249} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
1250
1251/* PCI Data Structure Format */
1252typedef struct pcir_data_structure { /* PCI Data Structure */
1253	u8	signature[4]; /* Signature. The string "PCIR" */
1254	u8	vendor_id[2]; /* Vendor Identification */
1255	u8	device_id[2]; /* Device Identification */
1256	u8	vital_product[2]; /* Pointer to Vital Product Data */
1257	u8	length[2]; /* PCIR Data Structure Length */
1258	u8	revision; /* PCIR Data Structure Revision */
1259	u8	class_code[3]; /* Class Code */
1260	u8	image_length[2]; /* Image Length. Multiple of 512B */
1261	u8	code_revision[2]; /* Revision Level of Code/Data */
1262	u8	code_type; /* Code Type. */
1263		/*
1264		 * PCI Expansion ROM Code Types
1265		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
1266		 * 0x01: Open Firmware standard for PCI. FCODE
1267		 * 0x02: Hewlett-Packard PA RISC. HP reserved
1268		 * 0x03: EFI Image. EFI
1269		 * 0x04-0xFF: Reserved.
1270		 */
1271	u8	indicator; /* Indicator. Identifies the last image in the ROM */
1272	u8	reserved[2]; /* Reserved */
1273} pcir_data_t; /* PCI__DATA_STRUCTURE */
1274
1275/* BOOT constants */
1276enum {
1277	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
1278	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
1279	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
1280	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
1281	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment  */
1282	VENDOR_ID = 0x1425, /* Vendor ID */
1283	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
1284};
1285
1286/*
1287 *	modify_device_id - Modifies the device ID of the Boot BIOS image
1288 *	@adatper: the device ID to write.
1289 *	@boot_data: the boot image to modify.
1290 *
1291 *	Write the supplied device ID to the boot BIOS image.
1292 */
1293static void modify_device_id(int device_id, u8 *boot_data)
1294{
1295	legacy_pci_exp_rom_header_t *header;
1296	pcir_data_t *pcir_header;
1297	u32 cur_header = 0;
1298
1299	/*
1300	 * Loop through all chained images and change the device ID's
1301	 */
1302	while (1) {
1303		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
1304		pcir_header = (pcir_data_t *) &boot_data[cur_header +
1305		    le16_to_cpu(*(u16*)header->pcir_offset)];
1306
1307		/*
1308		 * Only modify the Device ID if code type is Legacy or HP.
1309		 * 0x00: Okay to modify
1310		 * 0x01: FCODE. Do not be modify
1311		 * 0x03: Okay to modify
1312		 * 0x04-0xFF: Do not modify
1313		 */
1314		if (pcir_header->code_type == 0x00) {
1315			u8 csum = 0;
1316			int i;
1317
1318			/*
1319			 * Modify Device ID to match current adatper
1320			 */
1321			*(u16*) pcir_header->device_id = device_id;
1322
1323			/*
1324			 * Set checksum temporarily to 0.
1325			 * We will recalculate it later.
1326			 */
1327			header->cksum = 0x0;
1328
1329			/*
1330			 * Calculate and update checksum
1331			 */
1332			for (i = 0; i < (header->size512 * 512); i++)
1333				csum += (u8)boot_data[cur_header + i];
1334
1335			/*
1336			 * Invert summed value to create the checksum
1337			 * Writing new checksum value directly to the boot data
1338			 */
1339			boot_data[cur_header + 7] = -csum;
1340
1341		} else if (pcir_header->code_type == 0x03) {
1342
1343			/*
1344			 * Modify Device ID to match current adatper
1345			 */
1346			*(u16*) pcir_header->device_id = device_id;
1347
1348		}
1349
1350
1351		/*
1352		 * Check indicator element to identify if this is the last
1353		 * image in the ROM.
1354		 */
1355		if (pcir_header->indicator & 0x80)
1356			break;
1357
1358		/*
1359		 * Move header pointer up to the next image in the ROM.
1360		 */
1361		cur_header += header->size512 * 512;
1362	}
1363}
1364
1365/*
1366 *	t4_load_boot - download boot flash
1367 *	@adapter: the adapter
1368 *	@boot_data: the boot image to write
1369 *	@boot_addr: offset in flash to write boot_data
1370 *	@size: image size
1371 *
1372 *	Write the supplied boot image to the card's serial flash.
1373 *	The boot image has the following sections: a 28-byte header and the
1374 *	boot image.
1375 */
1376int t4_load_boot(struct adapter *adap, u8 *boot_data,
1377		 unsigned int boot_addr, unsigned int size)
1378{
1379	pci_exp_rom_header_t *header;
1380	int pcir_offset ;
1381	pcir_data_t *pcir_header;
1382	int ret, addr;
1383	uint16_t device_id;
1384	unsigned int i;
1385	unsigned int boot_sector = boot_addr * 1024;
1386	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1387
1388	/*
1389	 * Make sure the boot image does not encroach on the firmware region
1390	 */
1391	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1392		CH_ERR(adap, "boot image encroaching on firmware region\n");
1393		return -EFBIG;
1394	}
1395
1396	/*
1397	 * Number of sectors spanned
1398	 */
1399	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
1400			sf_sec_size);
1401	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1402				     (boot_sector >> 16) + i - 1);
1403
1404	/*
1405	 * If size == 0 then we're simply erasing the FLASH sectors associated
1406	 * with the on-adapter option ROM file
1407	 */
1408	if (ret || (size == 0))
1409		goto out;
1410
1411	/* Get boot header */
1412	header = (pci_exp_rom_header_t *)boot_data;
1413	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
1414	/* PCIR Data Structure */
1415	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
1416
1417	/*
1418	 * Perform some primitive sanity testing to avoid accidentally
1419	 * writing garbage over the boot sectors.  We ought to check for
1420	 * more but it's not worth it for now ...
1421	 */
1422	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1423		CH_ERR(adap, "boot image too small/large\n");
1424		return -EFBIG;
1425	}
1426
1427	/*
1428	 * Check BOOT ROM header signature
1429	 */
1430	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
1431		CH_ERR(adap, "Boot image missing signature\n");
1432		return -EINVAL;
1433	}
1434
1435	/*
1436	 * Check PCI header signature
1437	 */
1438	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
1439		CH_ERR(adap, "PCI header missing signature\n");
1440		return -EINVAL;
1441	}
1442
1443	/*
1444	 * Check Vendor ID matches Chelsio ID
1445	 */
1446	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
1447		CH_ERR(adap, "Vendor ID missing signature\n");
1448		return -EINVAL;
1449	}
1450
1451	/*
1452	 * Retrieve adapter's device ID
1453	 */
1454	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
1455	/* Want to deal with PF 0 so I strip off PF 4 indicator */
1456	device_id = (device_id & 0xff) | 0x4000;
1457
1458	/*
1459	 * Check PCIE Device ID
1460	 */
1461	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
1462		/*
1463		 * Change the device ID in the Boot BIOS image to match
1464		 * the Device ID of the current adapter.
1465		 */
1466		modify_device_id(device_id, boot_data);
1467	}
1468
1469	/*
1470	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1471	 * we finish copying the rest of the boot image. This will ensure
1472	 * that the BIOS boot header will only be written if the boot image
1473	 * was written in full.
1474	 */
1475	addr = boot_sector;
1476	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1477		addr += SF_PAGE_SIZE;
1478		boot_data += SF_PAGE_SIZE;
1479		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1480		if (ret)
1481			goto out;
1482	}
1483
1484	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1485
1486out:
1487	if (ret)
1488		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1489	return ret;
1490}
1491
1492/**
1493 *	t4_read_cimq_cfg - read CIM queue configuration
1494 *	@adap: the adapter
1495 *	@base: holds the queue base addresses in bytes
1496 *	@size: holds the queue sizes in bytes
1497 *	@thres: holds the queue full thresholds in bytes
1498 *
1499 *	Returns the current configuration of the CIM queues, starting with
1500 *	the IBQs, then the OBQs.
1501 */
1502void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1503{
1504	unsigned int i, v;
1505	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1506
1507	for (i = 0; i < CIM_NUM_IBQ; i++) {
1508		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1509			     V_QUENUMSELECT(i));
1510		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1511		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1512		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1513		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1514	}
1515	for (i = 0; i < cim_num_obq; i++) {
1516		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1517			     V_QUENUMSELECT(i));
1518		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1519		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1520		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1521	}
1522}
1523
1524/**
1525 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1526 *	@adap: the adapter
1527 *	@qid: the queue index
1528 *	@data: where to store the queue contents
1529 *	@n: capacity of @data in 32-bit words
1530 *
1531 *	Reads the contents of the selected CIM queue starting at address 0 up
1532 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1533 *	error and the number of 32-bit words actually read on success.
1534 */
1535int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1536{
1537	int i, err;
1538	unsigned int addr;
1539	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1540
1541	if (qid > 5 || (n & 3))
1542		return -EINVAL;
1543
1544	addr = qid * nwords;
1545	if (n > nwords)
1546		n = nwords;
1547
1548	for (i = 0; i < n; i++, addr++) {
1549		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1550			     F_IBQDBGEN);
1551		/*
1552		 * It might take 3-10ms before the IBQ debug read access is
1553		 * allowed.  Wait for 1 Sec with a delay of 1 usec.
1554		 */
1555		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1556				      1000000, 1);
1557		if (err)
1558			return err;
1559		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1560	}
1561	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1562	return i;
1563}
1564
1565/**
1566 *	t4_read_cim_obq - read the contents of a CIM outbound queue
1567 *	@adap: the adapter
1568 *	@qid: the queue index
1569 *	@data: where to store the queue contents
1570 *	@n: capacity of @data in 32-bit words
1571 *
1572 *	Reads the contents of the selected CIM queue starting at address 0 up
1573 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1574 *	error and the number of 32-bit words actually read on success.
1575 */
1576int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1577{
1578	int i, err;
1579	unsigned int addr, v, nwords;
1580	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
1581
1582	if (qid >= cim_num_obq || (n & 3))
1583		return -EINVAL;
1584
1585	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1586		     V_QUENUMSELECT(qid));
1587	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1588
1589	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
1590	nwords = G_CIMQSIZE(v) * 64;  /* same */
1591	if (n > nwords)
1592		n = nwords;
1593
1594	for (i = 0; i < n; i++, addr++) {
1595		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1596			     F_OBQDBGEN);
1597		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1598				      2, 1);
1599		if (err)
1600			return err;
1601		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1602	}
1603	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1604	return i;
1605}
1606
1607enum {
1608	CIM_QCTL_BASE     = 0,
1609	CIM_CTL_BASE      = 0x2000,
1610	CIM_PBT_ADDR_BASE = 0x2800,
1611	CIM_PBT_LRF_BASE  = 0x3000,
1612	CIM_PBT_DATA_BASE = 0x3800
1613};
1614
1615/**
1616 *	t4_cim_read - read a block from CIM internal address space
1617 *	@adap: the adapter
1618 *	@addr: the start address within the CIM address space
1619 *	@n: number of words to read
1620 *	@valp: where to store the result
1621 *
1622 *	Reads a block of 4-byte words from the CIM intenal address space.
1623 */
1624int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1625		unsigned int *valp)
1626{
1627	int ret = 0;
1628
1629	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1630		return -EBUSY;
1631
1632	for ( ; !ret && n--; addr += 4) {
1633		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1634		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1635				      0, 5, 2);
1636		if (!ret)
1637			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1638	}
1639	return ret;
1640}
1641
1642/**
1643 *	t4_cim_write - write a block into CIM internal address space
1644 *	@adap: the adapter
1645 *	@addr: the start address within the CIM address space
1646 *	@n: number of words to write
1647 *	@valp: set of values to write
1648 *
1649 *	Writes a block of 4-byte words into the CIM intenal address space.
1650 */
1651int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1652		 const unsigned int *valp)
1653{
1654	int ret = 0;
1655
1656	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1657		return -EBUSY;
1658
1659	for ( ; !ret && n--; addr += 4) {
1660		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1661		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1662		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1663				      0, 5, 2);
1664	}
1665	return ret;
1666}
1667
1668static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
1669{
1670	return t4_cim_write(adap, addr, 1, &val);
1671}
1672
1673/**
1674 *	t4_cim_ctl_read - read a block from CIM control region
1675 *	@adap: the adapter
1676 *	@addr: the start address within the CIM control region
1677 *	@n: number of words to read
1678 *	@valp: where to store the result
1679 *
1680 *	Reads a block of 4-byte words from the CIM control region.
1681 */
1682int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1683		    unsigned int *valp)
1684{
1685	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1686}
1687
1688/**
1689 *	t4_cim_read_la - read CIM LA capture buffer
1690 *	@adap: the adapter
1691 *	@la_buf: where to store the LA data
1692 *	@wrptr: the HW write pointer within the capture buffer
1693 *
1694 *	Reads the contents of the CIM LA buffer with the most recent entry at
1695 *	the end	of the returned data and with the entry at @wrptr first.
1696 *	We try to leave the LA in the running state we find it in.
1697 */
1698int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1699{
1700	int i, ret;
1701	unsigned int cfg, val, idx;
1702
1703	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1704	if (ret)
1705		return ret;
1706
1707	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1708		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1709		if (ret)
1710			return ret;
1711	}
1712
1713	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1714	if (ret)
1715		goto restart;
1716
1717	idx = G_UPDBGLAWRPTR(val);
1718	if (wrptr)
1719		*wrptr = idx;
1720
1721	for (i = 0; i < adap->params.cim_la_size; i++) {
1722		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1723				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1724		if (ret)
1725			break;
1726		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1727		if (ret)
1728			break;
1729		if (val & F_UPDBGLARDEN) {
1730			ret = -ETIMEDOUT;
1731			break;
1732		}
1733		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1734		if (ret)
1735			break;
1736		idx = (idx + 1) & M_UPDBGLARDPTR;
1737	}
1738restart:
1739	if (cfg & F_UPDBGLAEN) {
1740		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1741				      cfg & ~F_UPDBGLARDEN);
1742		if (!ret)
1743			ret = r;
1744	}
1745	return ret;
1746}
1747
1748void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1749			unsigned int *pif_req_wrptr,
1750			unsigned int *pif_rsp_wrptr)
1751{
1752	int i, j;
1753	u32 cfg, val, req, rsp;
1754
1755	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1756	if (cfg & F_LADBGEN)
1757		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1758
1759	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1760	req = G_POLADBGWRPTR(val);
1761	rsp = G_PILADBGWRPTR(val);
1762	if (pif_req_wrptr)
1763		*pif_req_wrptr = req;
1764	if (pif_rsp_wrptr)
1765		*pif_rsp_wrptr = rsp;
1766
1767	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1768		for (j = 0; j < 6; j++) {
1769			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1770				     V_PILADBGRDPTR(rsp));
1771			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1772			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1773			req++;
1774			rsp++;
1775		}
1776		req = (req + 2) & M_POLADBGRDPTR;
1777		rsp = (rsp + 2) & M_PILADBGRDPTR;
1778	}
1779	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1780}
1781
1782void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1783{
1784	u32 cfg;
1785	int i, j, idx;
1786
1787	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1788	if (cfg & F_LADBGEN)
1789		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1790
1791	for (i = 0; i < CIM_MALA_SIZE; i++) {
1792		for (j = 0; j < 5; j++) {
1793			idx = 8 * i + j;
1794			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1795				     V_PILADBGRDPTR(idx));
1796			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1797			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1798		}
1799	}
1800	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1801}
1802
1803/**
1804 *	t4_tp_read_la - read TP LA capture buffer
1805 *	@adap: the adapter
1806 *	@la_buf: where to store the LA data
1807 *	@wrptr: the HW write pointer within the capture buffer
1808 *
1809 *	Reads the contents of the TP LA buffer with the most recent entry at
1810 *	the end	of the returned data and with the entry at @wrptr first.
1811 *	We leave the LA in the running state we find it in.
1812 */
1813void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1814{
1815	bool last_incomplete;
1816	unsigned int i, cfg, val, idx;
1817
1818	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1819	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1820		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1821			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1822
1823	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1824	idx = G_DBGLAWPTR(val);
1825	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1826	if (last_incomplete)
1827		idx = (idx + 1) & M_DBGLARPTR;
1828	if (wrptr)
1829		*wrptr = idx;
1830
1831	val &= 0xffff;
1832	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1833	val |= adap->params.tp.la_mask;
1834
1835	for (i = 0; i < TPLA_SIZE; i++) {
1836		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1837		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1838		idx = (idx + 1) & M_DBGLARPTR;
1839	}
1840
1841	/* Wipe out last entry if it isn't valid */
1842	if (last_incomplete)
1843		la_buf[TPLA_SIZE - 1] = ~0ULL;
1844
1845	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1846		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1847			     cfg | adap->params.tp.la_mask);
1848}
1849
1850void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1851{
1852	unsigned int i, j;
1853
1854	for (i = 0; i < 8; i++) {
1855		u32 *p = la_buf + i;
1856
1857		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1858		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1859		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1860		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1861			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1862	}
1863}
1864
1865#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1866		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1867		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1868
1869/**
1870 *	t4_link_start - apply link configuration to MAC/PHY
1871 *	@phy: the PHY to setup
1872 *	@mac: the MAC to setup
1873 *	@lc: the requested link configuration
1874 *
1875 *	Set up a port's MAC and PHY according to a desired link configuration.
1876 *	- If the PHY can auto-negotiate first decide what to advertise, then
1877 *	  enable/disable auto-negotiation as desired, and reset.
1878 *	- If the PHY does not auto-negotiate just reset it.
1879 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1880 *	  otherwise do it later based on the outcome of auto-negotiation.
1881 */
1882int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1883		  struct link_config *lc)
1884{
1885	struct fw_port_cmd c;
1886	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1887
1888	lc->link_ok = 0;
1889	if (lc->requested_fc & PAUSE_RX)
1890		fc |= FW_PORT_CAP_FC_RX;
1891	if (lc->requested_fc & PAUSE_TX)
1892		fc |= FW_PORT_CAP_FC_TX;
1893
1894	memset(&c, 0, sizeof(c));
1895	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1896			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1897	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1898				  FW_LEN16(c));
1899
1900	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1901		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1902		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1903	} else if (lc->autoneg == AUTONEG_DISABLE) {
1904		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1905		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1906	} else
1907		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1908
1909	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1910}
1911
1912/**
1913 *	t4_restart_aneg - restart autonegotiation
1914 *	@adap: the adapter
1915 *	@mbox: mbox to use for the FW command
1916 *	@port: the port id
1917 *
1918 *	Restarts autonegotiation for the selected port.
1919 */
1920int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1921{
1922	struct fw_port_cmd c;
1923
1924	memset(&c, 0, sizeof(c));
1925	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1926			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1927	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1928				  FW_LEN16(c));
1929	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1930	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1931}
1932
1933struct intr_info {
1934	unsigned int mask;       /* bits to check in interrupt status */
1935	const char *msg;         /* message to print or NULL */
1936	short stat_idx;          /* stat counter to increment or -1 */
1937	unsigned short fatal;    /* whether the condition reported is fatal */
1938};
1939
1940/**
1941 *	t4_handle_intr_status - table driven interrupt handler
1942 *	@adapter: the adapter that generated the interrupt
1943 *	@reg: the interrupt status register to process
1944 *	@acts: table of interrupt actions
1945 *
1946 *	A table driven interrupt handler that applies a set of masks to an
1947 *	interrupt status word and performs the corresponding actions if the
1948 *	interrupts described by the mask have occured.  The actions include
1949 *	optionally emitting a warning or alert message.  The table is terminated
1950 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1951 *	conditions.
1952 */
1953static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1954				 const struct intr_info *acts)
1955{
1956	int fatal = 0;
1957	unsigned int mask = 0;
1958	unsigned int status = t4_read_reg(adapter, reg);
1959
1960	for ( ; acts->mask; ++acts) {
1961		if (!(status & acts->mask))
1962			continue;
1963		if (acts->fatal) {
1964			fatal++;
1965			CH_ALERT(adapter, "%s (0x%x)\n",
1966				 acts->msg, status & acts->mask);
1967		} else if (acts->msg)
1968			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1969					  acts->msg, status & acts->mask);
1970		mask |= acts->mask;
1971	}
1972	status &= mask;
1973	if (status)                           /* clear processed interrupts */
1974		t4_write_reg(adapter, reg, status);
1975	return fatal;
1976}
1977
1978/*
1979 * Interrupt handler for the PCIE module.
1980 */
1981static void pcie_intr_handler(struct adapter *adapter)
1982{
1983	static struct intr_info sysbus_intr_info[] = {
1984		{ F_RNPP, "RXNP array parity error", -1, 1 },
1985		{ F_RPCP, "RXPC array parity error", -1, 1 },
1986		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1987		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1988		{ F_RFTP, "RXFT array parity error", -1, 1 },
1989		{ 0 }
1990	};
1991	static struct intr_info pcie_port_intr_info[] = {
1992		{ F_TPCP, "TXPC array parity error", -1, 1 },
1993		{ F_TNPP, "TXNP array parity error", -1, 1 },
1994		{ F_TFTP, "TXFT array parity error", -1, 1 },
1995		{ F_TCAP, "TXCA array parity error", -1, 1 },
1996		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1997		{ F_RCAP, "RXCA array parity error", -1, 1 },
1998		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1999		{ F_RDPE, "Rx data parity error", -1, 1 },
2000		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
2001		{ 0 }
2002	};
2003	static struct intr_info pcie_intr_info[] = {
2004		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2005		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2006		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2007		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2008		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2009		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2010		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2011		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2012		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2013		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2014		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2015		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2016		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2017		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2018		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2019		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2020		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2021		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2022		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2023		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2024		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2025		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2026		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2027		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2028		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2029		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2030		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2031		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2032		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2033		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2034		  0 },
2035		{ 0 }
2036	};
2037
2038	static struct intr_info t5_pcie_intr_info[] = {
2039		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2040		  -1, 1 },
2041		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2042		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2043		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2044		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2045		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2046		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2047		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2048		  -1, 1 },
2049		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2050		  -1, 1 },
2051		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
2052		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2053		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2054		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2055		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2056		  -1, 1 },
2057		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2058		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2059		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
2060		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2061		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2062		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2063		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2064		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
2065		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2066		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2067		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2068		  -1, 1 },
2069		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2070		  -1, 1 },
2071		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2072		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2073		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2074		{ F_READRSPERR, "Outbound read error", -1,
2075		  0 },
2076		{ 0 }
2077	};
2078
2079	int fat;
2080
2081	if (is_t4(adapter))
2082		fat = t4_handle_intr_status(adapter,
2083					    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2084					    sysbus_intr_info) +
2085		      t4_handle_intr_status(adapter,
2086					    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2087					    pcie_port_intr_info) +
2088		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2089					    pcie_intr_info);
2090	else
2091		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2092					    t5_pcie_intr_info);
2093	if (fat)
2094		t4_fatal_err(adapter);
2095}
2096
2097/*
2098 * TP interrupt handler.
2099 */
2100static void tp_intr_handler(struct adapter *adapter)
2101{
2102	static struct intr_info tp_intr_info[] = {
2103		{ 0x3fffffff, "TP parity error", -1, 1 },
2104		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2105		{ 0 }
2106	};
2107
2108	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2109		t4_fatal_err(adapter);
2110}
2111
2112/*
2113 * SGE interrupt handler.
2114 */
2115static void sge_intr_handler(struct adapter *adapter)
2116{
2117	u64 v;
2118	u32 err;
2119
2120	static struct intr_info sge_intr_info[] = {
2121		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2122		  "SGE received CPL exceeding IQE size", -1, 1 },
2123		{ F_ERR_INVALID_CIDX_INC,
2124		  "SGE GTS CIDX increment too large", -1, 0 },
2125		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2126		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2127		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2128		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2129		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2130		  0 },
2131		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2132		  0 },
2133		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2134		  0 },
2135		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2136		  0 },
2137		{ F_ERR_ING_CTXT_PRIO,
2138		  "SGE too many priority ingress contexts", -1, 0 },
2139		{ F_ERR_EGR_CTXT_PRIO,
2140		  "SGE too many priority egress contexts", -1, 0 },
2141		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2142		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2143		{ 0 }
2144	};
2145
2146	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2147	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2148	if (v) {
2149		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2150			 (unsigned long long)v);
2151		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2152		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2153	}
2154
2155	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2156
2157	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2158	if (err & F_ERROR_QID_VALID) {
2159		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2160		if (err & F_UNCAPTURED_ERROR)
2161			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2162		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2163			     F_UNCAPTURED_ERROR);
2164	}
2165
2166	if (v != 0)
2167		t4_fatal_err(adapter);
2168}
2169
2170#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2171		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2172#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2173		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2174
2175/*
2176 * CIM interrupt handler.
2177 */
2178static void cim_intr_handler(struct adapter *adapter)
2179{
2180	static struct intr_info cim_intr_info[] = {
2181		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2182		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2183		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2184		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2185		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2186		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2187		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2188		{ 0 }
2189	};
2190	static struct intr_info cim_upintr_info[] = {
2191		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2192		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2193		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2194		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2195		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2196		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2197		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2198		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2199		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2200		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2201		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2202		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2203		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2204		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2205		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2206		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2207		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2208		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2209		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2210		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2211		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2212		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2213		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2214		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2215		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2216		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2217		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2218		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2219		{ 0 }
2220	};
2221	int fat;
2222
2223	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2224		t4_report_fw_error(adapter);
2225
2226	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2227				    cim_intr_info) +
2228	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2229				    cim_upintr_info);
2230	if (fat)
2231		t4_fatal_err(adapter);
2232}
2233
2234/*
2235 * ULP RX interrupt handler.
2236 */
2237static void ulprx_intr_handler(struct adapter *adapter)
2238{
2239	static struct intr_info ulprx_intr_info[] = {
2240		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2241		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2242		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2243		{ 0 }
2244	};
2245
2246	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2247		t4_fatal_err(adapter);
2248}
2249
2250/*
2251 * ULP TX interrupt handler.
2252 */
2253static void ulptx_intr_handler(struct adapter *adapter)
2254{
2255	static struct intr_info ulptx_intr_info[] = {
2256		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2257		  0 },
2258		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2259		  0 },
2260		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2261		  0 },
2262		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2263		  0 },
2264		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2265		{ 0 }
2266	};
2267
2268	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2269		t4_fatal_err(adapter);
2270}
2271
2272/*
2273 * PM TX interrupt handler.
2274 */
2275static void pmtx_intr_handler(struct adapter *adapter)
2276{
2277	static struct intr_info pmtx_intr_info[] = {
2278		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2279		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2280		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2281		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2282		{ 0xffffff0, "PMTX framing error", -1, 1 },
2283		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2284		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2285		  1 },
2286		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2287		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2288		{ 0 }
2289	};
2290
2291	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2292		t4_fatal_err(adapter);
2293}
2294
2295/*
2296 * PM RX interrupt handler.
2297 */
2298static void pmrx_intr_handler(struct adapter *adapter)
2299{
2300	static struct intr_info pmrx_intr_info[] = {
2301		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2302		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2303		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2304		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2305		  1 },
2306		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2307		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2308		{ 0 }
2309	};
2310
2311	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2312		t4_fatal_err(adapter);
2313}
2314
2315/*
2316 * CPL switch interrupt handler.
2317 */
2318static void cplsw_intr_handler(struct adapter *adapter)
2319{
2320	static struct intr_info cplsw_intr_info[] = {
2321		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2322		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2323		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2324		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2325		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2326		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2327		{ 0 }
2328	};
2329
2330	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2331		t4_fatal_err(adapter);
2332}
2333
2334/*
2335 * LE interrupt handler.
2336 */
2337static void le_intr_handler(struct adapter *adap)
2338{
2339	static struct intr_info le_intr_info[] = {
2340		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2341		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2342		{ F_PARITYERR, "LE parity error", -1, 1 },
2343		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2344		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2345		{ 0 }
2346	};
2347
2348	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2349		t4_fatal_err(adap);
2350}
2351
2352/*
2353 * MPS interrupt handler.
2354 */
2355static void mps_intr_handler(struct adapter *adapter)
2356{
2357	static struct intr_info mps_rx_intr_info[] = {
2358		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2359		{ 0 }
2360	};
2361	static struct intr_info mps_tx_intr_info[] = {
2362		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2363		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2364		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2365		  -1, 1 },
2366		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2367		  -1, 1 },
2368		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2369		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2370		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2371		{ 0 }
2372	};
2373	static struct intr_info mps_trc_intr_info[] = {
2374		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2375		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2376		  1 },
2377		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2378		{ 0 }
2379	};
2380	static struct intr_info mps_stat_sram_intr_info[] = {
2381		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2382		{ 0 }
2383	};
2384	static struct intr_info mps_stat_tx_intr_info[] = {
2385		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2386		{ 0 }
2387	};
2388	static struct intr_info mps_stat_rx_intr_info[] = {
2389		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2390		{ 0 }
2391	};
2392	static struct intr_info mps_cls_intr_info[] = {
2393		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2394		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2395		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2396		{ 0 }
2397	};
2398
2399	int fat;
2400
2401	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2402				    mps_rx_intr_info) +
2403	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2404				    mps_tx_intr_info) +
2405	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2406				    mps_trc_intr_info) +
2407	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2408				    mps_stat_sram_intr_info) +
2409	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2410				    mps_stat_tx_intr_info) +
2411	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2412				    mps_stat_rx_intr_info) +
2413	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2414				    mps_cls_intr_info);
2415
2416	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2417	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2418	if (fat)
2419		t4_fatal_err(adapter);
2420}
2421
2422#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2423
2424/*
2425 * EDC/MC interrupt handler.
2426 */
2427static void mem_intr_handler(struct adapter *adapter, int idx)
2428{
2429	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2430
2431	unsigned int addr, cnt_addr, v;
2432
2433	if (idx <= MEM_EDC1) {
2434		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2435		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2436	} else {
2437		if (is_t4(adapter)) {
2438			addr = A_MC_INT_CAUSE;
2439			cnt_addr = A_MC_ECC_STATUS;
2440		} else {
2441			addr = A_MC_P_INT_CAUSE;
2442			cnt_addr = A_MC_P_ECC_STATUS;
2443		}
2444	}
2445
2446	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2447	if (v & F_PERR_INT_CAUSE)
2448		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2449	if (v & F_ECC_CE_INT_CAUSE) {
2450		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2451
2452		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2453		CH_WARN_RATELIMIT(adapter,
2454				  "%u %s correctable ECC data error%s\n",
2455				  cnt, name[idx], cnt > 1 ? "s" : "");
2456	}
2457	if (v & F_ECC_UE_INT_CAUSE)
2458		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2459			 name[idx]);
2460
2461	t4_write_reg(adapter, addr, v);
2462	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2463		t4_fatal_err(adapter);
2464}
2465
2466/*
2467 * MA interrupt handler.
2468 */
2469static void ma_intr_handler(struct adapter *adapter)
2470{
2471	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2472
2473	if (status & F_MEM_PERR_INT_CAUSE) {
2474		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2475			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
2476		if (is_t5(adapter))
2477			CH_ALERT(adapter,
2478				 "MA parity error, parity status %#x\n",
2479				 t4_read_reg(adapter,
2480				 	     A_MA_PARITY_ERROR_STATUS2));
2481	}
2482	if (status & F_MEM_WRAP_INT_CAUSE) {
2483		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2484		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2485			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2486			 G_MEM_WRAP_ADDRESS(v) << 4);
2487	}
2488	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2489	t4_fatal_err(adapter);
2490}
2491
2492/*
2493 * SMB interrupt handler.
2494 */
2495static void smb_intr_handler(struct adapter *adap)
2496{
2497	static struct intr_info smb_intr_info[] = {
2498		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2499		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2500		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2501		{ 0 }
2502	};
2503
2504	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2505		t4_fatal_err(adap);
2506}
2507
2508/*
2509 * NC-SI interrupt handler.
2510 */
2511static void ncsi_intr_handler(struct adapter *adap)
2512{
2513	static struct intr_info ncsi_intr_info[] = {
2514		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2515		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2516		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2517		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2518		{ 0 }
2519	};
2520
2521	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2522		t4_fatal_err(adap);
2523}
2524
2525/*
2526 * XGMAC interrupt handler.
2527 */
2528static void xgmac_intr_handler(struct adapter *adap, int port)
2529{
2530	u32 v, int_cause_reg;
2531
2532	if (is_t4(adap))
2533		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2534	else
2535		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2536
2537	v = t4_read_reg(adap, int_cause_reg);
2538	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2539	if (!v)
2540		return;
2541
2542	if (v & F_TXFIFO_PRTY_ERR)
2543		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2544	if (v & F_RXFIFO_PRTY_ERR)
2545		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2546	t4_write_reg(adap, int_cause_reg, v);
2547	t4_fatal_err(adap);
2548}
2549
2550/*
2551 * PL interrupt handler.
2552 */
2553static void pl_intr_handler(struct adapter *adap)
2554{
2555	static struct intr_info pl_intr_info[] = {
2556		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2557		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2558		{ 0 }
2559	};
2560
2561	static struct intr_info t5_pl_intr_info[] = {
2562		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2563		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2564		{ 0 }
2565	};
2566
2567	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2568	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2569		t4_fatal_err(adap);
2570}
2571
2572#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2573#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2574		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2575		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2576
2577/**
2578 *	t4_slow_intr_handler - control path interrupt handler
2579 *	@adapter: the adapter
2580 *
2581 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2582 *	The designation 'slow' is because it involves register reads, while
2583 *	data interrupts typically don't involve any MMIOs.
2584 */
2585int t4_slow_intr_handler(struct adapter *adapter)
2586{
2587	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2588
2589	if (!(cause & GLBL_INTR_MASK))
2590		return 0;
2591	if (cause & F_CIM)
2592		cim_intr_handler(adapter);
2593	if (cause & F_MPS)
2594		mps_intr_handler(adapter);
2595	if (cause & F_NCSI)
2596		ncsi_intr_handler(adapter);
2597	if (cause & F_PL)
2598		pl_intr_handler(adapter);
2599	if (cause & F_SMB)
2600		smb_intr_handler(adapter);
2601	if (cause & F_XGMAC0)
2602		xgmac_intr_handler(adapter, 0);
2603	if (cause & F_XGMAC1)
2604		xgmac_intr_handler(adapter, 1);
2605	if (cause & F_XGMAC_KR0)
2606		xgmac_intr_handler(adapter, 2);
2607	if (cause & F_XGMAC_KR1)
2608		xgmac_intr_handler(adapter, 3);
2609	if (cause & F_PCIE)
2610		pcie_intr_handler(adapter);
2611	if (cause & F_MC)
2612		mem_intr_handler(adapter, MEM_MC);
2613	if (cause & F_EDC0)
2614		mem_intr_handler(adapter, MEM_EDC0);
2615	if (cause & F_EDC1)
2616		mem_intr_handler(adapter, MEM_EDC1);
2617	if (cause & F_LE)
2618		le_intr_handler(adapter);
2619	if (cause & F_TP)
2620		tp_intr_handler(adapter);
2621	if (cause & F_MA)
2622		ma_intr_handler(adapter);
2623	if (cause & F_PM_TX)
2624		pmtx_intr_handler(adapter);
2625	if (cause & F_PM_RX)
2626		pmrx_intr_handler(adapter);
2627	if (cause & F_ULP_RX)
2628		ulprx_intr_handler(adapter);
2629	if (cause & F_CPL_SWITCH)
2630		cplsw_intr_handler(adapter);
2631	if (cause & F_SGE)
2632		sge_intr_handler(adapter);
2633	if (cause & F_ULP_TX)
2634		ulptx_intr_handler(adapter);
2635
2636	/* Clear the interrupts just processed for which we are the master. */
2637	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2638	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2639	return 1;
2640}
2641
2642/**
2643 *	t4_intr_enable - enable interrupts
2644 *	@adapter: the adapter whose interrupts should be enabled
2645 *
2646 *	Enable PF-specific interrupts for the calling function and the top-level
2647 *	interrupt concentrator for global interrupts.  Interrupts are already
2648 *	enabled at each module,	here we just enable the roots of the interrupt
2649 *	hierarchies.
2650 *
2651 *	Note: this function should be called only when the driver manages
2652 *	non PF-specific interrupts from the various HW modules.  Only one PCI
2653 *	function at a time should be doing this.
2654 */
2655void t4_intr_enable(struct adapter *adapter)
2656{
2657	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2658
2659	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2660		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2661		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2662		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2663		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2664		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2665		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2666		     F_EGRESS_SIZE_ERR);
2667	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2668	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2669}
2670
2671/**
2672 *	t4_intr_disable - disable interrupts
2673 *	@adapter: the adapter whose interrupts should be disabled
2674 *
2675 *	Disable interrupts.  We only disable the top-level interrupt
2676 *	concentrators.  The caller must be a PCI function managing global
2677 *	interrupts.
2678 */
2679void t4_intr_disable(struct adapter *adapter)
2680{
2681	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2682
2683	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2684	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2685}
2686
2687/**
2688 *	t4_intr_clear - clear all interrupts
2689 *	@adapter: the adapter whose interrupts should be cleared
2690 *
2691 *	Clears all interrupts.  The caller must be a PCI function managing
2692 *	global interrupts.
2693 */
2694void t4_intr_clear(struct adapter *adapter)
2695{
2696	static const unsigned int cause_reg[] = {
2697		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2698		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2699		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
2700		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2701		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2702		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2703		A_TP_INT_CAUSE,
2704		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2705		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2706		A_MPS_RX_PERR_INT_CAUSE,
2707		A_CPL_INTR_CAUSE,
2708		MYPF_REG(A_PL_PF_INT_CAUSE),
2709		A_PL_PL_INT_CAUSE,
2710		A_LE_DB_INT_CAUSE,
2711	};
2712
2713	unsigned int i;
2714
2715	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2716		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2717
2718	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2719				A_MC_P_INT_CAUSE, 0xffffffff);
2720
2721	if (is_t4(adapter)) {
2722		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2723				0xffffffff);
2724		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2725				0xffffffff);
2726	} else
2727		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
2728
2729	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2730	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2731}
2732
2733/**
2734 *	hash_mac_addr - return the hash value of a MAC address
2735 *	@addr: the 48-bit Ethernet MAC address
2736 *
2737 *	Hashes a MAC address according to the hash function used by HW inexact
2738 *	(hash) address matching.
2739 */
2740static int hash_mac_addr(const u8 *addr)
2741{
2742	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2743	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2744	a ^= b;
2745	a ^= (a >> 12);
2746	a ^= (a >> 6);
2747	return a & 0x3f;
2748}
2749
2750/**
2751 *	t4_config_rss_range - configure a portion of the RSS mapping table
2752 *	@adapter: the adapter
2753 *	@mbox: mbox to use for the FW command
2754 *	@viid: virtual interface whose RSS subtable is to be written
2755 *	@start: start entry in the table to write
2756 *	@n: how many table entries to write
2757 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2758 *	@nrspq: number of values in @rspq
2759 *
2760 *	Programs the selected part of the VI's RSS mapping table with the
2761 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2762 *	until the full table range is populated.
2763 *
2764 *	The caller must ensure the values in @rspq are in the range allowed for
2765 *	@viid.
2766 */
2767int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2768			int start, int n, const u16 *rspq, unsigned int nrspq)
2769{
2770	int ret;
2771	const u16 *rsp = rspq;
2772	const u16 *rsp_end = rspq + nrspq;
2773	struct fw_rss_ind_tbl_cmd cmd;
2774
2775	memset(&cmd, 0, sizeof(cmd));
2776	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2777			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2778			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2779	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2780
2781
2782	/*
2783	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2784	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2785	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2786	 * reserved.
2787	 */
2788	while (n > 0) {
2789		int nq = min(n, 32);
2790		int nq_packed = 0;
2791		__be32 *qp = &cmd.iq0_to_iq2;
2792
2793		/*
2794		 * Set up the firmware RSS command header to send the next
2795		 * "nq" Ingress Queue IDs to the firmware.
2796		 */
2797		cmd.niqid = htons(nq);
2798		cmd.startidx = htons(start);
2799
2800		/*
2801		 * "nq" more done for the start of the next loop.
2802		 */
2803		start += nq;
2804		n -= nq;
2805
2806		/*
2807		 * While there are still Ingress Queue IDs to stuff into the
2808		 * current firmware RSS command, retrieve them from the
2809		 * Ingress Queue ID array and insert them into the command.
2810		 */
2811		while (nq > 0) {
2812			/*
2813			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2814			 * around the Ingress Queue ID array if necessary) and
2815			 * insert them into the firmware RSS command at the
2816			 * current 3-tuple position within the commad.
2817			 */
2818			u16 qbuf[3];
2819			u16 *qbp = qbuf;
2820			int nqbuf = min(3, nq);
2821
2822			nq -= nqbuf;
2823			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2824			while (nqbuf && nq_packed < 32) {
2825				nqbuf--;
2826				nq_packed++;
2827				*qbp++ = *rsp++;
2828				if (rsp >= rsp_end)
2829					rsp = rspq;
2830			}
2831			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2832					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2833					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2834		}
2835
2836		/*
2837		 * Send this portion of the RRS table update to the firmware;
2838		 * bail out on any errors.
2839		 */
2840		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2841		if (ret)
2842			return ret;
2843	}
2844
2845	return 0;
2846}
2847
2848/**
2849 *	t4_config_glbl_rss - configure the global RSS mode
2850 *	@adapter: the adapter
2851 *	@mbox: mbox to use for the FW command
2852 *	@mode: global RSS mode
2853 *	@flags: mode-specific flags
2854 *
2855 *	Sets the global RSS mode.
2856 */
2857int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2858		       unsigned int flags)
2859{
2860	struct fw_rss_glb_config_cmd c;
2861
2862	memset(&c, 0, sizeof(c));
2863	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2864			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2865	c.retval_len16 = htonl(FW_LEN16(c));
2866	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2867		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2868	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2869		c.u.basicvirtual.mode_pkd =
2870			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2871		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2872	} else
2873		return -EINVAL;
2874	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2875}
2876
2877/**
2878 *	t4_config_vi_rss - configure per VI RSS settings
2879 *	@adapter: the adapter
2880 *	@mbox: mbox to use for the FW command
2881 *	@viid: the VI id
2882 *	@flags: RSS flags
2883 *	@defq: id of the default RSS queue for the VI.
2884 *
2885 *	Configures VI-specific RSS properties.
2886 */
2887int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2888		     unsigned int flags, unsigned int defq)
2889{
2890	struct fw_rss_vi_config_cmd c;
2891
2892	memset(&c, 0, sizeof(c));
2893	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2894			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2895			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2896	c.retval_len16 = htonl(FW_LEN16(c));
2897	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2898					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2899	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2900}
2901
2902/* Read an RSS table row */
2903static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2904{
2905	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2906	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2907				   5, 0, val);
2908}
2909
2910/**
2911 *	t4_read_rss - read the contents of the RSS mapping table
2912 *	@adapter: the adapter
2913 *	@map: holds the contents of the RSS mapping table
2914 *
2915 *	Reads the contents of the RSS hash->queue mapping table.
2916 */
2917int t4_read_rss(struct adapter *adapter, u16 *map)
2918{
2919	u32 val;
2920	int i, ret;
2921
2922	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2923		ret = rd_rss_row(adapter, i, &val);
2924		if (ret)
2925			return ret;
2926		*map++ = G_LKPTBLQUEUE0(val);
2927		*map++ = G_LKPTBLQUEUE1(val);
2928	}
2929	return 0;
2930}
2931
2932/**
2933 *	t4_read_rss_key - read the global RSS key
2934 *	@adap: the adapter
2935 *	@key: 10-entry array holding the 320-bit RSS key
2936 *
2937 *	Reads the global 320-bit RSS key.
2938 */
2939void t4_read_rss_key(struct adapter *adap, u32 *key)
2940{
2941	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2942			 A_TP_RSS_SECRET_KEY0);
2943}
2944
2945/**
2946 *	t4_write_rss_key - program one of the RSS keys
2947 *	@adap: the adapter
2948 *	@key: 10-entry array holding the 320-bit RSS key
2949 *	@idx: which RSS key to write
2950 *
2951 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2952 *	0..15 the corresponding entry in the RSS key table is written,
2953 *	otherwise the global RSS key is written.
2954 */
2955void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2956{
2957	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2958			  A_TP_RSS_SECRET_KEY0);
2959	if (idx >= 0 && idx < 16)
2960		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2961			     V_KEYWRADDR(idx) | F_KEYWREN);
2962}
2963
2964/**
2965 *	t4_read_rss_pf_config - read PF RSS Configuration Table
2966 *	@adapter: the adapter
2967 *	@index: the entry in the PF RSS table to read
2968 *	@valp: where to store the returned value
2969 *
2970 *	Reads the PF RSS Configuration Table at the specified index and returns
2971 *	the value found there.
2972 */
2973void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2974{
2975	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2976			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2977}
2978
2979/**
2980 *	t4_write_rss_pf_config - write PF RSS Configuration Table
2981 *	@adapter: the adapter
2982 *	@index: the entry in the VF RSS table to read
2983 *	@val: the value to store
2984 *
2985 *	Writes the PF RSS Configuration Table at the specified index with the
2986 *	specified value.
2987 */
2988void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2989{
2990	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2991			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2992}
2993
2994/**
2995 *	t4_read_rss_vf_config - read VF RSS Configuration Table
2996 *	@adapter: the adapter
2997 *	@index: the entry in the VF RSS table to read
2998 *	@vfl: where to store the returned VFL
2999 *	@vfh: where to store the returned VFH
3000 *
3001 *	Reads the VF RSS Configuration Table at the specified index and returns
3002 *	the (VFL, VFH) values found there.
3003 */
3004void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
3005			   u32 *vfl, u32 *vfh)
3006{
3007	u32 vrt;
3008
3009	/*
3010	 * Request that the index'th VF Table values be read into VFL/VFH.
3011	 */
3012	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3013	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
3014	vrt |= V_VFWRADDR(index) | F_VFRDEN;
3015	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3016
3017	/*
3018	 * Grab the VFL/VFH values ...
3019	 */
3020	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3021			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3022	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3023			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3024}
3025
3026/**
3027 *	t4_write_rss_vf_config - write VF RSS Configuration Table
3028 *
3029 *	@adapter: the adapter
3030 *	@index: the entry in the VF RSS table to write
3031 *	@vfl: the VFL to store
3032 *	@vfh: the VFH to store
3033 *
3034 *	Writes the VF RSS Configuration Table at the specified index with the
3035 *	specified (VFL, VFH) values.
3036 */
3037void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3038			    u32 vfl, u32 vfh)
3039{
3040	u32 vrt;
3041
3042	/*
3043	 * Load up VFL/VFH with the values to be written ...
3044	 */
3045	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3046			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3047	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3048			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3049
3050	/*
3051	 * Write the VFL/VFH into the VF Table at index'th location.
3052	 */
3053	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3054	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3055	vrt |= V_VFWRADDR(index) | F_VFWREN;
3056	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3057}
3058
3059/**
3060 *	t4_read_rss_pf_map - read PF RSS Map
3061 *	@adapter: the adapter
3062 *
3063 *	Reads the PF RSS Map register and returns its value.
3064 */
3065u32 t4_read_rss_pf_map(struct adapter *adapter)
3066{
3067	u32 pfmap;
3068
3069	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3070			 &pfmap, 1, A_TP_RSS_PF_MAP);
3071	return pfmap;
3072}
3073
3074/**
3075 *	t4_write_rss_pf_map - write PF RSS Map
3076 *	@adapter: the adapter
3077 *	@pfmap: PF RSS Map value
3078 *
3079 *	Writes the specified value to the PF RSS Map register.
3080 */
3081void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3082{
3083	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3084			  &pfmap, 1, A_TP_RSS_PF_MAP);
3085}
3086
3087/**
3088 *	t4_read_rss_pf_mask - read PF RSS Mask
3089 *	@adapter: the adapter
3090 *
3091 *	Reads the PF RSS Mask register and returns its value.
3092 */
3093u32 t4_read_rss_pf_mask(struct adapter *adapter)
3094{
3095	u32 pfmask;
3096
3097	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3098			 &pfmask, 1, A_TP_RSS_PF_MSK);
3099	return pfmask;
3100}
3101
3102/**
3103 *	t4_write_rss_pf_mask - write PF RSS Mask
3104 *	@adapter: the adapter
3105 *	@pfmask: PF RSS Mask value
3106 *
3107 *	Writes the specified value to the PF RSS Mask register.
3108 */
3109void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3110{
3111	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3112			  &pfmask, 1, A_TP_RSS_PF_MSK);
3113}
3114
3115static void refresh_vlan_pri_map(struct adapter *adap)
3116{
3117
3118        t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3119                         &adap->params.tp.vlan_pri_map, 1,
3120                         A_TP_VLAN_PRI_MAP);
3121
3122	/*
3123	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3124	 * shift positions of several elements of the Compressed Filter Tuple
3125	 * for this adapter which we need frequently ...
3126	 */
3127	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3128	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3129	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3130	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
3131
3132	/*
3133	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3134	 * represents the presense of an Outer VLAN instead of a VNIC ID.
3135	 */
3136	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3137		adap->params.tp.vnic_shift = -1;
3138}
3139
3140/**
3141 *	t4_set_filter_mode - configure the optional components of filter tuples
3142 *	@adap: the adapter
3143 *	@mode_map: a bitmap selcting which optional filter components to enable
3144 *
3145 *	Sets the filter mode by selecting the optional components to enable
3146 *	in filter tuples.  Returns 0 on success and a negative error if the
3147 *	requested mode needs more bits than are available for optional
3148 *	components.
3149 */
3150int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3151{
3152	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3153
3154	int i, nbits = 0;
3155
3156	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3157		if (mode_map & (1 << i))
3158			nbits += width[i];
3159	if (nbits > FILTER_OPT_LEN)
3160		return -EINVAL;
3161	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3162			  A_TP_VLAN_PRI_MAP);
3163	refresh_vlan_pri_map(adap);
3164
3165	return 0;
3166}
3167
3168/**
3169 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3170 *	@adap: the adapter
3171 *	@v4: holds the TCP/IP counter values
3172 *	@v6: holds the TCP/IPv6 counter values
3173 *
3174 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3175 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3176 */
3177void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3178			 struct tp_tcp_stats *v6)
3179{
3180	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3181
3182#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3183#define STAT(x)     val[STAT_IDX(x)]
3184#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3185
3186	if (v4) {
3187		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3188				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3189		v4->tcpOutRsts = STAT(OUT_RST);
3190		v4->tcpInSegs  = STAT64(IN_SEG);
3191		v4->tcpOutSegs = STAT64(OUT_SEG);
3192		v4->tcpRetransSegs = STAT64(RXT_SEG);
3193	}
3194	if (v6) {
3195		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3196				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3197		v6->tcpOutRsts = STAT(OUT_RST);
3198		v6->tcpInSegs  = STAT64(IN_SEG);
3199		v6->tcpOutSegs = STAT64(OUT_SEG);
3200		v6->tcpRetransSegs = STAT64(RXT_SEG);
3201	}
3202#undef STAT64
3203#undef STAT
3204#undef STAT_IDX
3205}
3206
3207/**
3208 *	t4_tp_get_err_stats - read TP's error MIB counters
3209 *	@adap: the adapter
3210 *	@st: holds the counter values
3211 *
3212 *	Returns the values of TP's error counters.
3213 */
3214void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3215{
3216	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3217			 12, A_TP_MIB_MAC_IN_ERR_0);
3218	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3219			 8, A_TP_MIB_TNL_CNG_DROP_0);
3220	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3221			 4, A_TP_MIB_TNL_DROP_0);
3222	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3223			 4, A_TP_MIB_OFD_VLN_DROP_0);
3224	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3225			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3226	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3227			 2, A_TP_MIB_OFD_ARP_DROP);
3228}
3229
3230/**
3231 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3232 *	@adap: the adapter
3233 *	@st: holds the counter values
3234 *
3235 *	Returns the values of TP's proxy counters.
3236 */
3237void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3238{
3239	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3240			 4, A_TP_MIB_TNL_LPBK_0);
3241}
3242
3243/**
3244 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3245 *	@adap: the adapter
3246 *	@st: holds the counter values
3247 *
3248 *	Returns the values of TP's CPL counters.
3249 */
3250void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3251{
3252	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3253			 8, A_TP_MIB_CPL_IN_REQ_0);
3254}
3255
3256/**
3257 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3258 *	@adap: the adapter
3259 *	@st: holds the counter values
3260 *
3261 *	Returns the values of TP's RDMA counters.
3262 */
3263void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3264{
3265	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3266			 2, A_TP_MIB_RQE_DFR_MOD);
3267}
3268
3269/**
3270 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3271 *	@adap: the adapter
3272 *	@idx: the port index
3273 *	@st: holds the counter values
3274 *
3275 *	Returns the values of TP's FCoE counters for the selected port.
3276 */
3277void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3278		       struct tp_fcoe_stats *st)
3279{
3280	u32 val[2];
3281
3282	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3283			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3284	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3285			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3286	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3287			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3288	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3289}
3290
3291/**
3292 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3293 *	@adap: the adapter
3294 *	@st: holds the counter values
3295 *
3296 *	Returns the values of TP's counters for non-TCP directly-placed packets.
3297 */
3298void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3299{
3300	u32 val[4];
3301
3302	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3303			 A_TP_MIB_USM_PKTS);
3304	st->frames = val[0];
3305	st->drops = val[1];
3306	st->octets = ((u64)val[2] << 32) | val[3];
3307}
3308
3309/**
3310 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3311 *	@adap: the adapter
3312 *	@mtus: where to store the MTU values
3313 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3314 *
3315 *	Reads the HW path MTU table.
3316 */
3317void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3318{
3319	u32 v;
3320	int i;
3321
3322	for (i = 0; i < NMTUS; ++i) {
3323		t4_write_reg(adap, A_TP_MTU_TABLE,
3324			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3325		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3326		mtus[i] = G_MTUVALUE(v);
3327		if (mtu_log)
3328			mtu_log[i] = G_MTUWIDTH(v);
3329	}
3330}
3331
3332/**
3333 *	t4_read_cong_tbl - reads the congestion control table
3334 *	@adap: the adapter
3335 *	@incr: where to store the alpha values
3336 *
3337 *	Reads the additive increments programmed into the HW congestion
3338 *	control table.
3339 */
3340void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3341{
3342	unsigned int mtu, w;
3343
3344	for (mtu = 0; mtu < NMTUS; ++mtu)
3345		for (w = 0; w < NCCTRL_WIN; ++w) {
3346			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3347				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3348			incr[mtu][w] = (u16)t4_read_reg(adap,
3349						A_TP_CCTRL_TABLE) & 0x1fff;
3350		}
3351}
3352
3353/**
3354 *	t4_read_pace_tbl - read the pace table
3355 *	@adap: the adapter
3356 *	@pace_vals: holds the returned values
3357 *
3358 *	Returns the values of TP's pace table in microseconds.
3359 */
3360void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3361{
3362	unsigned int i, v;
3363
3364	for (i = 0; i < NTX_SCHED; i++) {
3365		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3366		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3367		pace_vals[i] = dack_ticks_to_usec(adap, v);
3368	}
3369}
3370
3371/**
3372 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3373 *	@adap: the adapter
3374 *	@addr: the indirect TP register address
3375 *	@mask: specifies the field within the register to modify
3376 *	@val: new value for the field
3377 *
3378 *	Sets a field of an indirect TP register to the given value.
3379 */
3380void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3381			    unsigned int mask, unsigned int val)
3382{
3383	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3384	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3385	t4_write_reg(adap, A_TP_PIO_DATA, val);
3386}
3387
3388/**
3389 *	init_cong_ctrl - initialize congestion control parameters
3390 *	@a: the alpha values for congestion control
3391 *	@b: the beta values for congestion control
3392 *
3393 *	Initialize the congestion control parameters.
3394 */
3395static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3396{
3397	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3398	a[9] = 2;
3399	a[10] = 3;
3400	a[11] = 4;
3401	a[12] = 5;
3402	a[13] = 6;
3403	a[14] = 7;
3404	a[15] = 8;
3405	a[16] = 9;
3406	a[17] = 10;
3407	a[18] = 14;
3408	a[19] = 17;
3409	a[20] = 21;
3410	a[21] = 25;
3411	a[22] = 30;
3412	a[23] = 35;
3413	a[24] = 45;
3414	a[25] = 60;
3415	a[26] = 80;
3416	a[27] = 100;
3417	a[28] = 200;
3418	a[29] = 300;
3419	a[30] = 400;
3420	a[31] = 500;
3421
3422	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3423	b[9] = b[10] = 1;
3424	b[11] = b[12] = 2;
3425	b[13] = b[14] = b[15] = b[16] = 3;
3426	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3427	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3428	b[28] = b[29] = 6;
3429	b[30] = b[31] = 7;
3430}
3431
3432/* The minimum additive increment value for the congestion control table */
3433#define CC_MIN_INCR 2U
3434
3435/**
3436 *	t4_load_mtus - write the MTU and congestion control HW tables
3437 *	@adap: the adapter
3438 *	@mtus: the values for the MTU table
3439 *	@alpha: the values for the congestion control alpha parameter
3440 *	@beta: the values for the congestion control beta parameter
3441 *
3442 *	Write the HW MTU table with the supplied MTUs and the high-speed
3443 *	congestion control table with the supplied alpha, beta, and MTUs.
3444 *	We write the two tables together because the additive increments
3445 *	depend on the MTUs.
3446 */
3447void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3448		  const unsigned short *alpha, const unsigned short *beta)
3449{
3450	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3451		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3452		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3453		28672, 40960, 57344, 81920, 114688, 163840, 229376
3454	};
3455
3456	unsigned int i, w;
3457
3458	for (i = 0; i < NMTUS; ++i) {
3459		unsigned int mtu = mtus[i];
3460		unsigned int log2 = fls(mtu);
3461
3462		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3463			log2--;
3464		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3465			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3466
3467		for (w = 0; w < NCCTRL_WIN; ++w) {
3468			unsigned int inc;
3469
3470			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3471				  CC_MIN_INCR);
3472
3473			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3474				     (w << 16) | (beta[w] << 13) | inc);
3475		}
3476	}
3477}
3478
3479/**
3480 *	t4_set_pace_tbl - set the pace table
3481 *	@adap: the adapter
3482 *	@pace_vals: the pace values in microseconds
3483 *	@start: index of the first entry in the HW pace table to set
3484 *	@n: how many entries to set
3485 *
3486 *	Sets (a subset of the) HW pace table.
3487 */
3488int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3489		     unsigned int start, unsigned int n)
3490{
3491	unsigned int vals[NTX_SCHED], i;
3492	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3493
3494	if (n > NTX_SCHED)
3495	    return -ERANGE;
3496
3497	/* convert values from us to dack ticks, rounding to closest value */
3498	for (i = 0; i < n; i++, pace_vals++) {
3499		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3500		if (vals[i] > 0x7ff)
3501			return -ERANGE;
3502		if (*pace_vals && vals[i] == 0)
3503			return -ERANGE;
3504	}
3505	for (i = 0; i < n; i++, start++)
3506		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3507	return 0;
3508}
3509
3510/**
3511 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3512 *	@adap: the adapter
3513 *	@kbps: target rate in Kbps
3514 *	@sched: the scheduler index
3515 *
3516 *	Configure a Tx HW scheduler for the target rate.
3517 */
3518int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3519{
3520	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3521	unsigned int clk = adap->params.vpd.cclk * 1000;
3522	unsigned int selected_cpt = 0, selected_bpt = 0;
3523
3524	if (kbps > 0) {
3525		kbps *= 125;     /* -> bytes */
3526		for (cpt = 1; cpt <= 255; cpt++) {
3527			tps = clk / cpt;
3528			bpt = (kbps + tps / 2) / tps;
3529			if (bpt > 0 && bpt <= 255) {
3530				v = bpt * tps;
3531				delta = v >= kbps ? v - kbps : kbps - v;
3532				if (delta < mindelta) {
3533					mindelta = delta;
3534					selected_cpt = cpt;
3535					selected_bpt = bpt;
3536				}
3537			} else if (selected_cpt)
3538				break;
3539		}
3540		if (!selected_cpt)
3541			return -EINVAL;
3542	}
3543	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3544		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3545	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3546	if (sched & 1)
3547		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3548	else
3549		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3550	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3551	return 0;
3552}
3553
3554/**
3555 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3556 *	@adap: the adapter
3557 *	@sched: the scheduler index
3558 *	@ipg: the interpacket delay in tenths of nanoseconds
3559 *
3560 *	Set the interpacket delay for a HW packet rate scheduler.
3561 */
3562int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3563{
3564	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3565
3566	/* convert ipg to nearest number of core clocks */
3567	ipg *= core_ticks_per_usec(adap);
3568	ipg = (ipg + 5000) / 10000;
3569	if (ipg > M_TXTIMERSEPQ0)
3570		return -EINVAL;
3571
3572	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3573	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3574	if (sched & 1)
3575		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3576	else
3577		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3578	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3579	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3580	return 0;
3581}
3582
3583/**
3584 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3585 *	@adap: the adapter
3586 *	@sched: the scheduler index
3587 *	@kbps: the byte rate in Kbps
3588 *	@ipg: the interpacket delay in tenths of nanoseconds
3589 *
3590 *	Return the current configuration of a HW Tx scheduler.
3591 */
3592void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3593		     unsigned int *ipg)
3594{
3595	unsigned int v, addr, bpt, cpt;
3596
3597	if (kbps) {
3598		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3599		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3600		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3601		if (sched & 1)
3602			v >>= 16;
3603		bpt = (v >> 8) & 0xff;
3604		cpt = v & 0xff;
3605		if (!cpt)
3606			*kbps = 0;        /* scheduler disabled */
3607		else {
3608			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3609			*kbps = (v * bpt) / 125;
3610		}
3611	}
3612	if (ipg) {
3613		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3614		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3615		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3616		if (sched & 1)
3617			v >>= 16;
3618		v &= 0xffff;
3619		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3620	}
3621}
3622
3623/*
3624 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3625 * clocks.  The formula is
3626 *
3627 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3628 *
3629 * which is equivalent to
3630 *
3631 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3632 */
3633static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3634{
3635	u64 v = bytes256 * adap->params.vpd.cclk;
3636
3637	return v * 62 + v / 2;
3638}
3639
3640/**
3641 *	t4_get_chan_txrate - get the current per channel Tx rates
3642 *	@adap: the adapter
3643 *	@nic_rate: rates for NIC traffic
3644 *	@ofld_rate: rates for offloaded traffic
3645 *
3646 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3647 *	for each channel.
3648 */
3649void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3650{
3651	u32 v;
3652
3653	v = t4_read_reg(adap, A_TP_TX_TRATE);
3654	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3655	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3656	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3657	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3658
3659	v = t4_read_reg(adap, A_TP_TX_ORATE);
3660	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3661	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3662	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3663	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3664}
3665
3666/**
3667 *	t4_set_trace_filter - configure one of the tracing filters
3668 *	@adap: the adapter
3669 *	@tp: the desired trace filter parameters
3670 *	@idx: which filter to configure
3671 *	@enable: whether to enable or disable the filter
3672 *
3673 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3674 *	it indicates that the filter is already written in the register and it
3675 *	just needs to be enabled or disabled.
3676 */
3677int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3678    int idx, int enable)
3679{
3680	int i, ofst = idx * 4;
3681	u32 data_reg, mask_reg, cfg;
3682	u32 multitrc = F_TRCMULTIFILTER;
3683	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3684
3685	if (idx < 0 || idx >= NTRACE)
3686		return -EINVAL;
3687
3688	if (tp == NULL || !enable) {
3689		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3690		    enable ? en : 0);
3691		return 0;
3692	}
3693
3694	/*
3695	 * TODO - After T4 data book is updated, specify the exact
3696	 * section below.
3697	 *
3698	 * See T4 data book - MPS section for a complete description
3699	 * of the below if..else handling of A_MPS_TRC_CFG register
3700	 * value.
3701	 */
3702	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3703	if (cfg & F_TRCMULTIFILTER) {
3704		/*
3705		 * If multiple tracers are enabled, then maximum
3706		 * capture size is 2.5KB (FIFO size of a single channel)
3707		 * minus 2 flits for CPL_TRACE_PKT header.
3708		 */
3709		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3710			return -EINVAL;
3711	} else {
3712		/*
3713		 * If multiple tracers are disabled, to avoid deadlocks
3714		 * maximum packet capture size of 9600 bytes is recommended.
3715		 * Also in this mode, only trace0 can be enabled and running.
3716		 */
3717		multitrc = 0;
3718		if (tp->snap_len > 9600 || idx)
3719			return -EINVAL;
3720	}
3721
3722	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3723	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3724	    tp->min_len > M_TFMINPKTSIZE)
3725		return -EINVAL;
3726
3727	/* stop the tracer we'll be changing */
3728	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3729
3730	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3731	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3732	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3733
3734	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3735		t4_write_reg(adap, data_reg, tp->data[i]);
3736		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3737	}
3738	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3739		     V_TFCAPTUREMAX(tp->snap_len) |
3740		     V_TFMINPKTSIZE(tp->min_len));
3741	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3742		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3743		     (is_t4(adap) ?
3744		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3745		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3746
3747	return 0;
3748}
3749
3750/**
3751 *	t4_get_trace_filter - query one of the tracing filters
3752 *	@adap: the adapter
3753 *	@tp: the current trace filter parameters
3754 *	@idx: which trace filter to query
3755 *	@enabled: non-zero if the filter is enabled
3756 *
3757 *	Returns the current settings of one of the HW tracing filters.
3758 */
3759void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3760			 int *enabled)
3761{
3762	u32 ctla, ctlb;
3763	int i, ofst = idx * 4;
3764	u32 data_reg, mask_reg;
3765
3766	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3767	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3768
3769	if (is_t4(adap)) {
3770		*enabled = !!(ctla & F_TFEN);
3771		tp->port =  G_TFPORT(ctla);
3772		tp->invert = !!(ctla & F_TFINVERTMATCH);
3773	} else {
3774		*enabled = !!(ctla & F_T5_TFEN);
3775		tp->port = G_T5_TFPORT(ctla);
3776		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3777	}
3778	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3779	tp->min_len = G_TFMINPKTSIZE(ctlb);
3780	tp->skip_ofst = G_TFOFFSET(ctla);
3781	tp->skip_len = G_TFLENGTH(ctla);
3782
3783	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3784	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3785	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3786
3787	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3788		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3789		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3790	}
3791}
3792
3793/**
3794 *	t4_pmtx_get_stats - returns the HW stats from PMTX
3795 *	@adap: the adapter
3796 *	@cnt: where to store the count statistics
3797 *	@cycles: where to store the cycle statistics
3798 *
3799 *	Returns performance statistics from PMTX.
3800 */
3801void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3802{
3803	int i;
3804	u32 data[2];
3805
3806	for (i = 0; i < PM_NSTATS; i++) {
3807		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3808		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3809		if (is_t4(adap))
3810			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3811		else {
3812			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3813					 A_PM_TX_DBG_DATA, data, 2,
3814					 A_PM_TX_DBG_STAT_MSB);
3815			cycles[i] = (((u64)data[0] << 32) | data[1]);
3816		}
3817	}
3818}
3819
3820/**
3821 *	t4_pmrx_get_stats - returns the HW stats from PMRX
3822 *	@adap: the adapter
3823 *	@cnt: where to store the count statistics
3824 *	@cycles: where to store the cycle statistics
3825 *
3826 *	Returns performance statistics from PMRX.
3827 */
3828void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3829{
3830	int i;
3831	u32 data[2];
3832
3833	for (i = 0; i < PM_NSTATS; i++) {
3834		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3835		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3836		if (is_t4(adap))
3837			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3838		else {
3839			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3840					 A_PM_RX_DBG_DATA, data, 2,
3841					 A_PM_RX_DBG_STAT_MSB);
3842			cycles[i] = (((u64)data[0] << 32) | data[1]);
3843		}
3844	}
3845}
3846
3847/**
3848 *	get_mps_bg_map - return the buffer groups associated with a port
3849 *	@adap: the adapter
3850 *	@idx: the port index
3851 *
3852 *	Returns a bitmap indicating which MPS buffer groups are associated
3853 *	with the given port.  Bit i is set if buffer group i is used by the
3854 *	port.
3855 */
3856static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3857{
3858	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3859
3860	if (n == 0)
3861		return idx == 0 ? 0xf : 0;
3862	if (n == 1)
3863		return idx < 2 ? (3 << (2 * idx)) : 0;
3864	return 1 << idx;
3865}
3866
3867/**
3868 *      t4_get_port_stats_offset - collect port stats relative to a previous
3869 *                                 snapshot
3870 *      @adap: The adapter
3871 *      @idx: The port
3872 *      @stats: Current stats to fill
3873 *      @offset: Previous stats snapshot
3874 */
3875void t4_get_port_stats_offset(struct adapter *adap, int idx,
3876		struct port_stats *stats,
3877		struct port_stats *offset)
3878{
3879	u64 *s, *o;
3880	int i;
3881
3882	t4_get_port_stats(adap, idx, stats);
3883	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
3884			i < (sizeof(struct port_stats)/sizeof(u64)) ;
3885			i++, s++, o++)
3886		*s -= *o;
3887}
3888
3889/**
3890 *	t4_get_port_stats - collect port statistics
3891 *	@adap: the adapter
3892 *	@idx: the port index
3893 *	@p: the stats structure to fill
3894 *
3895 *	Collect statistics related to the given port from HW.
3896 */
3897void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3898{
3899	u32 bgmap = get_mps_bg_map(adap, idx);
3900
3901#define GET_STAT(name) \
3902	t4_read_reg64(adap, \
3903	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3904	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3905#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3906
3907	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3908	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3909	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3910	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3911	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3912	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3913	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3914	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3915	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3916	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3917	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3918	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3919	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3920	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3921	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3922	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3923	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3924	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3925	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3926	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3927	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3928	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3929	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3930
3931	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3932	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3933	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3934	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3935	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3936	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3937	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3938	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3939	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3940	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3941	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3942	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3943	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3944	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3945	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3946	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3947	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3948	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3949	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3950	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3951	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3952	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3953	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3954	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3955	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3956	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3957	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3958
3959	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3960	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3961	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3962	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3963	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3964	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3965	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3966	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3967
3968#undef GET_STAT
3969#undef GET_STAT_COM
3970}
3971
3972/**
3973 *	t4_clr_port_stats - clear port statistics
3974 *	@adap: the adapter
3975 *	@idx: the port index
3976 *
3977 *	Clear HW statistics for the given port.
3978 */
3979void t4_clr_port_stats(struct adapter *adap, int idx)
3980{
3981	unsigned int i;
3982	u32 bgmap = get_mps_bg_map(adap, idx);
3983	u32 port_base_addr;
3984
3985	if (is_t4(adap))
3986		port_base_addr = PORT_BASE(idx);
3987	else
3988		port_base_addr = T5_PORT_BASE(idx);
3989
3990	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3991			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3992		t4_write_reg(adap, port_base_addr + i, 0);
3993	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3994			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3995		t4_write_reg(adap, port_base_addr + i, 0);
3996	for (i = 0; i < 4; i++)
3997		if (bgmap & (1 << i)) {
3998			t4_write_reg(adap,
3999				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
4000			t4_write_reg(adap,
4001				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
4002		}
4003}
4004
4005/**
4006 *	t4_get_lb_stats - collect loopback port statistics
4007 *	@adap: the adapter
4008 *	@idx: the loopback port index
4009 *	@p: the stats structure to fill
4010 *
4011 *	Return HW statistics for the given loopback port.
4012 */
4013void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
4014{
4015	u32 bgmap = get_mps_bg_map(adap, idx);
4016
4017#define GET_STAT(name) \
4018	t4_read_reg64(adap, \
4019	(is_t4(adap) ? \
4020	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
4021	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
4022#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
4023
4024	p->octets           = GET_STAT(BYTES);
4025	p->frames           = GET_STAT(FRAMES);
4026	p->bcast_frames     = GET_STAT(BCAST);
4027	p->mcast_frames     = GET_STAT(MCAST);
4028	p->ucast_frames     = GET_STAT(UCAST);
4029	p->error_frames     = GET_STAT(ERROR);
4030
4031	p->frames_64        = GET_STAT(64B);
4032	p->frames_65_127    = GET_STAT(65B_127B);
4033	p->frames_128_255   = GET_STAT(128B_255B);
4034	p->frames_256_511   = GET_STAT(256B_511B);
4035	p->frames_512_1023  = GET_STAT(512B_1023B);
4036	p->frames_1024_1518 = GET_STAT(1024B_1518B);
4037	p->frames_1519_max  = GET_STAT(1519B_MAX);
4038	p->drop             = GET_STAT(DROP_FRAMES);
4039
4040	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
4041	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
4042	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
4043	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
4044	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
4045	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4046	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4047	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4048
4049#undef GET_STAT
4050#undef GET_STAT_COM
4051}
4052
4053/**
4054 *	t4_wol_magic_enable - enable/disable magic packet WoL
4055 *	@adap: the adapter
4056 *	@port: the physical port index
4057 *	@addr: MAC address expected in magic packets, %NULL to disable
4058 *
4059 *	Enables/disables magic packet wake-on-LAN for the selected port.
4060 */
4061void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4062			 const u8 *addr)
4063{
4064	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4065
4066	if (is_t4(adap)) {
4067		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4068		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4069		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4070	} else {
4071		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4072		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4073		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4074	}
4075
4076	if (addr) {
4077		t4_write_reg(adap, mag_id_reg_l,
4078			     (addr[2] << 24) | (addr[3] << 16) |
4079			     (addr[4] << 8) | addr[5]);
4080		t4_write_reg(adap, mag_id_reg_h,
4081			     (addr[0] << 8) | addr[1]);
4082	}
4083	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4084			 V_MAGICEN(addr != NULL));
4085}
4086
4087/**
4088 *	t4_wol_pat_enable - enable/disable pattern-based WoL
4089 *	@adap: the adapter
4090 *	@port: the physical port index
4091 *	@map: bitmap of which HW pattern filters to set
4092 *	@mask0: byte mask for bytes 0-63 of a packet
4093 *	@mask1: byte mask for bytes 64-127 of a packet
4094 *	@crc: Ethernet CRC for selected bytes
4095 *	@enable: enable/disable switch
4096 *
4097 *	Sets the pattern filters indicated in @map to mask out the bytes
4098 *	specified in @mask0/@mask1 in received packets and compare the CRC of
4099 *	the resulting packet against @crc.  If @enable is %true pattern-based
4100 *	WoL is enabled, otherwise disabled.
4101 */
4102int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4103		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4104{
4105	int i;
4106	u32 port_cfg_reg;
4107
4108	if (is_t4(adap))
4109		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4110	else
4111		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4112
4113	if (!enable) {
4114		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4115		return 0;
4116	}
4117	if (map > 0xff)
4118		return -EINVAL;
4119
4120#define EPIO_REG(name) \
4121	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4122	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4123
4124	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4125	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4126	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4127
4128	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4129		if (!(map & 1))
4130			continue;
4131
4132		/* write byte masks */
4133		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4134		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4135		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4136		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4137			return -ETIMEDOUT;
4138
4139		/* write CRC */
4140		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4141		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4142		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4143		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4144			return -ETIMEDOUT;
4145	}
4146#undef EPIO_REG
4147
4148	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4149	return 0;
4150}
4151
4152/**
4153 *	t4_mk_filtdelwr - create a delete filter WR
4154 *	@ftid: the filter ID
4155 *	@wr: the filter work request to populate
4156 *	@qid: ingress queue to receive the delete notification
4157 *
4158 *	Creates a filter work request to delete the supplied filter.  If @qid is
4159 *	negative the delete notification is suppressed.
4160 */
4161void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4162{
4163	memset(wr, 0, sizeof(*wr));
4164	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4165	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4166	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4167			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4168	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4169	if (qid >= 0)
4170		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4171}
4172
4173#define INIT_CMD(var, cmd, rd_wr) do { \
4174	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4175				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4176	(var).retval_len16 = htonl(FW_LEN16(var)); \
4177} while (0)
4178
4179int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4180{
4181	struct fw_ldst_cmd c;
4182
4183	memset(&c, 0, sizeof(c));
4184	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4185		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4186	c.cycles_to_len16 = htonl(FW_LEN16(c));
4187	c.u.addrval.addr = htonl(addr);
4188	c.u.addrval.val = htonl(val);
4189
4190	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4191}
4192
4193/**
4194 *	t4_mdio_rd - read a PHY register through MDIO
4195 *	@adap: the adapter
4196 *	@mbox: mailbox to use for the FW command
4197 *	@phy_addr: the PHY address
4198 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4199 *	@reg: the register to read
4200 *	@valp: where to store the value
4201 *
4202 *	Issues a FW command through the given mailbox to read a PHY register.
4203 */
4204int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4205	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4206{
4207	int ret;
4208	struct fw_ldst_cmd c;
4209
4210	memset(&c, 0, sizeof(c));
4211	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4212		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4213	c.cycles_to_len16 = htonl(FW_LEN16(c));
4214	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4215				   V_FW_LDST_CMD_MMD(mmd));
4216	c.u.mdio.raddr = htons(reg);
4217
4218	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4219	if (ret == 0)
4220		*valp = ntohs(c.u.mdio.rval);
4221	return ret;
4222}
4223
4224/**
4225 *	t4_mdio_wr - write a PHY register through MDIO
4226 *	@adap: the adapter
4227 *	@mbox: mailbox to use for the FW command
4228 *	@phy_addr: the PHY address
4229 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4230 *	@reg: the register to write
4231 *	@valp: value to write
4232 *
4233 *	Issues a FW command through the given mailbox to write a PHY register.
4234 */
4235int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4236	       unsigned int mmd, unsigned int reg, unsigned int val)
4237{
4238	struct fw_ldst_cmd c;
4239
4240	memset(&c, 0, sizeof(c));
4241	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4242		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4243	c.cycles_to_len16 = htonl(FW_LEN16(c));
4244	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4245				   V_FW_LDST_CMD_MMD(mmd));
4246	c.u.mdio.raddr = htons(reg);
4247	c.u.mdio.rval = htons(val);
4248
4249	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4250}
4251
4252/**
4253 *	t4_i2c_rd - read I2C data from adapter
4254 *	@adap: the adapter
4255 *	@port: Port number if per-port device; <0 if not
4256 *	@devid: per-port device ID or absolute device ID
4257 *	@offset: byte offset into device I2C space
4258 *	@len: byte length of I2C space data
4259 *	@buf: buffer in which to return I2C data
4260 *
4261 *	Reads the I2C data from the indicated device and location.
4262 */
4263int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4264	      int port, unsigned int devid,
4265	      unsigned int offset, unsigned int len,
4266	      u8 *buf)
4267{
4268	struct fw_ldst_cmd ldst;
4269	int ret;
4270
4271	if (port >= 4 ||
4272	    devid >= 256 ||
4273	    offset >= 256 ||
4274	    len > sizeof ldst.u.i2c.data)
4275		return -EINVAL;
4276
4277	memset(&ldst, 0, sizeof ldst);
4278	ldst.op_to_addrspace =
4279		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4280			    F_FW_CMD_REQUEST |
4281			    F_FW_CMD_READ |
4282			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4283	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4284	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4285	ldst.u.i2c.did = devid;
4286	ldst.u.i2c.boffset = offset;
4287	ldst.u.i2c.blen = len;
4288	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4289	if (!ret)
4290		memcpy(buf, ldst.u.i2c.data, len);
4291	return ret;
4292}
4293
4294/**
4295 *	t4_i2c_wr - write I2C data to adapter
4296 *	@adap: the adapter
4297 *	@port: Port number if per-port device; <0 if not
4298 *	@devid: per-port device ID or absolute device ID
4299 *	@offset: byte offset into device I2C space
4300 *	@len: byte length of I2C space data
4301 *	@buf: buffer containing new I2C data
4302 *
4303 *	Write the I2C data to the indicated device and location.
4304 */
4305int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
4306	      int port, unsigned int devid,
4307	      unsigned int offset, unsigned int len,
4308	      u8 *buf)
4309{
4310	struct fw_ldst_cmd ldst;
4311
4312	if (port >= 4 ||
4313	    devid >= 256 ||
4314	    offset >= 256 ||
4315	    len > sizeof ldst.u.i2c.data)
4316		return -EINVAL;
4317
4318	memset(&ldst, 0, sizeof ldst);
4319	ldst.op_to_addrspace =
4320		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4321			    F_FW_CMD_REQUEST |
4322			    F_FW_CMD_WRITE |
4323			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4324	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4325	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4326	ldst.u.i2c.did = devid;
4327	ldst.u.i2c.boffset = offset;
4328	ldst.u.i2c.blen = len;
4329	memcpy(ldst.u.i2c.data, buf, len);
4330	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4331}
4332
4333/**
4334 *	t4_sge_ctxt_flush - flush the SGE context cache
4335 *	@adap: the adapter
4336 *	@mbox: mailbox to use for the FW command
4337 *
4338 *	Issues a FW command through the given mailbox to flush the
4339 *	SGE context cache.
4340 */
4341int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4342{
4343	int ret;
4344	struct fw_ldst_cmd c;
4345
4346	memset(&c, 0, sizeof(c));
4347	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4348			F_FW_CMD_READ |
4349			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4350	c.cycles_to_len16 = htonl(FW_LEN16(c));
4351	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4352
4353	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4354	return ret;
4355}
4356
4357/**
4358 *	t4_sge_ctxt_rd - read an SGE context through FW
4359 *	@adap: the adapter
4360 *	@mbox: mailbox to use for the FW command
4361 *	@cid: the context id
4362 *	@ctype: the context type
4363 *	@data: where to store the context data
4364 *
4365 *	Issues a FW command through the given mailbox to read an SGE context.
4366 */
4367int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4368		   enum ctxt_type ctype, u32 *data)
4369{
4370	int ret;
4371	struct fw_ldst_cmd c;
4372
4373	if (ctype == CTXT_EGRESS)
4374		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4375	else if (ctype == CTXT_INGRESS)
4376		ret = FW_LDST_ADDRSPC_SGE_INGC;
4377	else if (ctype == CTXT_FLM)
4378		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4379	else
4380		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4381
4382	memset(&c, 0, sizeof(c));
4383	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4384				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4385	c.cycles_to_len16 = htonl(FW_LEN16(c));
4386	c.u.idctxt.physid = htonl(cid);
4387
4388	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4389	if (ret == 0) {
4390		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4391		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4392		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4393		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4394		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4395		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4396	}
4397	return ret;
4398}
4399
4400/**
4401 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4402 *	@adap: the adapter
4403 *	@cid: the context id
4404 *	@ctype: the context type
4405 *	@data: where to store the context data
4406 *
4407 *	Reads an SGE context directly, bypassing FW.  This is only for
4408 *	debugging when FW is unavailable.
4409 */
4410int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4411		      u32 *data)
4412{
4413	int i, ret;
4414
4415	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4416	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4417	if (!ret)
4418		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4419			*data++ = t4_read_reg(adap, i);
4420	return ret;
4421}
4422
4423/**
4424 *	t4_fw_hello - establish communication with FW
4425 *	@adap: the adapter
4426 *	@mbox: mailbox to use for the FW command
4427 *	@evt_mbox: mailbox to receive async FW events
4428 *	@master: specifies the caller's willingness to be the device master
4429 *	@state: returns the current device state (if non-NULL)
4430 *
4431 *	Issues a command to establish communication with FW.  Returns either
4432 *	an error (negative integer) or the mailbox of the Master PF.
4433 */
4434int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4435		enum dev_master master, enum dev_state *state)
4436{
4437	int ret;
4438	struct fw_hello_cmd c;
4439	u32 v;
4440	unsigned int master_mbox;
4441	int retries = FW_CMD_HELLO_RETRIES;
4442
4443retry:
4444	memset(&c, 0, sizeof(c));
4445	INIT_CMD(c, HELLO, WRITE);
4446	c.err_to_clearinit = htonl(
4447		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4448		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4449		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4450			M_FW_HELLO_CMD_MBMASTER) |
4451		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4452		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4453		F_FW_HELLO_CMD_CLEARINIT);
4454
4455	/*
4456	 * Issue the HELLO command to the firmware.  If it's not successful
4457	 * but indicates that we got a "busy" or "timeout" condition, retry
4458	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4459	 * retry limit, check to see if the firmware left us any error
4460	 * information and report that if so ...
4461	 */
4462	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4463	if (ret != FW_SUCCESS) {
4464		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4465			goto retry;
4466		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4467			t4_report_fw_error(adap);
4468		return ret;
4469	}
4470
4471	v = ntohl(c.err_to_clearinit);
4472	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4473	if (state) {
4474		if (v & F_FW_HELLO_CMD_ERR)
4475			*state = DEV_STATE_ERR;
4476		else if (v & F_FW_HELLO_CMD_INIT)
4477			*state = DEV_STATE_INIT;
4478		else
4479			*state = DEV_STATE_UNINIT;
4480	}
4481
4482	/*
4483	 * If we're not the Master PF then we need to wait around for the
4484	 * Master PF Driver to finish setting up the adapter.
4485	 *
4486	 * Note that we also do this wait if we're a non-Master-capable PF and
4487	 * there is no current Master PF; a Master PF may show up momentarily
4488	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4489	 * OS loads lots of different drivers rapidly at the same time).  In
4490	 * this case, the Master PF returned by the firmware will be
4491	 * M_PCIE_FW_MASTER so the test below will work ...
4492	 */
4493	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4494	    master_mbox != mbox) {
4495		int waiting = FW_CMD_HELLO_TIMEOUT;
4496
4497		/*
4498		 * Wait for the firmware to either indicate an error or
4499		 * initialized state.  If we see either of these we bail out
4500		 * and report the issue to the caller.  If we exhaust the
4501		 * "hello timeout" and we haven't exhausted our retries, try
4502		 * again.  Otherwise bail with a timeout error.
4503		 */
4504		for (;;) {
4505			u32 pcie_fw;
4506
4507			msleep(50);
4508			waiting -= 50;
4509
4510			/*
4511			 * If neither Error nor Initialialized are indicated
4512			 * by the firmware keep waiting till we exhaust our
4513			 * timeout ... and then retry if we haven't exhausted
4514			 * our retries ...
4515			 */
4516			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4517			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4518				if (waiting <= 0) {
4519					if (retries-- > 0)
4520						goto retry;
4521
4522					return -ETIMEDOUT;
4523				}
4524				continue;
4525			}
4526
4527			/*
4528			 * We either have an Error or Initialized condition
4529			 * report errors preferentially.
4530			 */
4531			if (state) {
4532				if (pcie_fw & F_PCIE_FW_ERR)
4533					*state = DEV_STATE_ERR;
4534				else if (pcie_fw & F_PCIE_FW_INIT)
4535					*state = DEV_STATE_INIT;
4536			}
4537
4538			/*
4539			 * If we arrived before a Master PF was selected and
4540			 * there's not a valid Master PF, grab its identity
4541			 * for our caller.
4542			 */
4543			if (master_mbox == M_PCIE_FW_MASTER &&
4544			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4545				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4546			break;
4547		}
4548	}
4549
4550	return master_mbox;
4551}
4552
4553/**
4554 *	t4_fw_bye - end communication with FW
4555 *	@adap: the adapter
4556 *	@mbox: mailbox to use for the FW command
4557 *
4558 *	Issues a command to terminate communication with FW.
4559 */
4560int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4561{
4562	struct fw_bye_cmd c;
4563
4564	memset(&c, 0, sizeof(c));
4565	INIT_CMD(c, BYE, WRITE);
4566	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4567}
4568
4569/**
4570 *	t4_fw_reset - issue a reset to FW
4571 *	@adap: the adapter
4572 *	@mbox: mailbox to use for the FW command
4573 *	@reset: specifies the type of reset to perform
4574 *
4575 *	Issues a reset command of the specified type to FW.
4576 */
4577int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4578{
4579	struct fw_reset_cmd c;
4580
4581	memset(&c, 0, sizeof(c));
4582	INIT_CMD(c, RESET, WRITE);
4583	c.val = htonl(reset);
4584	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4585}
4586
4587/**
4588 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4589 *	@adap: the adapter
4590 *	@mbox: mailbox to use for the FW RESET command (if desired)
4591 *	@force: force uP into RESET even if FW RESET command fails
4592 *
4593 *	Issues a RESET command to firmware (if desired) with a HALT indication
4594 *	and then puts the microprocessor into RESET state.  The RESET command
4595 *	will only be issued if a legitimate mailbox is provided (mbox <=
4596 *	M_PCIE_FW_MASTER).
4597 *
4598 *	This is generally used in order for the host to safely manipulate the
4599 *	adapter without fear of conflicting with whatever the firmware might
4600 *	be doing.  The only way out of this state is to RESTART the firmware
4601 *	...
4602 */
4603int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4604{
4605	int ret = 0;
4606
4607	/*
4608	 * If a legitimate mailbox is provided, issue a RESET command
4609	 * with a HALT indication.
4610	 */
4611	if (mbox <= M_PCIE_FW_MASTER) {
4612		struct fw_reset_cmd c;
4613
4614		memset(&c, 0, sizeof(c));
4615		INIT_CMD(c, RESET, WRITE);
4616		c.val = htonl(F_PIORST | F_PIORSTMODE);
4617		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4618		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4619	}
4620
4621	/*
4622	 * Normally we won't complete the operation if the firmware RESET
4623	 * command fails but if our caller insists we'll go ahead and put the
4624	 * uP into RESET.  This can be useful if the firmware is hung or even
4625	 * missing ...  We'll have to take the risk of putting the uP into
4626	 * RESET without the cooperation of firmware in that case.
4627	 *
4628	 * We also force the firmware's HALT flag to be on in case we bypassed
4629	 * the firmware RESET command above or we're dealing with old firmware
4630	 * which doesn't have the HALT capability.  This will serve as a flag
4631	 * for the incoming firmware to know that it's coming out of a HALT
4632	 * rather than a RESET ... if it's new enough to understand that ...
4633	 */
4634	if (ret == 0 || force) {
4635		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4636		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4637	}
4638
4639	/*
4640	 * And we always return the result of the firmware RESET command
4641	 * even when we force the uP into RESET ...
4642	 */
4643	return ret;
4644}
4645
4646/**
4647 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4648 *	@adap: the adapter
4649 *	@reset: if we want to do a RESET to restart things
4650 *
4651 *	Restart firmware previously halted by t4_fw_halt().  On successful
4652 *	return the previous PF Master remains as the new PF Master and there
4653 *	is no need to issue a new HELLO command, etc.
4654 *
4655 *	We do this in two ways:
4656 *
4657 *	 1. If we're dealing with newer firmware we'll simply want to take
4658 *	    the chip's microprocessor out of RESET.  This will cause the
4659 *	    firmware to start up from its start vector.  And then we'll loop
4660 *	    until the firmware indicates it's started again (PCIE_FW.HALT
4661 *	    reset to 0) or we timeout.
4662 *
4663 *	 2. If we're dealing with older firmware then we'll need to RESET
4664 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4665 *	    flag and automatically RESET itself on startup.
4666 */
4667int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4668{
4669	if (reset) {
4670		/*
4671		 * Since we're directing the RESET instead of the firmware
4672		 * doing it automatically, we need to clear the PCIE_FW.HALT
4673		 * bit.
4674		 */
4675		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4676
4677		/*
4678		 * If we've been given a valid mailbox, first try to get the
4679		 * firmware to do the RESET.  If that works, great and we can
4680		 * return success.  Otherwise, if we haven't been given a
4681		 * valid mailbox or the RESET command failed, fall back to
4682		 * hitting the chip with a hammer.
4683		 */
4684		if (mbox <= M_PCIE_FW_MASTER) {
4685			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4686			msleep(100);
4687			if (t4_fw_reset(adap, mbox,
4688					F_PIORST | F_PIORSTMODE) == 0)
4689				return 0;
4690		}
4691
4692		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4693		msleep(2000);
4694	} else {
4695		int ms;
4696
4697		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4698		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4699			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4700				return FW_SUCCESS;
4701			msleep(100);
4702			ms += 100;
4703		}
4704		return -ETIMEDOUT;
4705	}
4706	return 0;
4707}
4708
4709/**
4710 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4711 *	@adap: the adapter
4712 *	@mbox: mailbox to use for the FW RESET command (if desired)
4713 *	@fw_data: the firmware image to write
4714 *	@size: image size
4715 *	@force: force upgrade even if firmware doesn't cooperate
4716 *
4717 *	Perform all of the steps necessary for upgrading an adapter's
4718 *	firmware image.  Normally this requires the cooperation of the
4719 *	existing firmware in order to halt all existing activities
4720 *	but if an invalid mailbox token is passed in we skip that step
4721 *	(though we'll still put the adapter microprocessor into RESET in
4722 *	that case).
4723 *
4724 *	On successful return the new firmware will have been loaded and
4725 *	the adapter will have been fully RESET losing all previous setup
4726 *	state.  On unsuccessful return the adapter may be completely hosed ...
4727 *	positive errno indicates that the adapter is ~probably~ intact, a
4728 *	negative errno indicates that things are looking bad ...
4729 */
4730int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4731		  const u8 *fw_data, unsigned int size, int force)
4732{
4733	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4734	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4735	int reset, ret;
4736
4737	if (!bootstrap) {
4738		ret = t4_fw_halt(adap, mbox, force);
4739		if (ret < 0 && !force)
4740			return ret;
4741	}
4742
4743	ret = t4_load_fw(adap, fw_data, size);
4744	if (ret < 0 || bootstrap)
4745		return ret;
4746
4747	/*
4748	 * Older versions of the firmware don't understand the new
4749	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4750	 * restart.  So for newly loaded older firmware we'll have to do the
4751	 * RESET for it so it starts up on a clean slate.  We can tell if
4752	 * the newly loaded firmware will handle this right by checking
4753	 * its header flags to see if it advertises the capability.
4754	 */
4755	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4756	return t4_fw_restart(adap, mbox, reset);
4757}
4758
4759/**
4760 *	t4_fw_initialize - ask FW to initialize the device
4761 *	@adap: the adapter
4762 *	@mbox: mailbox to use for the FW command
4763 *
4764 *	Issues a command to FW to partially initialize the device.  This
4765 *	performs initialization that generally doesn't depend on user input.
4766 */
4767int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4768{
4769	struct fw_initialize_cmd c;
4770
4771	memset(&c, 0, sizeof(c));
4772	INIT_CMD(c, INITIALIZE, WRITE);
4773	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4774}
4775
4776/**
4777 *	t4_query_params - query FW or device parameters
4778 *	@adap: the adapter
4779 *	@mbox: mailbox to use for the FW command
4780 *	@pf: the PF
4781 *	@vf: the VF
4782 *	@nparams: the number of parameters
4783 *	@params: the parameter names
4784 *	@val: the parameter values
4785 *
4786 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4787 *	queried at once.
4788 */
4789int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4790		    unsigned int vf, unsigned int nparams, const u32 *params,
4791		    u32 *val)
4792{
4793	int i, ret;
4794	struct fw_params_cmd c;
4795	__be32 *p = &c.param[0].mnem;
4796
4797	if (nparams > 7)
4798		return -EINVAL;
4799
4800	memset(&c, 0, sizeof(c));
4801	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4802			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4803			    V_FW_PARAMS_CMD_VFN(vf));
4804	c.retval_len16 = htonl(FW_LEN16(c));
4805
4806	for (i = 0; i < nparams; i++, p += 2, params++)
4807		*p = htonl(*params);
4808
4809	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4810	if (ret == 0)
4811		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4812			*val++ = ntohl(*p);
4813	return ret;
4814}
4815
4816/**
4817 *	t4_set_params - sets FW or device parameters
4818 *	@adap: the adapter
4819 *	@mbox: mailbox to use for the FW command
4820 *	@pf: the PF
4821 *	@vf: the VF
4822 *	@nparams: the number of parameters
4823 *	@params: the parameter names
4824 *	@val: the parameter values
4825 *
4826 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4827 *	specified at once.
4828 */
4829int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4830		  unsigned int vf, unsigned int nparams, const u32 *params,
4831		  const u32 *val)
4832{
4833	struct fw_params_cmd c;
4834	__be32 *p = &c.param[0].mnem;
4835
4836	if (nparams > 7)
4837		return -EINVAL;
4838
4839	memset(&c, 0, sizeof(c));
4840	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4841			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4842			    V_FW_PARAMS_CMD_VFN(vf));
4843	c.retval_len16 = htonl(FW_LEN16(c));
4844
4845	while (nparams--) {
4846		*p++ = htonl(*params);
4847		params++;
4848		*p++ = htonl(*val);
4849		val++;
4850	}
4851
4852	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4853}
4854
4855/**
4856 *	t4_cfg_pfvf - configure PF/VF resource limits
4857 *	@adap: the adapter
4858 *	@mbox: mailbox to use for the FW command
4859 *	@pf: the PF being configured
4860 *	@vf: the VF being configured
4861 *	@txq: the max number of egress queues
4862 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4863 *	@rxqi: the max number of interrupt-capable ingress queues
4864 *	@rxq: the max number of interruptless ingress queues
4865 *	@tc: the PCI traffic class
4866 *	@vi: the max number of virtual interfaces
4867 *	@cmask: the channel access rights mask for the PF/VF
4868 *	@pmask: the port access rights mask for the PF/VF
4869 *	@nexact: the maximum number of exact MPS filters
4870 *	@rcaps: read capabilities
4871 *	@wxcaps: write/execute capabilities
4872 *
4873 *	Configures resource limits and capabilities for a physical or virtual
4874 *	function.
4875 */
4876int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4877		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4878		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4879		unsigned int vi, unsigned int cmask, unsigned int pmask,
4880		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4881{
4882	struct fw_pfvf_cmd c;
4883
4884	memset(&c, 0, sizeof(c));
4885	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4886			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4887			    V_FW_PFVF_CMD_VFN(vf));
4888	c.retval_len16 = htonl(FW_LEN16(c));
4889	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4890			       V_FW_PFVF_CMD_NIQ(rxq));
4891	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4892			      V_FW_PFVF_CMD_PMASK(pmask) |
4893			      V_FW_PFVF_CMD_NEQ(txq));
4894	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4895				V_FW_PFVF_CMD_NEXACTF(nexact));
4896	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4897				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4898				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4899	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4900}
4901
4902/**
4903 *	t4_alloc_vi_func - allocate a virtual interface
4904 *	@adap: the adapter
4905 *	@mbox: mailbox to use for the FW command
4906 *	@port: physical port associated with the VI
4907 *	@pf: the PF owning the VI
4908 *	@vf: the VF owning the VI
4909 *	@nmac: number of MAC addresses needed (1 to 5)
4910 *	@mac: the MAC addresses of the VI
4911 *	@rss_size: size of RSS table slice associated with this VI
4912 *	@portfunc: which Port Application Function MAC Address is desired
4913 *	@idstype: Intrusion Detection Type
4914 *
4915 *	Allocates a virtual interface for the given physical port.  If @mac is
4916 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4917 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4918 *	stored consecutively so the space needed is @nmac * 6 bytes.
4919 *	Returns a negative error number or the non-negative VI id.
4920 */
4921int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4922		     unsigned int port, unsigned int pf, unsigned int vf,
4923		     unsigned int nmac, u8 *mac, u16 *rss_size,
4924		     unsigned int portfunc, unsigned int idstype)
4925{
4926	int ret;
4927	struct fw_vi_cmd c;
4928
4929	memset(&c, 0, sizeof(c));
4930	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4931			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4932			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4933	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4934	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4935			       V_FW_VI_CMD_FUNC(portfunc));
4936	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4937	c.nmac = nmac - 1;
4938
4939	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4940	if (ret)
4941		return ret;
4942
4943	if (mac) {
4944		memcpy(mac, c.mac, sizeof(c.mac));
4945		switch (nmac) {
4946		case 5:
4947			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4948		case 4:
4949			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4950		case 3:
4951			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4952		case 2:
4953			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4954		}
4955	}
4956	if (rss_size)
4957		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4958	return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
4959}
4960
4961/**
4962 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4963 *	@adap: the adapter
4964 *	@mbox: mailbox to use for the FW command
4965 *	@port: physical port associated with the VI
4966 *	@pf: the PF owning the VI
4967 *	@vf: the VF owning the VI
4968 *	@nmac: number of MAC addresses needed (1 to 5)
4969 *	@mac: the MAC addresses of the VI
4970 *	@rss_size: size of RSS table slice associated with this VI
4971 *
4972 *	backwards compatible and convieniance routine to allocate a Virtual
4973 *	Interface with a Ethernet Port Application Function and Intrustion
4974 *	Detection System disabled.
4975 */
4976int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4977		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4978		u16 *rss_size)
4979{
4980	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4981				FW_VI_FUNC_ETH, 0);
4982}
4983
4984/**
4985 *	t4_free_vi - free a virtual interface
4986 *	@adap: the adapter
4987 *	@mbox: mailbox to use for the FW command
4988 *	@pf: the PF owning the VI
4989 *	@vf: the VF owning the VI
4990 *	@viid: virtual interface identifiler
4991 *
4992 *	Free a previously allocated virtual interface.
4993 */
4994int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4995	       unsigned int vf, unsigned int viid)
4996{
4997	struct fw_vi_cmd c;
4998
4999	memset(&c, 0, sizeof(c));
5000	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
5001			    F_FW_CMD_REQUEST |
5002			    F_FW_CMD_EXEC |
5003			    V_FW_VI_CMD_PFN(pf) |
5004			    V_FW_VI_CMD_VFN(vf));
5005	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
5006	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
5007
5008	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5009}
5010
5011/**
5012 *	t4_set_rxmode - set Rx properties of a virtual interface
5013 *	@adap: the adapter
5014 *	@mbox: mailbox to use for the FW command
5015 *	@viid: the VI id
5016 *	@mtu: the new MTU or -1
5017 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
5018 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
5019 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
5020 *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
5021 *	@sleep_ok: if true we may sleep while awaiting command completion
5022 *
5023 *	Sets Rx properties of a virtual interface.
5024 */
5025int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
5026		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
5027		  bool sleep_ok)
5028{
5029	struct fw_vi_rxmode_cmd c;
5030
5031	/* convert to FW values */
5032	if (mtu < 0)
5033		mtu = M_FW_VI_RXMODE_CMD_MTU;
5034	if (promisc < 0)
5035		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
5036	if (all_multi < 0)
5037		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
5038	if (bcast < 0)
5039		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
5040	if (vlanex < 0)
5041		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
5042
5043	memset(&c, 0, sizeof(c));
5044	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
5045			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
5046	c.retval_len16 = htonl(FW_LEN16(c));
5047	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
5048				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
5049				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
5050				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
5051				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
5052	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5053}
5054
5055/**
5056 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5057 *	@adap: the adapter
5058 *	@mbox: mailbox to use for the FW command
5059 *	@viid: the VI id
5060 *	@free: if true any existing filters for this VI id are first removed
5061 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
5062 *	@addr: the MAC address(es)
5063 *	@idx: where to store the index of each allocated filter
5064 *	@hash: pointer to hash address filter bitmap
5065 *	@sleep_ok: call is allowed to sleep
5066 *
5067 *	Allocates an exact-match filter for each of the supplied addresses and
5068 *	sets it to the corresponding address.  If @idx is not %NULL it should
5069 *	have at least @naddr entries, each of which will be set to the index of
5070 *	the filter allocated for the corresponding MAC address.  If a filter
5071 *	could not be allocated for an address its index is set to 0xffff.
5072 *	If @hash is not %NULL addresses that fail to allocate an exact filter
5073 *	are hashed and update the hash filter bitmap pointed at by @hash.
5074 *
5075 *	Returns a negative error number or the number of filters allocated.
5076 */
5077int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5078		      unsigned int viid, bool free, unsigned int naddr,
5079		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5080{
5081	int offset, ret = 0;
5082	struct fw_vi_mac_cmd c;
5083	unsigned int nfilters = 0;
5084	unsigned int max_naddr = is_t4(adap) ?
5085				       NUM_MPS_CLS_SRAM_L_INSTANCES :
5086				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5087	unsigned int rem = naddr;
5088
5089	if (naddr > max_naddr)
5090		return -EINVAL;
5091
5092	for (offset = 0; offset < naddr ; /**/) {
5093		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5094					 ? rem
5095					 : ARRAY_SIZE(c.u.exact));
5096		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5097						     u.exact[fw_naddr]), 16);
5098		struct fw_vi_mac_exact *p;
5099		int i;
5100
5101		memset(&c, 0, sizeof(c));
5102		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5103				     F_FW_CMD_REQUEST |
5104				     F_FW_CMD_WRITE |
5105				     V_FW_CMD_EXEC(free) |
5106				     V_FW_VI_MAC_CMD_VIID(viid));
5107		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5108					    V_FW_CMD_LEN16(len16));
5109
5110		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5111			p->valid_to_idx = htons(
5112				F_FW_VI_MAC_CMD_VALID |
5113				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5114			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5115		}
5116
5117		/*
5118		 * It's okay if we run out of space in our MAC address arena.
5119		 * Some of the addresses we submit may get stored so we need
5120		 * to run through the reply to see what the results were ...
5121		 */
5122		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5123		if (ret && ret != -FW_ENOMEM)
5124			break;
5125
5126		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5127			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5128
5129			if (idx)
5130				idx[offset+i] = (index >=  max_naddr
5131						 ? 0xffff
5132						 : index);
5133			if (index < max_naddr)
5134				nfilters++;
5135			else if (hash)
5136				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5137		}
5138
5139		free = false;
5140		offset += fw_naddr;
5141		rem -= fw_naddr;
5142	}
5143
5144	if (ret == 0 || ret == -FW_ENOMEM)
5145		ret = nfilters;
5146	return ret;
5147}
5148
5149/**
5150 *	t4_change_mac - modifies the exact-match filter for a MAC address
5151 *	@adap: the adapter
5152 *	@mbox: mailbox to use for the FW command
5153 *	@viid: the VI id
5154 *	@idx: index of existing filter for old value of MAC address, or -1
5155 *	@addr: the new MAC address value
5156 *	@persist: whether a new MAC allocation should be persistent
5157 *	@add_smt: if true also add the address to the HW SMT
5158 *
5159 *	Modifies an exact-match filter and sets it to the new MAC address if
5160 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5161 *	latter case the address is added persistently if @persist is %true.
5162 *
5163 *	Note that in general it is not possible to modify the value of a given
5164 *	filter so the generic way to modify an address filter is to free the one
5165 *	being used by the old address value and allocate a new filter for the
5166 *	new address value.
5167 *
5168 *	Returns a negative error number or the index of the filter with the new
5169 *	MAC value.  Note that this index may differ from @idx.
5170 */
5171int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5172		  int idx, const u8 *addr, bool persist, bool add_smt)
5173{
5174	int ret, mode;
5175	struct fw_vi_mac_cmd c;
5176	struct fw_vi_mac_exact *p = c.u.exact;
5177	unsigned int max_mac_addr = is_t4(adap) ?
5178				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5179				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5180
5181	if (idx < 0)                             /* new allocation */
5182		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5183	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5184
5185	memset(&c, 0, sizeof(c));
5186	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5187			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5188	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5189	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5190				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5191				V_FW_VI_MAC_CMD_IDX(idx));
5192	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5193
5194	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5195	if (ret == 0) {
5196		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5197		if (ret >= max_mac_addr)
5198			ret = -ENOMEM;
5199	}
5200	return ret;
5201}
5202
5203/**
5204 *	t4_set_addr_hash - program the MAC inexact-match hash filter
5205 *	@adap: the adapter
5206 *	@mbox: mailbox to use for the FW command
5207 *	@viid: the VI id
5208 *	@ucast: whether the hash filter should also match unicast addresses
5209 *	@vec: the value to be written to the hash filter
5210 *	@sleep_ok: call is allowed to sleep
5211 *
5212 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5213 */
5214int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5215		     bool ucast, u64 vec, bool sleep_ok)
5216{
5217	struct fw_vi_mac_cmd c;
5218
5219	memset(&c, 0, sizeof(c));
5220	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5221			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
5222	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5223				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5224				    V_FW_CMD_LEN16(1));
5225	c.u.hash.hashvec = cpu_to_be64(vec);
5226	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5227}
5228
5229/**
5230 *	t4_enable_vi - enable/disable a virtual interface
5231 *	@adap: the adapter
5232 *	@mbox: mailbox to use for the FW command
5233 *	@viid: the VI id
5234 *	@rx_en: 1=enable Rx, 0=disable Rx
5235 *	@tx_en: 1=enable Tx, 0=disable Tx
5236 *
5237 *	Enables/disables a virtual interface.
5238 */
5239int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5240		 bool rx_en, bool tx_en)
5241{
5242	struct fw_vi_enable_cmd c;
5243
5244	memset(&c, 0, sizeof(c));
5245	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5246			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5247	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5248			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5249	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5250}
5251
5252/**
5253 *	t4_identify_port - identify a VI's port by blinking its LED
5254 *	@adap: the adapter
5255 *	@mbox: mailbox to use for the FW command
5256 *	@viid: the VI id
5257 *	@nblinks: how many times to blink LED at 2.5 Hz
5258 *
5259 *	Identifies a VI's port by blinking its LED.
5260 */
5261int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5262		     unsigned int nblinks)
5263{
5264	struct fw_vi_enable_cmd c;
5265
5266	memset(&c, 0, sizeof(c));
5267	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5268			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5269	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5270	c.blinkdur = htons(nblinks);
5271	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5272}
5273
5274/**
5275 *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5276 *	@adap: the adapter
5277 *	@mbox: mailbox to use for the FW command
5278 *	@start: %true to enable the queues, %false to disable them
5279 *	@pf: the PF owning the queues
5280 *	@vf: the VF owning the queues
5281 *	@iqid: ingress queue id
5282 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5283 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5284 *
5285 *	Starts or stops an ingress queue and its associated FLs, if any.
5286 */
5287int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5288		     unsigned int pf, unsigned int vf, unsigned int iqid,
5289		     unsigned int fl0id, unsigned int fl1id)
5290{
5291	struct fw_iq_cmd c;
5292
5293	memset(&c, 0, sizeof(c));
5294	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5295			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5296			    V_FW_IQ_CMD_VFN(vf));
5297	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5298				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5299	c.iqid = htons(iqid);
5300	c.fl0id = htons(fl0id);
5301	c.fl1id = htons(fl1id);
5302	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5303}
5304
5305/**
5306 *	t4_iq_free - free an ingress queue and its FLs
5307 *	@adap: the adapter
5308 *	@mbox: mailbox to use for the FW command
5309 *	@pf: the PF owning the queues
5310 *	@vf: the VF owning the queues
5311 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5312 *	@iqid: ingress queue id
5313 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5314 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5315 *
5316 *	Frees an ingress queue and its associated FLs, if any.
5317 */
5318int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5319	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5320	       unsigned int fl0id, unsigned int fl1id)
5321{
5322	struct fw_iq_cmd c;
5323
5324	memset(&c, 0, sizeof(c));
5325	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5326			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5327			    V_FW_IQ_CMD_VFN(vf));
5328	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5329	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5330	c.iqid = htons(iqid);
5331	c.fl0id = htons(fl0id);
5332	c.fl1id = htons(fl1id);
5333	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5334}
5335
5336/**
5337 *	t4_eth_eq_free - free an Ethernet egress queue
5338 *	@adap: the adapter
5339 *	@mbox: mailbox to use for the FW command
5340 *	@pf: the PF owning the queue
5341 *	@vf: the VF owning the queue
5342 *	@eqid: egress queue id
5343 *
5344 *	Frees an Ethernet egress queue.
5345 */
5346int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5347		   unsigned int vf, unsigned int eqid)
5348{
5349	struct fw_eq_eth_cmd c;
5350
5351	memset(&c, 0, sizeof(c));
5352	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5353			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5354			    V_FW_EQ_ETH_CMD_VFN(vf));
5355	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5356	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5357	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5358}
5359
5360/**
5361 *	t4_ctrl_eq_free - free a control egress queue
5362 *	@adap: the adapter
5363 *	@mbox: mailbox to use for the FW command
5364 *	@pf: the PF owning the queue
5365 *	@vf: the VF owning the queue
5366 *	@eqid: egress queue id
5367 *
5368 *	Frees a control egress queue.
5369 */
5370int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5371		    unsigned int vf, unsigned int eqid)
5372{
5373	struct fw_eq_ctrl_cmd c;
5374
5375	memset(&c, 0, sizeof(c));
5376	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5377			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5378			    V_FW_EQ_CTRL_CMD_VFN(vf));
5379	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5380	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5381	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5382}
5383
5384/**
5385 *	t4_ofld_eq_free - free an offload egress queue
5386 *	@adap: the adapter
5387 *	@mbox: mailbox to use for the FW command
5388 *	@pf: the PF owning the queue
5389 *	@vf: the VF owning the queue
5390 *	@eqid: egress queue id
5391 *
5392 *	Frees a control egress queue.
5393 */
5394int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5395		    unsigned int vf, unsigned int eqid)
5396{
5397	struct fw_eq_ofld_cmd c;
5398
5399	memset(&c, 0, sizeof(c));
5400	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5401			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5402			    V_FW_EQ_OFLD_CMD_VFN(vf));
5403	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5404	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5405	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5406}
5407
5408/**
5409 *	t4_handle_fw_rpl - process a FW reply message
5410 *	@adap: the adapter
5411 *	@rpl: start of the FW message
5412 *
5413 *	Processes a FW message, such as link state change messages.
5414 */
5415int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5416{
5417	u8 opcode = *(const u8 *)rpl;
5418	const struct fw_port_cmd *p = (const void *)rpl;
5419	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5420
5421	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5422		/* link/module state change message */
5423		int speed = 0, fc = 0, i;
5424		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5425		struct port_info *pi = NULL;
5426		struct link_config *lc;
5427		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5428		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5429		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5430
5431		if (stat & F_FW_PORT_CMD_RXPAUSE)
5432			fc |= PAUSE_RX;
5433		if (stat & F_FW_PORT_CMD_TXPAUSE)
5434			fc |= PAUSE_TX;
5435		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5436			speed = SPEED_100;
5437		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5438			speed = SPEED_1000;
5439		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5440			speed = SPEED_10000;
5441		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5442			speed = SPEED_40000;
5443
5444		for_each_port(adap, i) {
5445			pi = adap2pinfo(adap, i);
5446			if (pi->tx_chan == chan)
5447				break;
5448		}
5449		lc = &pi->link_cfg;
5450
5451		if (mod != pi->mod_type) {
5452			pi->mod_type = mod;
5453			t4_os_portmod_changed(adap, i);
5454		}
5455		if (link_ok != lc->link_ok || speed != lc->speed ||
5456		    fc != lc->fc) {                    /* something changed */
5457			int reason;
5458
5459			if (!link_ok && lc->link_ok)
5460				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5461			else
5462				reason = -1;
5463
5464			lc->link_ok = link_ok;
5465			lc->speed = speed;
5466			lc->fc = fc;
5467			lc->supported = ntohs(p->u.info.pcap);
5468			t4_os_link_changed(adap, i, link_ok, reason);
5469		}
5470	} else {
5471		CH_WARN_RATELIMIT(adap,
5472		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5473		return -EINVAL;
5474	}
5475	return 0;
5476}
5477
5478/**
5479 *	get_pci_mode - determine a card's PCI mode
5480 *	@adapter: the adapter
5481 *	@p: where to store the PCI settings
5482 *
5483 *	Determines a card's PCI mode and associated parameters, such as speed
5484 *	and width.
5485 */
5486static void __devinit get_pci_mode(struct adapter *adapter,
5487				   struct pci_params *p)
5488{
5489	u16 val;
5490	u32 pcie_cap;
5491
5492	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5493	if (pcie_cap) {
5494		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5495		p->speed = val & PCI_EXP_LNKSTA_CLS;
5496		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5497	}
5498}
5499
5500/**
5501 *	init_link_config - initialize a link's SW state
5502 *	@lc: structure holding the link state
5503 *	@caps: link capabilities
5504 *
5505 *	Initializes the SW state maintained for each link, including the link's
5506 *	capabilities and default speed/flow-control/autonegotiation settings.
5507 */
5508static void __devinit init_link_config(struct link_config *lc,
5509				       unsigned int caps)
5510{
5511	lc->supported = caps;
5512	lc->requested_speed = 0;
5513	lc->speed = 0;
5514	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5515	if (lc->supported & FW_PORT_CAP_ANEG) {
5516		lc->advertising = lc->supported & ADVERT_MASK;
5517		lc->autoneg = AUTONEG_ENABLE;
5518		lc->requested_fc |= PAUSE_AUTONEG;
5519	} else {
5520		lc->advertising = 0;
5521		lc->autoneg = AUTONEG_DISABLE;
5522	}
5523}
5524
5525static int __devinit get_flash_params(struct adapter *adapter)
5526{
5527	int ret;
5528	u32 info = 0;
5529
5530	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5531	if (!ret)
5532		ret = sf1_read(adapter, 3, 0, 1, &info);
5533	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5534	if (ret < 0)
5535		return ret;
5536
5537	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5538		return -EINVAL;
5539	info >>= 16;                           /* log2 of size */
5540	if (info >= 0x14 && info < 0x18)
5541		adapter->params.sf_nsec = 1 << (info - 16);
5542	else if (info == 0x18)
5543		adapter->params.sf_nsec = 64;
5544	else
5545		return -EINVAL;
5546	adapter->params.sf_size = 1 << info;
5547	return 0;
5548}
5549
5550static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5551						  u8 range)
5552{
5553	u16 val;
5554	u32 pcie_cap;
5555
5556	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5557	if (pcie_cap) {
5558		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5559		val &= 0xfff0;
5560		val |= range ;
5561		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5562	}
5563}
5564
5565/**
5566 *	t4_prep_adapter - prepare SW and HW for operation
5567 *	@adapter: the adapter
5568 *	@reset: if true perform a HW reset
5569 *
5570 *	Initialize adapter SW state for the various HW modules, set initial
5571 *	values for some adapter tunables, take PHYs out of reset, and
5572 *	initialize the MDIO interface.
5573 */
5574int __devinit t4_prep_adapter(struct adapter *adapter)
5575{
5576	int ret;
5577	uint16_t device_id;
5578	uint32_t pl_rev;
5579
5580	get_pci_mode(adapter, &adapter->params.pci);
5581
5582	pl_rev = t4_read_reg(adapter, A_PL_REV);
5583	adapter->params.chipid = G_CHIPID(pl_rev);
5584	adapter->params.rev = G_REV(pl_rev);
5585	if (adapter->params.chipid == 0) {
5586		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5587		adapter->params.chipid = CHELSIO_T4;
5588
5589		/* T4A1 chip is not supported */
5590		if (adapter->params.rev == 1) {
5591			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5592			return -EINVAL;
5593		}
5594	}
5595	adapter->params.pci.vpd_cap_addr =
5596	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5597
5598	ret = get_flash_params(adapter);
5599	if (ret < 0)
5600		return ret;
5601
5602	ret = get_vpd_params(adapter, &adapter->params.vpd);
5603	if (ret < 0)
5604		return ret;
5605
5606	/* Cards with real ASICs have the chipid in the PCIe device id */
5607	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5608	if (device_id >> 12 == adapter->params.chipid)
5609		adapter->params.cim_la_size = CIMLA_SIZE;
5610	else {
5611		/* FPGA */
5612		adapter->params.fpga = 1;
5613		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5614	}
5615
5616	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5617
5618	/*
5619	 * Default port and clock for debugging in case we can't reach FW.
5620	 */
5621	adapter->params.nports = 1;
5622	adapter->params.portvec = 1;
5623	adapter->params.vpd.cclk = 50000;
5624
5625	/* Set pci completion timeout value to 4 seconds. */
5626	set_pcie_completion_timeout(adapter, 0xd);
5627	return 0;
5628}
5629
5630/**
5631 *	t4_init_tp_params - initialize adap->params.tp
5632 *	@adap: the adapter
5633 *
5634 *	Initialize various fields of the adapter's TP Parameters structure.
5635 */
5636int __devinit t4_init_tp_params(struct adapter *adap)
5637{
5638	int chan;
5639	u32 v;
5640
5641	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5642	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5643	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5644
5645	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5646	for (chan = 0; chan < NCHAN; chan++)
5647		adap->params.tp.tx_modq[chan] = chan;
5648
5649	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5650			 &adap->params.tp.ingress_config, 1,
5651			 A_TP_INGRESS_CONFIG);
5652	refresh_vlan_pri_map(adap);
5653
5654	return 0;
5655}
5656
5657/**
5658 *	t4_filter_field_shift - calculate filter field shift
5659 *	@adap: the adapter
5660 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5661 *
5662 *	Return the shift position of a filter field within the Compressed
5663 *	Filter Tuple.  The filter field is specified via its selection bit
5664 *	within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
5665 */
5666int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5667{
5668	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5669	unsigned int sel;
5670	int field_shift;
5671
5672	if ((filter_mode & filter_sel) == 0)
5673		return -1;
5674
5675	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5676	    switch (filter_mode & sel) {
5677		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5678		case F_PORT:          field_shift += W_FT_PORT;          break;
5679		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5680		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5681		case F_TOS:           field_shift += W_FT_TOS;           break;
5682		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5683		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5684		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5685		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5686		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5687	    }
5688	}
5689	return field_shift;
5690}
5691
5692int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5693{
5694	u8 addr[6];
5695	int ret, i, j;
5696	struct fw_port_cmd c;
5697	u16 rss_size;
5698	adapter_t *adap = p->adapter;
5699	u32 param, val;
5700
5701	memset(&c, 0, sizeof(c));
5702
5703	for (i = 0, j = -1; i <= p->port_id; i++) {
5704		do {
5705			j++;
5706		} while ((adap->params.portvec & (1 << j)) == 0);
5707	}
5708
5709	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5710			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5711			       V_FW_PORT_CMD_PORTID(j));
5712	c.action_to_len16 = htonl(
5713		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5714		FW_LEN16(c));
5715	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5716	if (ret)
5717		return ret;
5718
5719	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5720	if (ret < 0)
5721		return ret;
5722
5723	p->viid = ret;
5724	p->tx_chan = j;
5725	p->rx_chan_map = get_mps_bg_map(adap, j);
5726	p->lport = j;
5727	p->rss_size = rss_size;
5728	t4_os_set_hw_addr(adap, p->port_id, addr);
5729
5730	ret = ntohl(c.u.info.lstatus_to_modtype);
5731	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5732		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5733	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5734	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5735
5736	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5737
5738	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5739	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
5740	    V_FW_PARAMS_PARAM_YZ(p->viid);
5741	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
5742	if (ret)
5743		p->rss_base = 0xffff;
5744	else {
5745		/* MPASS((val >> 16) == rss_size); */
5746		p->rss_base = val & 0xffff;
5747	}
5748
5749	return 0;
5750}
5751
5752int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
5753    		    int sleep_ok)
5754{
5755	struct fw_sched_cmd cmd;
5756
5757	memset(&cmd, 0, sizeof(cmd));
5758	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5759				      F_FW_CMD_REQUEST |
5760				      F_FW_CMD_WRITE);
5761	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5762
5763	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5764	cmd.u.config.type = type;
5765	cmd.u.config.minmaxen = minmaxen;
5766
5767	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
5768			       NULL, sleep_ok);
5769}
5770
5771int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5772		    int rateunit, int ratemode, int channel, int cl,
5773		    int minrate, int maxrate, int weight, int pktsize,
5774		    int sleep_ok)
5775{
5776	struct fw_sched_cmd cmd;
5777
5778	memset(&cmd, 0, sizeof(cmd));
5779	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5780				      F_FW_CMD_REQUEST |
5781				      F_FW_CMD_WRITE);
5782	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5783
5784	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5785	cmd.u.params.type = type;
5786	cmd.u.params.level = level;
5787	cmd.u.params.mode = mode;
5788	cmd.u.params.ch = channel;
5789	cmd.u.params.cl = cl;
5790	cmd.u.params.unit = rateunit;
5791	cmd.u.params.rate = ratemode;
5792	cmd.u.params.min = cpu_to_be32(minrate);
5793	cmd.u.params.max = cpu_to_be32(maxrate);
5794	cmd.u.params.weight = cpu_to_be16(weight);
5795	cmd.u.params.pktsize = cpu_to_be16(pktsize);
5796
5797	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
5798			       NULL, sleep_ok);
5799}
5800