/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.c 269082 2014-07-25 00:30:55Z np $");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
		        int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
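
/*
 * Illustrative sketch only (not part of the driver): how a caller might
 * poll a busy bit with t4_wait_op_done_val.  The register name and bit
 * field below are hypothetical.
 */
#if 0
static int example_wait_not_busy(struct adapter *adap)
{
	u32 val;

	/*
	 * Poll up to 100 times, 5 usecs apart, until the (hypothetical)
	 * F_EXAMPLE_BUSY bit reads as 0; val receives the final value.
	 */
	return t4_wait_op_done_val(adap, A_EXAMPLE_REG, F_EXAMPLE_BUSY,
				   0, 100, 5, &val);
}
#endif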

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: index of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
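
/*
 * Illustrative sketch only (not part of the driver): writing and then
 * reading back a small block of indirectly addressed registers.  The
 * address/data register pair names here are hypothetical.
 */
#if 0
static void example_indirect_access(struct adapter *adap)
{
	u32 vals[4] = { 0, 1, 2, 3 };
	u32 readback[4];

	/*
	 * Write registers 8..11 behind the (hypothetical) pair, then read
	 * them back through the same window.
	 */
	t4_write_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, vals, 4, 8);
	t4_read_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, readback, 4, 8);
}
#endif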

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
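
/*
 * Illustrative sketch only (not part of the driver): the shape of a
 * typical t4_wr_mbox_meat() call.  The command structure, its fields and
 * the mailbox index below are hypothetical stand-ins for a real FW
 * command from t4fw_interface.h; command and reply share one buffer of
 * equal length, which must be a multiple of 16 bytes.
 */
#if 0
static int example_send_fw_cmd(struct adapter *adap)
{
	struct example_fw_cmd {		/* hypothetical 16-byte command */
		__be32 op_to_write;
		__be32 retval_len16;
		__be64 data;
	} c;

	memset(&c, 0, sizeof(c));
	/* Fill in the big-endian opcode/length fields here ... */

	/*
	 * sleep_ok is true because this sketch assumes a sleepable
	 * context; the FW's reply overwrites c.
	 */
	return t4_wr_mbox_meat(adap, 4, &c, sizeof(c), &c, true);
}
#endif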

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
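
/*
 * Illustrative sketch only (not part of the driver): reading a small,
 * word-aligned region of EDC0.  The buffer comes back as the raw byte
 * sequence of adapter memory, so any interpretation of multi-byte
 * integers (here assumed big-endian) is up to the caller.
 */
#if 0
static int example_read_edc0(struct adapter *adap)
{
	__be32 raw[32];		/* 128 bytes, 32-bit aligned */
	u32 host[32];
	int i, ret;

	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(raw), raw);
	if (ret)
		return ret;
	for (i = 0; i < 32; i++)
		host[i] = ntohl(raw[i]);	/* treat words as big-endian */
	return 0;
}
#endif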

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
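
/*
 * Worked example (sketch, not part of the driver): with the mapping
 * above, a physical address inside the first 1K lands at 31K plus its
 * offset, e.g. 0x200 -> (31 << 10) + 0x200 = 0x7e00.  The check below
 * exercises only that first case; the other two depend on EEPROMSIZE
 * and the function-specific area size.
 */
#if 0
static void example_eeprom_ptov(void)
{
	/* [0..1K) maps to [31K..32K) regardless of fn and sz. */
	KASSERT(t4_eeprom_ptov(0x200, 0, 0x800) == 0x7e00,
	    ("unexpected t4_eeprom_ptov translation"));
}
#endif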

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - locates an information field keyword in the VPD
 *	@v: pointer to the buffered VPD data structure
 *	@kw: the keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}
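
/*
 * Layout note with a sketch (not part of the driver): each VPD-R
 * information field is a 3-byte header followed by its value, so the
 * offset returned above points just past the header:
 *
 *	byte 0..1: two-character keyword, e.g. "SN"
 *	byte 2:    value length in bytes
 *	byte 3..:  value
 */
#if 0
static void example_vpd_field(const struct t4_vpd_hdr *v, u8 *out, int outlen)
{
	const u8 *buf = (const u8 *)v;
	int val = get_vpd_keyword_val(v, "SN");

	if (val >= 0) {
		/* The length byte sits just before the value. */
		int n = buf[val - VPD_INFO_FLD_HDR_SIZE + 2];

		memcpy(out, buf + val, min(n, outlen));
	}
}
#endif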


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (chip_id(adapter)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    chip_id(adapter));
		return -EINVAL;
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
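
/*
 * Note with a sketch (not part of the driver): the 32-bit version word
 * read from the fw_hdr packs the major/minor/micro components into
 * separate bit fields that the G_FW_HDR_FW_VER_* accessors extract.
 * This is a hedged illustration of how a caller might format the
 * version, not a definitive map of the header layout.
 */
#if 0
static void example_decode_fw_version(struct adapter *adapter)
{
	u32 v = adapter->params.fw_vers;

	printf("firmware %u.%u.%u\n",
	    G_FW_HDR_FW_VER_MAJOR(v),
	    G_FW_HDR_FW_VER_MINOR(v),
	    G_FW_HDR_FW_VER_MICRO(v));
}
#endif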

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}


/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return -EFBIG;
	}
	if ((is_t4(adap) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		    "FW image (%d) is not suitable for this adapter (%d)\n",
		    hdr->chip, chip_id(adap));
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* maximum boot image size */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write
 *	@boot_data: the boot image to modify
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or EFI.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum.
			 * Writing new checksum value directly to the boot data.
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
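
/*
 * Illustrative sketch only (not part of the driver): the legacy-image
 * checksum convention used above.  Because the checksum byte is chosen
 * as the negated sum of every other byte, the bytes of a valid image
 * sum to zero modulo 256.
 */
#if 0
static int example_boot_image_cksum_ok(const u8 *image, unsigned int len)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += image[i];
	return sum == 0;	/* a non-zero sum means a corrupt image */
}
#endif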

/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		/*
		 * It might take 3-10ms before the IBQ debug read access is
		 * allowed.  Wait for 1 second with a delay of 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      1000000, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if (qid >= cim_num_obq || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
1668
1669/**
1670 *	t4_cim_ctl_read - read a block from CIM control region
1671 *	@adap: the adapter
1672 *	@addr: the start address within the CIM control region
1673 *	@n: number of words to read
1674 *	@valp: where to store the result
1675 *
1676 *	Reads a block of 4-byte words from the CIM control region.
1677 */
1678int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1679		    unsigned int *valp)
1680{
1681	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1682}
1683
1684/**
1685 *	t4_cim_read_la - read CIM LA capture buffer
1686 *	@adap: the adapter
1687 *	@la_buf: where to store the LA data
1688 *	@wrptr: the HW write pointer within the capture buffer
1689 *
1690 *	Reads the contents of the CIM LA buffer with the most recent entry at
1691 *	the end	of the returned data and with the entry at @wrptr first.
1692 *	We try to leave the LA in the running state we find it in.
1693 */
1694int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1695{
1696	int i, ret;
1697	unsigned int cfg, val, idx;
1698
1699	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1700	if (ret)
1701		return ret;
1702
1703	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1704		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1705		if (ret)
1706			return ret;
1707	}
1708
1709	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1710	if (ret)
1711		goto restart;
1712
1713	idx = G_UPDBGLAWRPTR(val);
1714	if (wrptr)
1715		*wrptr = idx;
1716
1717	for (i = 0; i < adap->params.cim_la_size; i++) {
1718		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1719				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1720		if (ret)
1721			break;
1722		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1723		if (ret)
1724			break;
1725		if (val & F_UPDBGLARDEN) {
1726			ret = -ETIMEDOUT;
1727			break;
1728		}
1729		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1730		if (ret)
1731			break;
1732		idx = (idx + 1) & M_UPDBGLARDPTR;
1733	}
1734restart:
1735	if (cfg & F_UPDBGLAEN) {
1736		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1737				      cfg & ~F_UPDBGLARDEN);
1738		if (!ret)
1739			ret = r;
1740	}
1741	return ret;
1742}
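
/*
 * Illustrative sketch: callers must supply a buffer of
 * adap->params.cim_la_size 32-bit entries.  The use of malloc(9) with
 * M_DEVBUF here is an assumption about the caller's context, not a
 * requirement of t4_cim_read_la.
 */
static inline int
example_read_cim_la(struct adapter *adap)
{
	u32 *buf;
	unsigned int wrptr;
	int ret;

	buf = malloc(adap->params.cim_la_size * sizeof(u32), M_DEVBUF,
	    M_NOWAIT);
	if (buf == NULL)
		return -ENOMEM;
	ret = t4_cim_read_la(adap, buf, &wrptr);	/* oldest ... newest */
	free(buf, M_DEVBUF);
	return ret;
}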
1743
1744void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1745			unsigned int *pif_req_wrptr,
1746			unsigned int *pif_rsp_wrptr)
1747{
1748	int i, j;
1749	u32 cfg, val, req, rsp;
1750
1751	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1752	if (cfg & F_LADBGEN)
1753		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1754
1755	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
1756	req = G_POLADBGWRPTR(val);
1757	rsp = G_PILADBGWRPTR(val);
1758	if (pif_req_wrptr)
1759		*pif_req_wrptr = req;
1760	if (pif_rsp_wrptr)
1761		*pif_rsp_wrptr = rsp;
1762
1763	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
1764		for (j = 0; j < 6; j++) {
1765			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
1766				     V_PILADBGRDPTR(rsp));
1767			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
1768			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
1769			req++;
1770			rsp++;
1771		}
1772		req = (req + 2) & M_POLADBGRDPTR;
1773		rsp = (rsp + 2) & M_PILADBGRDPTR;
1774	}
1775	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1776}
1777
1778void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1779{
1780	u32 cfg;
1781	int i, j, idx;
1782
1783	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1784	if (cfg & F_LADBGEN)
1785		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1786
1787	for (i = 0; i < CIM_MALA_SIZE; i++) {
1788		for (j = 0; j < 5; j++) {
1789			idx = 8 * i + j;
1790			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1791				     V_PILADBGRDPTR(idx));
1792			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1793			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1794		}
1795	}
1796	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1797}
1798
1799/**
1800 *	t4_tp_read_la - read TP LA capture buffer
1801 *	@adap: the adapter
1802 *	@la_buf: where to store the LA data
1803 *	@wrptr: the HW write pointer within the capture buffer
1804 *
1805 *	Reads the contents of the TP LA buffer with the most recent entry at
1806 *	the end of the returned data and with the entry at @wrptr first.
1807 *	We leave the LA in the running state we find it in.
1808 */
1809void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1810{
1811	bool last_incomplete;
1812	unsigned int i, cfg, val, idx;
1813
1814	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1815	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1816		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1817			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1818
1819	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1820	idx = G_DBGLAWPTR(val);
1821	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1822	if (last_incomplete)
1823		idx = (idx + 1) & M_DBGLARPTR;
1824	if (wrptr)
1825		*wrptr = idx;
1826
1827	val &= 0xffff;
1828	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1829	val |= adap->params.tp.la_mask;
1830
1831	for (i = 0; i < TPLA_SIZE; i++) {
1832		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1833		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1834		idx = (idx + 1) & M_DBGLARPTR;
1835	}
1836
1837	/* Wipe out last entry if it isn't valid */
1838	if (last_incomplete)
1839		la_buf[TPLA_SIZE - 1] = ~0ULL;
1840
1841	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1842		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1843			     cfg | adap->params.tp.la_mask);
1844}
1845
1846void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1847{
1848	unsigned int i, j;
1849
1850	for (i = 0; i < 8; i++) {
1851		u32 *p = la_buf + i;
1852
1853		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1854		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1855		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1856		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1857			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1858	}
1859}
1860
1861#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1862		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1863		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1864
1865/**
1866 *	t4_link_start - apply link configuration to MAC/PHY
1867 *	@adap: the adapter
1868 *	@mbox: mbox to use for the FW command
1869 *	@port: the port id
1870 *	@lc: the requested link configuration
 *
1871 *	Set up a port's MAC and PHY according to a desired link configuration.
1872 *	- If the PHY can auto-negotiate, first decide what to advertise, then
1873 *	  enable/disable auto-negotiation as desired, and reset.
1874 *	- If the PHY does not auto-negotiate, just reset it.
1875 *	- If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
1876 *	  otherwise do it later based on the outcome of auto-negotiation.
1877 */
1878int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1879		  struct link_config *lc)
1880{
1881	struct fw_port_cmd c;
1882	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1883
1884	lc->link_ok = 0;
1885	if (lc->requested_fc & PAUSE_RX)
1886		fc |= FW_PORT_CAP_FC_RX;
1887	if (lc->requested_fc & PAUSE_TX)
1888		fc |= FW_PORT_CAP_FC_TX;
1889
1890	memset(&c, 0, sizeof(c));
1891	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1892			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1893	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1894				  FW_LEN16(c));
1895
1896	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1897		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1898		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1899	} else if (lc->autoneg == AUTONEG_DISABLE) {
1900		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1901		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1902	} else
1903		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1904
1905	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1906}
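
/*
 * Illustrative sketch: force a fixed 1G link with both pause directions.
 * Treating the port index as the FW port id and using adap->mbox are
 * assumptions about the caller, not requirements of t4_link_start.
 */
static inline int
example_force_1g(struct adapter *adap, unsigned int port,
		 struct link_config *lc)
{
	lc->autoneg = AUTONEG_DISABLE;
	lc->requested_speed = FW_PORT_CAP_SPEED_1G;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	return t4_link_start(adap, adap->mbox, port, lc);
}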
1907
1908/**
1909 *	t4_restart_aneg - restart autonegotiation
1910 *	@adap: the adapter
1911 *	@mbox: mbox to use for the FW command
1912 *	@port: the port id
1913 *
1914 *	Restarts autonegotiation for the selected port.
1915 */
1916int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1917{
1918	struct fw_port_cmd c;
1919
1920	memset(&c, 0, sizeof(c));
1921	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1922			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1923	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1924				  FW_LEN16(c));
1925	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1926	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1927}
1928
1929struct intr_info {
1930	unsigned int mask;       /* bits to check in interrupt status */
1931	const char *msg;         /* message to print or NULL */
1932	short stat_idx;          /* stat counter to increment or -1 */
1933	unsigned short fatal;    /* whether the condition reported is fatal */
1934};
1935
1936/**
1937 *	t4_handle_intr_status - table driven interrupt handler
1938 *	@adapter: the adapter that generated the interrupt
1939 *	@reg: the interrupt status register to process
1940 *	@acts: table of interrupt actions
1941 *
1942 *	A table driven interrupt handler that applies a set of masks to an
1943 *	interrupt status word and performs the corresponding actions if the
1944 *	interrupts described by the mask have occurred.  The actions include
1945 *	optionally emitting a warning or alert message.  The table is terminated
1946 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1947 *	conditions.
1948 */
1949static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1950				 const struct intr_info *acts)
1951{
1952	int fatal = 0;
1953	unsigned int mask = 0;
1954	unsigned int status = t4_read_reg(adapter, reg);
1955
1956	for ( ; acts->mask; ++acts) {
1957		if (!(status & acts->mask))
1958			continue;
1959		if (acts->fatal) {
1960			fatal++;
1961			CH_ALERT(adapter, "%s (0x%x)\n",
1962				 acts->msg, status & acts->mask);
1963		} else if (acts->msg)
1964			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1965					  acts->msg, status & acts->mask);
1966		mask |= acts->mask;
1967	}
1968	status &= mask;
1969	if (status)                           /* clear processed interrupts */
1970		t4_write_reg(adapter, reg, status);
1971	return fatal;
1972}
1973
1974/*
1975 * Interrupt handler for the PCIE module.
1976 */
1977static void pcie_intr_handler(struct adapter *adapter)
1978{
1979	static struct intr_info sysbus_intr_info[] = {
1980		{ F_RNPP, "RXNP array parity error", -1, 1 },
1981		{ F_RPCP, "RXPC array parity error", -1, 1 },
1982		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1983		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1984		{ F_RFTP, "RXFT array parity error", -1, 1 },
1985		{ 0 }
1986	};
1987	static struct intr_info pcie_port_intr_info[] = {
1988		{ F_TPCP, "TXPC array parity error", -1, 1 },
1989		{ F_TNPP, "TXNP array parity error", -1, 1 },
1990		{ F_TFTP, "TXFT array parity error", -1, 1 },
1991		{ F_TCAP, "TXCA array parity error", -1, 1 },
1992		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1993		{ F_RCAP, "RXCA array parity error", -1, 1 },
1994		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1995		{ F_RDPE, "Rx data parity error", -1, 1 },
1996		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1997		{ 0 }
1998	};
1999	static struct intr_info pcie_intr_info[] = {
2000		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
2001		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
2002		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
2003		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2004		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2005		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2006		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2007		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
2008		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
2009		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2010		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
2011		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2012		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2013		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
2014		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2015		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2016		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
2017		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2018		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2019		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2020		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2021		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
2022		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
2023		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2024		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
2025		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
2026		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
2027		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
2028		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
2029		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
2030		  0 },
2031		{ 0 }
2032	};
2033
2034	static struct intr_info t5_pcie_intr_info[] = {
2035		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
2036		  -1, 1 },
2037		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
2038		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
2039		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
2040		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
2041		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
2042		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
2043		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
2044		  -1, 1 },
2045		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
2046		  -1, 1 },
2047		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
2048		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
2049		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
2050		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
2051		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
2052		  -1, 1 },
2053		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
2054		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
2055		{ F_HREQWRPERR, "PCI HMA channel write request parity error", -1, 1 },
2056		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
2057		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
2058		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
2059		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
2060		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
2061		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
2062		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
2063		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
2064		  -1, 1 },
2065		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
2066		  -1, 1 },
2067		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
2068		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
2069		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
2070		{ F_READRSPERR, "Outbound read error", -1,
2071		  0 },
2072		{ 0 }
2073	};
2074
2075	int fat;
2076
2077	fat = t4_handle_intr_status(adapter,
2078				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2079				    sysbus_intr_info) +
2080	      t4_handle_intr_status(adapter,
2081				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2082				    pcie_port_intr_info) +
2083	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
2084				    is_t4(adapter) ?
2085				    pcie_intr_info : t5_pcie_intr_info);
2086	if (fat)
2087		t4_fatal_err(adapter);
2088}
2089
2090/*
2091 * TP interrupt handler.
2092 */
2093static void tp_intr_handler(struct adapter *adapter)
2094{
2095	static struct intr_info tp_intr_info[] = {
2096		{ 0x3fffffff, "TP parity error", -1, 1 },
2097		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
2098		{ 0 }
2099	};
2100
2101	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
2102		t4_fatal_err(adapter);
2103}
2104
2105/*
2106 * SGE interrupt handler.
2107 */
2108static void sge_intr_handler(struct adapter *adapter)
2109{
2110	u64 v;
2111	u32 err;
2112
2113	static struct intr_info sge_intr_info[] = {
2114		{ F_ERR_CPL_EXCEED_IQE_SIZE,
2115		  "SGE received CPL exceeding IQE size", -1, 1 },
2116		{ F_ERR_INVALID_CIDX_INC,
2117		  "SGE GTS CIDX increment too large", -1, 0 },
2118		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
2119		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
2120		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
2121		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
2122		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
2123		  0 },
2124		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
2125		  0 },
2126		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
2127		  0 },
2128		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
2129		  0 },
2130		{ F_ERR_ING_CTXT_PRIO,
2131		  "SGE too many priority ingress contexts", -1, 0 },
2132		{ F_ERR_EGR_CTXT_PRIO,
2133		  "SGE too many priority egress contexts", -1, 0 },
2134		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
2135		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
2136		{ 0 }
2137	};
2138
2139	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
2140	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2141	if (v) {
2142		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2143			 (unsigned long long)v);
2144		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2145		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2146	}
2147
2148	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2149
2150	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2151	if (err & F_ERROR_QID_VALID) {
2152		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2153		if (err & F_UNCAPTURED_ERROR)
2154			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2155		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2156			     F_UNCAPTURED_ERROR);
2157	}
2158
2159	if (v != 0)
2160		t4_fatal_err(adapter);
2161}
2162
2163#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2164		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2165#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2166		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2167
2168/*
2169 * CIM interrupt handler.
2170 */
2171static void cim_intr_handler(struct adapter *adapter)
2172{
2173	static struct intr_info cim_intr_info[] = {
2174		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2175		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2176		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2177		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2178		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2179		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2180		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2181		{ 0 }
2182	};
2183	static struct intr_info cim_upintr_info[] = {
2184		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2185		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2186		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2187		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2188		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2189		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2190		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2191		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2192		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2193		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2194		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2195		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2196		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2197		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2198		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2199		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2200		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2201		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2202		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2203		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2204		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2205		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2206		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2207		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2208		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2209		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2210		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2211		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2212		{ 0 }
2213	};
2214	int fat;
2215
2216	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2217		t4_report_fw_error(adapter);
2218
2219	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2220				    cim_intr_info) +
2221	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2222				    cim_upintr_info);
2223	if (fat)
2224		t4_fatal_err(adapter);
2225}
2226
2227/*
2228 * ULP RX interrupt handler.
2229 */
2230static void ulprx_intr_handler(struct adapter *adapter)
2231{
2232	static struct intr_info ulprx_intr_info[] = {
2233		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2234		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2235		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2236		{ 0 }
2237	};
2238
2239	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2240		t4_fatal_err(adapter);
2241}
2242
2243/*
2244 * ULP TX interrupt handler.
2245 */
2246static void ulptx_intr_handler(struct adapter *adapter)
2247{
2248	static struct intr_info ulptx_intr_info[] = {
2249		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2250		  0 },
2251		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2252		  0 },
2253		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2254		  0 },
2255		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2256		  0 },
2257		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2258		{ 0 }
2259	};
2260
2261	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2262		t4_fatal_err(adapter);
2263}
2264
2265/*
2266 * PM TX interrupt handler.
2267 */
2268static void pmtx_intr_handler(struct adapter *adapter)
2269{
2270	static struct intr_info pmtx_intr_info[] = {
2271		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2272		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2273		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2274		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2275		{ 0xffffff0, "PMTX framing error", -1, 1 },
2276		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2277		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2278		  1 },
2279		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2280		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2281		{ 0 }
2282	};
2283
2284	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2285		t4_fatal_err(adapter);
2286}
2287
2288/*
2289 * PM RX interrupt handler.
2290 */
2291static void pmrx_intr_handler(struct adapter *adapter)
2292{
2293	static struct intr_info pmrx_intr_info[] = {
2294		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2295		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2296		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2297		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2298		  1 },
2299		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2300		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2301		{ 0 }
2302	};
2303
2304	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2305		t4_fatal_err(adapter);
2306}
2307
2308/*
2309 * CPL switch interrupt handler.
2310 */
2311static void cplsw_intr_handler(struct adapter *adapter)
2312{
2313	static struct intr_info cplsw_intr_info[] = {
2314		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2315		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2316		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2317		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2318		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2319		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2320		{ 0 }
2321	};
2322
2323	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2324		t4_fatal_err(adapter);
2325}
2326
2327/*
2328 * LE interrupt handler.
2329 */
2330static void le_intr_handler(struct adapter *adap)
2331{
2332	static struct intr_info le_intr_info[] = {
2333		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2334		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2335		{ F_PARITYERR, "LE parity error", -1, 1 },
2336		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2337		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2338		{ 0 }
2339	};
2340
2341	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2342		t4_fatal_err(adap);
2343}
2344
2345/*
2346 * MPS interrupt handler.
2347 */
2348static void mps_intr_handler(struct adapter *adapter)
2349{
2350	static struct intr_info mps_rx_intr_info[] = {
2351		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2352		{ 0 }
2353	};
2354	static struct intr_info mps_tx_intr_info[] = {
2355		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2356		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2357		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2358		  -1, 1 },
2359		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2360		  -1, 1 },
2361		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2362		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2363		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2364		{ 0 }
2365	};
2366	static struct intr_info mps_trc_intr_info[] = {
2367		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2368		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2369		  1 },
2370		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2371		{ 0 }
2372	};
2373	static struct intr_info mps_stat_sram_intr_info[] = {
2374		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2375		{ 0 }
2376	};
2377	static struct intr_info mps_stat_tx_intr_info[] = {
2378		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2379		{ 0 }
2380	};
2381	static struct intr_info mps_stat_rx_intr_info[] = {
2382		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2383		{ 0 }
2384	};
2385	static struct intr_info mps_cls_intr_info[] = {
2386		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2387		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2388		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2389		{ 0 }
2390	};
2391
2392	int fat;
2393
2394	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2395				    mps_rx_intr_info) +
2396	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2397				    mps_tx_intr_info) +
2398	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2399				    mps_trc_intr_info) +
2400	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2401				    mps_stat_sram_intr_info) +
2402	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2403				    mps_stat_tx_intr_info) +
2404	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2405				    mps_stat_rx_intr_info) +
2406	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2407				    mps_cls_intr_info);
2408
2409	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2410	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2411	if (fat)
2412		t4_fatal_err(adapter);
2413}
2414
2415#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2416
2417/*
2418 * EDC/MC interrupt handler.
2419 */
2420static void mem_intr_handler(struct adapter *adapter, int idx)
2421{
2422	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2423
2424	unsigned int addr, cnt_addr, v;
2425
2426	if (idx <= MEM_EDC1) {
2427		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2428		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2429	} else {
2430		if (is_t4(adapter)) {
2431			addr = A_MC_INT_CAUSE;
2432			cnt_addr = A_MC_ECC_STATUS;
2433		} else {
2434			addr = A_MC_P_INT_CAUSE;
2435			cnt_addr = A_MC_P_ECC_STATUS;
2436		}
2437	}
2438
2439	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2440	if (v & F_PERR_INT_CAUSE)
2441		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2442	if (v & F_ECC_CE_INT_CAUSE) {
2443		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2444
2445		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2446		CH_WARN_RATELIMIT(adapter,
2447				  "%u %s correctable ECC data error%s\n",
2448				  cnt, name[idx], cnt > 1 ? "s" : "");
2449	}
2450	if (v & F_ECC_UE_INT_CAUSE)
2451		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2452			 name[idx]);
2453
2454	t4_write_reg(adapter, addr, v);
2455	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2456		t4_fatal_err(adapter);
2457}
2458
2459/*
2460 * MA interrupt handler.
2461 */
2462static void ma_intr_handler(struct adapter *adapter)
2463{
2464	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2465
2466	if (status & F_MEM_PERR_INT_CAUSE)
2467		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2468			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2469	if (status & F_MEM_WRAP_INT_CAUSE) {
2470		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2471		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2472			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2473			 G_MEM_WRAP_ADDRESS(v) << 4);
2474	}
2475	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2476	t4_fatal_err(adapter);
2477}
2478
2479/*
2480 * SMB interrupt handler.
2481 */
2482static void smb_intr_handler(struct adapter *adap)
2483{
2484	static struct intr_info smb_intr_info[] = {
2485		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2486		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2487		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2488		{ 0 }
2489	};
2490
2491	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2492		t4_fatal_err(adap);
2493}
2494
2495/*
2496 * NC-SI interrupt handler.
2497 */
2498static void ncsi_intr_handler(struct adapter *adap)
2499{
2500	static struct intr_info ncsi_intr_info[] = {
2501		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2502		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2503		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2504		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2505		{ 0 }
2506	};
2507
2508	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2509		t4_fatal_err(adap);
2510}
2511
2512/*
2513 * XGMAC interrupt handler.
2514 */
2515static void xgmac_intr_handler(struct adapter *adap, int port)
2516{
2517	u32 v, int_cause_reg;
2518
2519	if (is_t4(adap))
2520		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2521	else
2522		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2523
2524	v = t4_read_reg(adap, int_cause_reg);
2525	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2526	if (!v)
2527		return;
2528
2529	if (v & F_TXFIFO_PRTY_ERR)
2530		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2531	if (v & F_RXFIFO_PRTY_ERR)
2532		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2533	t4_write_reg(adap, int_cause_reg, v);
2534	t4_fatal_err(adap);
2535}
2536
2537/*
2538 * PL interrupt handler.
2539 */
2540static void pl_intr_handler(struct adapter *adap)
2541{
2542	static struct intr_info pl_intr_info[] = {
2543		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2544		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2545		{ 0 }
2546	};
2547
2548	static struct intr_info t5_pl_intr_info[] = {
2549		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2550		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2551		{ 0 }
2552	};
2553
2554	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2555	    is_t4(adap) ?  pl_intr_info : t5_pl_intr_info))
2556		t4_fatal_err(adap);
2557}
2558
2559#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2560#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2561		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2562		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2563
2564/**
2565 *	t4_slow_intr_handler - control path interrupt handler
2566 *	@adapter: the adapter
2567 *
2568 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2569 *	The designation 'slow' is because it involves register reads, while
2570 *	data interrupts typically don't involve any MMIOs.
2571 */
2572int t4_slow_intr_handler(struct adapter *adapter)
2573{
2574	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2575
2576	if (!(cause & GLBL_INTR_MASK))
2577		return 0;
2578	if (cause & F_CIM)
2579		cim_intr_handler(adapter);
2580	if (cause & F_MPS)
2581		mps_intr_handler(adapter);
2582	if (cause & F_NCSI)
2583		ncsi_intr_handler(adapter);
2584	if (cause & F_PL)
2585		pl_intr_handler(adapter);
2586	if (cause & F_SMB)
2587		smb_intr_handler(adapter);
2588	if (cause & F_XGMAC0)
2589		xgmac_intr_handler(adapter, 0);
2590	if (cause & F_XGMAC1)
2591		xgmac_intr_handler(adapter, 1);
2592	if (cause & F_XGMAC_KR0)
2593		xgmac_intr_handler(adapter, 2);
2594	if (cause & F_XGMAC_KR1)
2595		xgmac_intr_handler(adapter, 3);
2596	if (cause & F_PCIE)
2597		pcie_intr_handler(adapter);
2598	if (cause & F_MC)
2599		mem_intr_handler(adapter, MEM_MC);
2600	if (cause & F_EDC0)
2601		mem_intr_handler(adapter, MEM_EDC0);
2602	if (cause & F_EDC1)
2603		mem_intr_handler(adapter, MEM_EDC1);
2604	if (cause & F_LE)
2605		le_intr_handler(adapter);
2606	if (cause & F_TP)
2607		tp_intr_handler(adapter);
2608	if (cause & F_MA)
2609		ma_intr_handler(adapter);
2610	if (cause & F_PM_TX)
2611		pmtx_intr_handler(adapter);
2612	if (cause & F_PM_RX)
2613		pmrx_intr_handler(adapter);
2614	if (cause & F_ULP_RX)
2615		ulprx_intr_handler(adapter);
2616	if (cause & F_CPL_SWITCH)
2617		cplsw_intr_handler(adapter);
2618	if (cause & F_SGE)
2619		sge_intr_handler(adapter);
2620	if (cause & F_ULP_TX)
2621		ulptx_intr_handler(adapter);
2622
2623	/* Clear the interrupts just processed for which we are the master. */
2624	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2625	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2626	return 1;
2627}
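
/*
 * Illustrative sketch: the PF that owns the global interrupts might simply
 * forward its slow-path interrupts here.  "example_isr" and its wiring are
 * hypothetical, not part of this file.
 */
static inline void
example_isr(void *arg)
{
	struct adapter *adap = arg;

	(void) t4_slow_intr_handler(adap);	/* returns 1 if anything was handled */
}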
2628
2629/**
2630 *	t4_intr_enable - enable interrupts
2631 *	@adapter: the adapter whose interrupts should be enabled
2632 *
2633 *	Enable PF-specific interrupts for the calling function and the top-level
2634 *	interrupt concentrator for global interrupts.  Interrupts are already
2635 *	enabled at each module; here we just enable the roots of the interrupt
2636 *	hierarchies.
2637 *
2638 *	Note: this function should be called only when the driver manages
2639 *	non PF-specific interrupts from the various HW modules.  Only one PCI
2640 *	function at a time should be doing this.
2641 */
2642void t4_intr_enable(struct adapter *adapter)
2643{
2644	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2645
2646	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2647		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2648		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2649		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2650		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2651		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2652		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2653		     F_EGRESS_SIZE_ERR);
2654	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2655	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2656}
2657
2658/**
2659 *	t4_intr_disable - disable interrupts
2660 *	@adapter: the adapter whose interrupts should be disabled
2661 *
2662 *	Disable interrupts.  We only disable the top-level interrupt
2663 *	concentrators.  The caller must be a PCI function managing global
2664 *	interrupts.
2665 */
2666void t4_intr_disable(struct adapter *adapter)
2667{
2668	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2669
2670	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2671	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2672}
2673
2674/**
2675 *	t4_intr_clear - clear all interrupts
2676 *	@adapter: the adapter whose interrupts should be cleared
2677 *
2678 *	Clears all interrupts.  The caller must be a PCI function managing
2679 *	global interrupts.
2680 */
2681void t4_intr_clear(struct adapter *adapter)
2682{
2683	static const unsigned int cause_reg[] = {
2684		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2685		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2686		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2687		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2688		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2689		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2690		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2691		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2692		A_TP_INT_CAUSE,
2693		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2694		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2695		A_MPS_RX_PERR_INT_CAUSE,
2696		A_CPL_INTR_CAUSE,
2697		MYPF_REG(A_PL_PF_INT_CAUSE),
2698		A_PL_PL_INT_CAUSE,
2699		A_LE_DB_INT_CAUSE,
2700	};
2701
2702	unsigned int i;
2703
2704	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2705		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2706
2707	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
2708				A_MC_P_INT_CAUSE, 0xffffffff);
2709
2710	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2711	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2712}
2713
2714/**
2715 *	hash_mac_addr - return the hash value of a MAC address
2716 *	@addr: the 48-bit Ethernet MAC address
2717 *
2718 *	Hashes a MAC address according to the hash function used by HW inexact
2719 *	(hash) address matching.
2720 */
2721static int hash_mac_addr(const u8 *addr)
2722{
2723	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2724	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2725	a ^= b;
2726	a ^= (a >> 12);
2727	a ^= (a >> 6);
2728	return a & 0x3f;
2729}
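
/*
 * Worked example: for 00:00:00:00:00:01, a = 0x000000 and b = 0x000001,
 * so a ^= b leaves a = 1; both folding shifts then xor in zeros, and the
 * final mask gives hash 1.  All 48 address bits fold into a 6-bit index.
 */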
2730
2731/**
2732 *	t4_config_rss_range - configure a portion of the RSS mapping table
2733 *	@adapter: the adapter
2734 *	@mbox: mbox to use for the FW command
2735 *	@viid: virtual interface whose RSS subtable is to be written
2736 *	@start: start entry in the table to write
2737 *	@n: how many table entries to write
2738 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2739 *	@nrspq: number of values in @rspq
2740 *
2741 *	Programs the selected part of the VI's RSS mapping table with the
2742 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2743 *	until the full table range is populated.
2744 *
2745 *	The caller must ensure the values in @rspq are in the range allowed for
2746 *	@viid.
2747 */
2748int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2749			int start, int n, const u16 *rspq, unsigned int nrspq)
2750{
2751	int ret;
2752	const u16 *rsp = rspq;
2753	const u16 *rsp_end = rspq + nrspq;
2754	struct fw_rss_ind_tbl_cmd cmd;
2755
2756	memset(&cmd, 0, sizeof(cmd));
2757	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2758			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2759			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2760	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2761
2762
2763	/*
2764	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2765	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2766	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2767	 * reserved.
2768	 */
2769	while (n > 0) {
2770		int nq = min(n, 32);
2771		int nq_packed = 0;
2772		__be32 *qp = &cmd.iq0_to_iq2;
2773
2774		/*
2775		 * Set up the firmware RSS command header to send the next
2776		 * "nq" Ingress Queue IDs to the firmware.
2777		 */
2778		cmd.niqid = htons(nq);
2779		cmd.startidx = htons(start);
2780
2781		/*
2782		 * "nq" more done for the start of the next loop.
2783		 */
2784		start += nq;
2785		n -= nq;
2786
2787		/*
2788		 * While there are still Ingress Queue IDs to stuff into the
2789		 * current firmware RSS command, retrieve them from the
2790		 * Ingress Queue ID array and insert them into the command.
2791		 */
2792		while (nq > 0) {
2793			/*
2794			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2795			 * around the Ingress Queue ID array if necessary) and
2796			 * insert them into the firmware RSS command at the
2797			 * current 3-tuple position within the command.
2798			 */
2799			u16 qbuf[3];
2800			u16 *qbp = qbuf;
2801			int nqbuf = min(3, nq);
2802
2803			nq -= nqbuf;
2804			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2805			while (nqbuf && nq_packed < 32) {
2806				nqbuf--;
2807				nq_packed++;
2808				*qbp++ = *rsp++;
2809				if (rsp >= rsp_end)
2810					rsp = rspq;
2811			}
2812			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2813					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2814					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2815		}
2816
2817		/*
2818		 * Send this portion of the RSS table update to the firmware;
2819		 * bail out on any errors.
2820		 */
2821		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2822		if (ret)
2823			return ret;
2824	}
2825
2826	return 0;
2827}
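
/*
 * Illustrative sketch: spread a VI's entire 128-slot subtable across four
 * ingress queues.  The subtable size, the queue ids, and adap->mbox are
 * assumptions about the caller, not requirements of the function above.
 */
static inline int
example_setup_rss(struct adapter *adap, unsigned int viid)
{
	static const u16 rspq[4] = { 100, 101, 102, 103 }; /* hypothetical */

	/* The 4 ids repeat until all 128 entries are written. */
	return t4_config_rss_range(adap, adap->mbox, viid, 0, 128, rspq, 4);
}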
2828
2829/**
2830 *	t4_config_glbl_rss - configure the global RSS mode
2831 *	@adapter: the adapter
2832 *	@mbox: mbox to use for the FW command
2833 *	@mode: global RSS mode
2834 *	@flags: mode-specific flags
2835 *
2836 *	Sets the global RSS mode.
2837 */
2838int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2839		       unsigned int flags)
2840{
2841	struct fw_rss_glb_config_cmd c;
2842
2843	memset(&c, 0, sizeof(c));
2844	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2845			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2846	c.retval_len16 = htonl(FW_LEN16(c));
2847	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2848		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2849	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2850		c.u.basicvirtual.mode_pkd =
2851			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2852		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2853	} else
2854		return -EINVAL;
2855	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2856}
2857
2858/**
2859 *	t4_config_vi_rss - configure per VI RSS settings
2860 *	@adapter: the adapter
2861 *	@mbox: mbox to use for the FW command
2862 *	@viid: the VI id
2863 *	@flags: RSS flags
2864 *	@defq: id of the default RSS queue for the VI.
2865 *
2866 *	Configures VI-specific RSS properties.
2867 */
2868int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2869		     unsigned int flags, unsigned int defq)
2870{
2871	struct fw_rss_vi_config_cmd c;
2872
2873	memset(&c, 0, sizeof(c));
2874	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2875			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2876			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2877	c.retval_len16 = htonl(FW_LEN16(c));
2878	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2879					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2880	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2881}
2882
2883/* Read an RSS table row */
2884static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2885{
2886	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2887	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2888				   5, 0, val);
2889}
2890
2891/**
2892 *	t4_read_rss - read the contents of the RSS mapping table
2893 *	@adapter: the adapter
2894 *	@map: holds the contents of the RSS mapping table
2895 *
2896 *	Reads the contents of the RSS hash->queue mapping table.
2897 */
2898int t4_read_rss(struct adapter *adapter, u16 *map)
2899{
2900	u32 val;
2901	int i, ret;
2902
2903	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2904		ret = rd_rss_row(adapter, i, &val);
2905		if (ret)
2906			return ret;
2907		*map++ = G_LKPTBLQUEUE0(val);
2908		*map++ = G_LKPTBLQUEUE1(val);
2909	}
2910	return 0;
2911}
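
/*
 * Illustrative sketch: the map[] buffer must hold all RSS_NENTRIES 16-bit
 * entries (each row read above yields two of them).  Scanning for the
 * largest queue id is just an example use of the result.
 */
static inline int
example_max_rss_queue(struct adapter *adap, u16 map[RSS_NENTRIES])
{
	u16 maxq = 0;
	int i, ret;

	ret = t4_read_rss(adap, map);
	if (ret)
		return ret;
	for (i = 0; i < RSS_NENTRIES; i++)
		maxq = max(maxq, map[i]);
	return maxq;
}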
2912
2913/**
2914 *	t4_read_rss_key - read the global RSS key
2915 *	@adap: the adapter
2916 *	@key: 10-entry array holding the 320-bit RSS key
2917 *
2918 *	Reads the global 320-bit RSS key.
2919 */
2920void t4_read_rss_key(struct adapter *adap, u32 *key)
2921{
2922	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2923			 A_TP_RSS_SECRET_KEY0);
2924}
2925
2926/**
2927 *	t4_write_rss_key - program one of the RSS keys
2928 *	@adap: the adapter
2929 *	@key: 10-entry array holding the 320-bit RSS key
2930 *	@idx: which RSS key to write
2931 *
2932 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2933 *	0..15 the corresponding entry in the RSS key table is written,
2934 *	otherwise the global RSS key is written.
2935 */
2936void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2937{
2938	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2939			  A_TP_RSS_SECRET_KEY0);
2940	if (idx >= 0 && idx < 16)
2941		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2942			     V_KEYWRADDR(idx) | F_KEYWREN);
2943}
2944
2945/**
2946 *	t4_read_rss_pf_config - read PF RSS Configuration Table
2947 *	@adapter: the adapter
2948 *	@index: the entry in the PF RSS table to read
2949 *	@valp: where to store the returned value
2950 *
2951 *	Reads the PF RSS Configuration Table at the specified index and returns
2952 *	the value found there.
2953 */
2954void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2955{
2956	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2957			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2958}
2959
2960/**
2961 *	t4_write_rss_pf_config - write PF RSS Configuration Table
2962 *	@adapter: the adapter
2963 *	@index: the entry in the PF RSS table to write
2964 *	@val: the value to store
2965 *
2966 *	Writes the PF RSS Configuration Table at the specified index with the
2967 *	specified value.
2968 */
2969void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2970{
2971	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2972			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2973}
2974
2975/**
2976 *	t4_read_rss_vf_config - read VF RSS Configuration Table
2977 *	@adapter: the adapter
2978 *	@index: the entry in the VF RSS table to read
2979 *	@vfl: where to store the returned VFL
2980 *	@vfh: where to store the returned VFH
2981 *
2982 *	Reads the VF RSS Configuration Table at the specified index and returns
2983 *	the (VFL, VFH) values found there.
2984 */
2985void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2986			   u32 *vfl, u32 *vfh)
2987{
2988	u32 vrt;
2989
2990	/*
2991	 * Request that the index'th VF Table values be read into VFL/VFH.
2992	 */
2993	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2994	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2995	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2996	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2997
2998	/*
2999	 * Grab the VFL/VFH values ...
3000	 */
3001	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3002			 vfl, 1, A_TP_RSS_VFL_CONFIG);
3003	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3004			 vfh, 1, A_TP_RSS_VFH_CONFIG);
3005}
3006
3007/**
3008 *	t4_write_rss_vf_config - write VF RSS Configuration Table
3009 *
3010 *	@adapter: the adapter
3011 *	@index: the entry in the VF RSS table to write
3012 *	@vfl: the VFL to store
3013 *	@vfh: the VFH to store
3014 *
3015 *	Writes the VF RSS Configuration Table at the specified index with the
3016 *	specified (VFL, VFH) values.
3017 */
3018void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
3019			    u32 vfl, u32 vfh)
3020{
3021	u32 vrt;
3022
3023	/*
3024	 * Load up VFL/VFH with the values to be written ...
3025	 */
3026	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3027			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
3028	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3029			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
3030
3031	/*
3032	 * Write the VFL/VFH into the VF Table at the index'th location.
3033	 */
3034	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
3035	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
3036	vrt |= V_VFWRADDR(index) | F_VFWREN;
3037	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
3038}
3039
3040/**
3041 *	t4_read_rss_pf_map - read PF RSS Map
3042 *	@adapter: the adapter
3043 *
3044 *	Reads the PF RSS Map register and returns its value.
3045 */
3046u32 t4_read_rss_pf_map(struct adapter *adapter)
3047{
3048	u32 pfmap;
3049
3050	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3051			 &pfmap, 1, A_TP_RSS_PF_MAP);
3052	return pfmap;
3053}
3054
3055/**
3056 *	t4_write_rss_pf_map - write PF RSS Map
3057 *	@adapter: the adapter
3058 *	@pfmap: PF RSS Map value
3059 *
3060 *	Writes the specified value to the PF RSS Map register.
3061 */
3062void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
3063{
3064	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3065			  &pfmap, 1, A_TP_RSS_PF_MAP);
3066}
3067
3068/**
3069 *	t4_read_rss_pf_mask - read PF RSS Mask
3070 *	@adapter: the adapter
3071 *
3072 *	Reads the PF RSS Mask register and returns its value.
3073 */
3074u32 t4_read_rss_pf_mask(struct adapter *adapter)
3075{
3076	u32 pfmask;
3077
3078	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3079			 &pfmask, 1, A_TP_RSS_PF_MSK);
3080	return pfmask;
3081}
3082
3083/**
3084 *	t4_write_rss_pf_mask - write PF RSS Mask
3085 *	@adapter: the adapter
3086 *	@pfmask: PF RSS Mask value
3087 *
3088 *	Writes the specified value to the PF RSS Mask register.
3089 */
3090void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
3091{
3092	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
3093			  &pfmask, 1, A_TP_RSS_PF_MSK);
3094}
3095
3096/**
3097 *	t4_set_filter_mode - configure the optional components of filter tuples
3098 *	@adap: the adapter
3099 *	@mode_map: a bitmap selecting which optional filter components to enable
3100 *
3101 *	Sets the filter mode by selecting the optional components to enable
3102 *	in filter tuples.  Returns 0 on success and a negative error if the
3103 *	requested mode needs more bits than are available for optional
3104 *	components.
3105 */
3106int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
3107{
3108	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
3109
3110	int i, nbits = 0;
3111
3112	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
3113		if (mode_map & (1 << i))
3114			nbits += width[i];
3115	if (nbits > FILTER_OPT_LEN)
3116		return -EINVAL;
3117	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
3118			  A_TP_VLAN_PRI_MAP);
3119	return 0;
3120}
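
/*
 * Illustrative sketch: enable only the 1-bit FCoE and fragmentation
 * components, which trivially fits within FILTER_OPT_LEN.  The choice of
 * fields is an example, not a recommendation.
 */
static inline int
example_minimal_filter_mode(struct adapter *adap)
{
	return t4_set_filter_mode(adap,
	    (1 << S_FCOE) | (1 << S_FRAGMENTATION));
}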
3121
3122/**
3123 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
3124 *	@adap: the adapter
3125 *	@v4: holds the TCP/IP counter values
3126 *	@v6: holds the TCP/IPv6 counter values
3127 *
3128 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
3129 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
3130 */
3131void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
3132			 struct tp_tcp_stats *v6)
3133{
3134	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
3135
3136#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
3137#define STAT(x)     val[STAT_IDX(x)]
3138#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
3139
3140	if (v4) {
3141		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3142				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
3143		v4->tcpOutRsts = STAT(OUT_RST);
3144		v4->tcpInSegs  = STAT64(IN_SEG);
3145		v4->tcpOutSegs = STAT64(OUT_SEG);
3146		v4->tcpRetransSegs = STAT64(RXT_SEG);
3147	}
3148	if (v6) {
3149		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3150				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
3151		v6->tcpOutRsts = STAT(OUT_RST);
3152		v6->tcpInSegs  = STAT64(IN_SEG);
3153		v6->tcpOutSegs = STAT64(OUT_SEG);
3154		v6->tcpRetransSegs = STAT64(RXT_SEG);
3155	}
3156#undef STAT64
3157#undef STAT
3158#undef STAT_IDX
3159}
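
/*
 * Illustrative sketch: use the documented NULL to skip the IPv6 block and
 * gather only the IPv4 TCP counters.
 */
static inline u64
example_tcp4_retransmits(struct adapter *adap)
{
	struct tp_tcp_stats v4;

	t4_tp_get_tcp_stats(adap, &v4, NULL);
	return v4.tcpRetransSegs;
}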
3160
3161/**
3162 *	t4_tp_get_err_stats - read TP's error MIB counters
3163 *	@adap: the adapter
3164 *	@st: holds the counter values
3165 *
3166 *	Returns the values of TP's error counters.
3167 */
3168void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3169{
3170	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3171			 12, A_TP_MIB_MAC_IN_ERR_0);
3172	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3173			 8, A_TP_MIB_TNL_CNG_DROP_0);
3174	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3175			 4, A_TP_MIB_TNL_DROP_0);
3176	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3177			 4, A_TP_MIB_OFD_VLN_DROP_0);
3178	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3179			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3180	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3181			 2, A_TP_MIB_OFD_ARP_DROP);
3182}
3183
3184/**
3185 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3186 *	@adap: the adapter
3187 *	@st: holds the counter values
3188 *
3189 *	Returns the values of TP's proxy counters.
3190 */
3191void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3192{
3193	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3194			 4, A_TP_MIB_TNL_LPBK_0);
3195}
3196
3197/**
3198 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3199 *	@adap: the adapter
3200 *	@st: holds the counter values
3201 *
3202 *	Returns the values of TP's CPL counters.
3203 */
3204void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3205{
3206	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3207			 8, A_TP_MIB_CPL_IN_REQ_0);
3208}
3209
3210/**
3211 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3212 *	@adap: the adapter
3213 *	@st: holds the counter values
3214 *
3215 *	Returns the values of TP's RDMA counters.
3216 */
3217void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3218{
3219	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3220			 2, A_TP_MIB_RQE_DFR_MOD);
3221}
3222
3223/**
3224 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3225 *	@adap: the adapter
3226 *	@idx: the port index
3227 *	@st: holds the counter values
3228 *
3229 *	Returns the values of TP's FCoE counters for the selected port.
3230 */
3231void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3232		       struct tp_fcoe_stats *st)
3233{
3234	u32 val[2];
3235
3236	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3237			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3238	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3239			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3240	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3241			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3242	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3243}
3244
3245/**
3246 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3247 *	@adap: the adapter
3248 *	@st: holds the counter values
3249 *
3250 *	Returns the values of TP's counters for non-TCP directly-placed packets.
3251 */
3252void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3253{
3254	u32 val[4];
3255
3256	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3257			 A_TP_MIB_USM_PKTS);
3258	st->frames = val[0];
3259	st->drops = val[1];
3260	st->octets = ((u64)val[2] << 32) | val[3];
3261}
3262
3263/**
3264 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3265 *	@adap: the adapter
3266 *	@mtus: where to store the MTU values
3267 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3268 *
3269 *	Reads the HW path MTU table.
3270 */
3271void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3272{
3273	u32 v;
3274	int i;
3275
3276	for (i = 0; i < NMTUS; ++i) {
3277		t4_write_reg(adap, A_TP_MTU_TABLE,
3278			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3279		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3280		mtus[i] = G_MTUVALUE(v);
3281		if (mtu_log)
3282			mtu_log[i] = G_MTUWIDTH(v);
3283	}
3284}
3285
3286/**
3287 *	t4_read_cong_tbl - reads the congestion control table
3288 *	@adap: the adapter
3289 *	@incr: where to store the alpha values
3290 *
3291 *	Reads the additive increments programmed into the HW congestion
3292 *	control table.
3293 */
3294void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3295{
3296	unsigned int mtu, w;
3297
3298	for (mtu = 0; mtu < NMTUS; ++mtu)
3299		for (w = 0; w < NCCTRL_WIN; ++w) {
3300			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3301				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3302			incr[mtu][w] = (u16)t4_read_reg(adap,
3303						A_TP_CCTRL_TABLE) & 0x1fff;
3304		}
3305}
3306
3307/**
3308 *	t4_read_pace_tbl - read the pace table
3309 *	@adap: the adapter
3310 *	@pace_vals: holds the returned values
3311 *
3312 *	Returns the values of TP's pace table in microseconds.
3313 */
3314void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3315{
3316	unsigned int i, v;
3317
3318	for (i = 0; i < NTX_SCHED; i++) {
3319		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3320		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3321		pace_vals[i] = dack_ticks_to_usec(adap, v);
3322	}
3323}
3324
3325/**
3326 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3327 *	@adap: the adapter
3328 *	@addr: the indirect TP register address
3329 *	@mask: specifies the field within the register to modify
3330 *	@val: new value for the field
3331 *
3332 *	Sets a field of an indirect TP register to the given value.
3333 */
3334void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3335			    unsigned int mask, unsigned int val)
3336{
3337	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3338	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3339	t4_write_reg(adap, A_TP_PIO_DATA, val);
3340}
3341
3342/**
3343 *	init_cong_ctrl - initialize congestion control parameters
3344 *	@a: the alpha values for congestion control
3345 *	@b: the beta values for congestion control
3346 *
3347 *	Initialize the congestion control parameters.
3348 */
3349static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3350{
3351	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3352	a[9] = 2;
3353	a[10] = 3;
3354	a[11] = 4;
3355	a[12] = 5;
3356	a[13] = 6;
3357	a[14] = 7;
3358	a[15] = 8;
3359	a[16] = 9;
3360	a[17] = 10;
3361	a[18] = 14;
3362	a[19] = 17;
3363	a[20] = 21;
3364	a[21] = 25;
3365	a[22] = 30;
3366	a[23] = 35;
3367	a[24] = 45;
3368	a[25] = 60;
3369	a[26] = 80;
3370	a[27] = 100;
3371	a[28] = 200;
3372	a[29] = 300;
3373	a[30] = 400;
3374	a[31] = 500;
3375
3376	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3377	b[9] = b[10] = 1;
3378	b[11] = b[12] = 2;
3379	b[13] = b[14] = b[15] = b[16] = 3;
3380	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3381	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3382	b[28] = b[29] = 6;
3383	b[30] = b[31] = 7;
3384}
3385
3386/* The minimum additive increment value for the congestion control table */
3387#define CC_MIN_INCR 2U
3388
3389/**
3390 *	t4_load_mtus - write the MTU and congestion control HW tables
3391 *	@adap: the adapter
3392 *	@mtus: the values for the MTU table
3393 *	@alpha: the values for the congestion control alpha parameter
3394 *	@beta: the values for the congestion control beta parameter
3395 *
3396 *	Write the HW MTU table with the supplied MTUs and the high-speed
3397 *	congestion control table with the supplied alpha, beta, and MTUs.
3398 *	We write the two tables together because the additive increments
3399 *	depend on the MTUs.
3400 */
3401void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3402		  const unsigned short *alpha, const unsigned short *beta)
3403{
3404	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3405		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3406		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3407		28672, 40960, 57344, 81920, 114688, 163840, 229376
3408	};
3409
3410	unsigned int i, w;
3411
3412	for (i = 0; i < NMTUS; ++i) {
3413		unsigned int mtu = mtus[i];
3414		unsigned int log2 = fls(mtu);
3415
3416		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3417			log2--;
3418		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3419			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3420
3421		for (w = 0; w < NCCTRL_WIN; ++w) {
3422			unsigned int inc;
3423
3424			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3425				  CC_MIN_INCR);
3426
3427			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3428				     (w << 16) | (beta[w] << 13) | inc);
3429		}
3430	}
3431}
3432
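/*
 * Illustrative sketch only, not part of the driver (compiled out): program
 * the MTU and congestion control tables using the default alpha/beta values
 * from init_cong_ctrl() above.  The ascending NMTUS-entry MTU list here is
 * hypothetical.
 */
#if 0
static void example_load_mtus(struct adapter *adap)
{
	static const unsigned short mtus[NMTUS] = {
		88, 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002,
		2048, 4096, 4352, 8192, 9000
	};
	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];

	init_cong_ctrl(a, b);
	t4_load_mtus(adap, mtus, a, b);
}
#endif
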
3433/**
3434 *	t4_set_pace_tbl - set the pace table
3435 *	@adap: the adapter
3436 *	@pace_vals: the pace values in microseconds
3437 *	@start: index of the first entry in the HW pace table to set
3438 *	@n: how many entries to set
3439 *
3440 *	Sets (a subset of the) HW pace table.
3441 */
3442int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3443		     unsigned int start, unsigned int n)
3444{
3445	unsigned int vals[NTX_SCHED], i;
3446	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3447
3448	if (n > NTX_SCHED)
3449		return -ERANGE;
3450
3451	/* convert values from us to dack ticks, rounding to closest value */
3452	for (i = 0; i < n; i++, pace_vals++) {
3453		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3454		if (vals[i] > 0x7ff)
3455			return -ERANGE;
3456		if (*pace_vals && vals[i] == 0)
3457			return -ERANGE;
3458	}
3459	for (i = 0; i < n; i++, start++)
3460		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3461	return 0;
3462}
3463
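/*
 * Illustrative sketch only (compiled out): program the first two pace table
 * entries with hypothetical 10us and 25us delays, then read the whole table
 * back.
 */
#if 0
static int example_set_pace(struct adapter *adap)
{
	static const unsigned int pace_us[2] = { 10, 25 };
	unsigned int readback[NTX_SCHED];
	int ret;

	ret = t4_set_pace_tbl(adap, pace_us, 0, 2);
	if (ret < 0)
		return ret;
	t4_read_pace_tbl(adap, readback);	/* readback[0] ~ 10us */
	return 0;
}
#endif
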
3464/**
3465 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3466 *	@adap: the adapter
3467 *	@sched: the scheduler index
3468 *	@kbps: target rate in Kbps
3469 *
3470 *	Configure a Tx HW scheduler for the target rate.
3471 */
3472int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3473{
3474	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3475	unsigned int clk = adap->params.vpd.cclk * 1000;
3476	unsigned int selected_cpt = 0, selected_bpt = 0;
3477
3478	if (kbps > 0) {
3479		kbps *= 125;     /* -> bytes */
3480		for (cpt = 1; cpt <= 255; cpt++) {
3481			tps = clk / cpt;
3482			bpt = (kbps + tps / 2) / tps;
3483			if (bpt > 0 && bpt <= 255) {
3484				v = bpt * tps;
3485				delta = v >= kbps ? v - kbps : kbps - v;
3486				if (delta < mindelta) {
3487					mindelta = delta;
3488					selected_cpt = cpt;
3489					selected_bpt = bpt;
3490				}
3491			} else if (selected_cpt)
3492				break;
3493		}
3494		if (!selected_cpt)
3495			return -EINVAL;
3496	}
3497	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3498		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3499	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3500	if (sched & 1)
3501		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3502	else
3503		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3504	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3505	return 0;
3506}
3507
3508/**
3509 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3510 *	@adap: the adapter
3511 *	@sched: the scheduler index
3512 *	@ipg: the interpacket delay in tenths of nanoseconds
3513 *
3514 *	Set the interpacket delay for a HW packet rate scheduler.
3515 */
3516int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3517{
3518	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3519
3520	/* convert ipg to nearest number of core clocks */
3521	ipg *= core_ticks_per_usec(adap);
3522	ipg = (ipg + 5000) / 10000;
3523	if (ipg > M_TXTIMERSEPQ0)
3524		return -EINVAL;
3525
3526	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3527	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3528	if (sched & 1)
3529		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3530	else
3531		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3532	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3533	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3534	return 0;
3535}
3536
3537/**
3538 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3539 *	@adap: the adapter
3540 *	@sched: the scheduler index
3541 *	@kbps: where to store the rate in Kbps
3542 *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3543 *
3544 *	Return the current configuration of a HW Tx scheduler.
3545 */
3546void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3547		     unsigned int *ipg)
3548{
3549	unsigned int v, addr, bpt, cpt;
3550
3551	if (kbps) {
3552		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3553		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3554		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3555		if (sched & 1)
3556			v >>= 16;
3557		bpt = (v >> 8) & 0xff;
3558		cpt = v & 0xff;
3559		if (!cpt)
3560			*kbps = 0;        /* scheduler disabled */
3561		else {
3562			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3563			*kbps = (v * bpt) / 125;
3564		}
3565	}
3566	if (ipg) {
3567		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3568		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3569		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3570		if (sched & 1)
3571			v >>= 16;
3572		v &= 0xffff;
3573		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3574	}
3575}
3576
3577/*
3578 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3579 * clocks.  The formula is
3580 *
3581 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3582 *
3583 * which is equivalent to
3584 *
3585 * bytes/s = 62.5 * bytes256 * ClkFreq_ms  (ClkFreq_ms = cclk in kHz)
3586 */
3587static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3588{
3589	u64 v = bytes256 * adap->params.vpd.cclk;
3590
3591	return v * 62 + v / 2;
3592}
3593
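/*
 * Illustrative check only (compiled out): v * 62 + v / 2 above is the
 * integer form of 62.5 * v.  With a hypothetical 250 MHz core clock
 * (cclk == 250000, i.e. kHz) and bytes256 == 4, v is 1000000 and the rate
 * is 62500000 bytes/s.
 */
#if 0
static void example_chan_rate(void)
{
	u64 v = 4 * 250000;	/* bytes256 * cclk */

	KASSERT(v * 62 + v / 2 == 62500000ULL, ("chan_rate arithmetic"));
}
#endif
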
3594/**
3595 *	t4_get_chan_txrate - get the current per channel Tx rates
3596 *	@adap: the adapter
3597 *	@nic_rate: rates for NIC traffic
3598 *	@ofld_rate: rates for offloaded traffic
3599 *
3600 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3601 *	for each channel.
3602 */
3603void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3604{
3605	u32 v;
3606
3607	v = t4_read_reg(adap, A_TP_TX_TRATE);
3608	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3609	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3610	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3611	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3612
3613	v = t4_read_reg(adap, A_TP_TX_ORATE);
3614	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3615	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3616	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3617	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3618}
3619
3620/**
3621 *	t4_set_trace_filter - configure one of the tracing filters
3622 *	@adap: the adapter
3623 *	@tp: the desired trace filter parameters
3624 *	@idx: which filter to configure
3625 *	@enable: whether to enable or disable the filter
3626 *
3627 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
3628 *	it indicates that the filter is already written in the register and it
3629 *	just needs to be enabled or disabled.
3630 */
3631int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3632    int idx, int enable)
3633{
3634	int i, ofst = idx * 4;
3635	u32 data_reg, mask_reg, cfg;
3636	u32 multitrc = F_TRCMULTIFILTER;
3637	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
3638
3639	if (idx < 0 || idx >= NTRACE)
3640		return -EINVAL;
3641
3642	if (tp == NULL || !enable) {
3643		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
3644		    enable ? en : 0);
3645		return 0;
3646	}
3647
3648	/*
3649	 * TODO - After T4 data book is updated, specify the exact
3650	 * section below.
3651	 *
3652	 * See T4 data book - MPS section for a complete description
3653	 * of the below if..else handling of A_MPS_TRC_CFG register
3654	 * value.
3655	 */
3656	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3657	if (cfg & F_TRCMULTIFILTER) {
3658		/*
3659		 * If multiple tracers are enabled, then maximum
3660		 * capture size is 2.5KB (FIFO size of a single channel)
3661		 * minus 2 flits for CPL_TRACE_PKT header.
3662		 */
3663		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3664			return -EINVAL;
3665	} else {
3666		/*
3667		 * If multiple tracers are disabled, a maximum packet capture
3668		 * size of 9600 bytes is enforced to avoid deadlocks.
3669		 * Also in this mode, only trace0 can be enabled and running.
3670		 */
3671		multitrc = 0;
3672		if (tp->snap_len > 9600 || idx)
3673			return -EINVAL;
3674	}
3675
3676	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
3677	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
3678	    tp->min_len > M_TFMINPKTSIZE)
3679		return -EINVAL;
3680
3681	/* stop the tracer we'll be changing */
3682	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
3683
3684	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3685	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3686	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3687
3688	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3689		t4_write_reg(adap, data_reg, tp->data[i]);
3690		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3691	}
3692	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3693		     V_TFCAPTUREMAX(tp->snap_len) |
3694		     V_TFMINPKTSIZE(tp->min_len));
3695	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3696		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
3697		     (is_t4(adap) ?
3698		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
3699		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
3700
3701	return 0;
3702}
3703
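/*
 * Illustrative sketch only (compiled out): enable tracer 0 to capture the
 * first 128 bytes of every packet seen on (hypothetically) port 0, with no
 * skipping and no match constraints.
 */
#if 0
static int example_trace_port0(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));	/* all-zero data/mask matches all */
	tp.snap_len = 128;
	tp.port = 0;
	return t4_set_trace_filter(adap, &tp, 0, 1);
}
#endif
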
3704/**
3705 *	t4_get_trace_filter - query one of the tracing filters
3706 *	@adap: the adapter
3707 *	@tp: the current trace filter parameters
3708 *	@idx: which trace filter to query
3709 *	@enabled: set to non-zero if the filter is enabled
3710 *
3711 *	Returns the current settings of one of the HW tracing filters.
3712 */
3713void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3714			 int *enabled)
3715{
3716	u32 ctla, ctlb;
3717	int i, ofst = idx * 4;
3718	u32 data_reg, mask_reg;
3719
3720	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3721	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3722
3723	if (is_t4(adap)) {
3724		*enabled = !!(ctla & F_TFEN);
3725		tp->port =  G_TFPORT(ctla);
3726		tp->invert = !!(ctla & F_TFINVERTMATCH);
3727	} else {
3728		*enabled = !!(ctla & F_T5_TFEN);
3729		tp->port = G_T5_TFPORT(ctla);
3730		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
3731	}
3732	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3733	tp->min_len = G_TFMINPKTSIZE(ctlb);
3734	tp->skip_ofst = G_TFOFFSET(ctla);
3735	tp->skip_len = G_TFLENGTH(ctla);
3736
3737	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3738	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3739	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3740
3741	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3742		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3743		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3744	}
3745}
3746
3747/**
3748 *	t4_pmtx_get_stats - returns the HW stats from PMTX
3749 *	@adap: the adapter
3750 *	@cnt: where to store the count statistics
3751 *	@cycles: where to store the cycle statistics
3752 *
3753 *	Returns performance statistics from PMTX.
3754 */
3755void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3756{
3757	int i;
3758	u32 data[2];
3759
3760	for (i = 0; i < PM_NSTATS; i++) {
3761		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3762		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3763		if (is_t4(adap))
3764			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3765		else {
3766			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3767					 A_PM_TX_DBG_DATA, data, 2,
3768					 A_PM_TX_DBG_STAT_MSB);
3769			cycles[i] = (((u64)data[0] << 32) | data[1]);
3770		}
3771	}
3772}
3773
3774/**
3775 *	t4_pmrx_get_stats - returns the HW stats from PMRX
3776 *	@adap: the adapter
3777 *	@cnt: where to store the count statistics
3778 *	@cycles: where to store the cycle statistics
3779 *
3780 *	Returns performance statistics from PMRX.
3781 */
3782void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3783{
3784	int i;
3785	u32 data[2];
3786
3787	for (i = 0; i < PM_NSTATS; i++) {
3788		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3789		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3790		if (is_t4(adap))
3791			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3792		else {
3793			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3794					 A_PM_RX_DBG_DATA, data, 2,
3795					 A_PM_RX_DBG_STAT_MSB);
3796			cycles[i] = (((u64)data[0] << 32) | data[1]);
3797		}
3798	}
3799}
3800
3801/**
3802 *	get_mps_bg_map - return the buffer groups associated with a port
3803 *	@adap: the adapter
3804 *	@idx: the port index
3805 *
3806 *	Returns a bitmap indicating which MPS buffer groups are associated
3807 *	with the given port.  Bit i is set if buffer group i is used by the
3808 *	port.
3809 */
3810static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3811{
3812	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3813
3814	if (n == 0)
3815		return idx == 0 ? 0xf : 0;
3816	if (n == 1)
3817		return idx < 2 ? (3 << (2 * idx)) : 0;
3818	return 1 << idx;
3819}
3820
3821/**
3822 *	t4_get_port_stats_offset - collect port stats relative to a
3823 *				   previous snapshot
3824 *	@adap: the adapter
3825 *	@idx: the port index
3826 *	@stats: current stats to fill
3827 *	@offset: previous stats snapshot
3828 */
3829void t4_get_port_stats_offset(struct adapter *adap, int idx,
3830		struct port_stats *stats,
3831		struct port_stats *offset)
3832{
3833	u64 *s, *o;
3834	int i;
3835
3836	t4_get_port_stats(adap, idx, stats);
3837	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3838	     i < (sizeof(struct port_stats) / sizeof(u64));
3839	     i++, s++, o++)
3840		*s -= *o;
3841}
3842
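/*
 * Illustrative sketch only (compiled out): keep a baseline snapshot taken
 * at some earlier (hypothetical) "clear" time and report only the counters
 * accumulated since then.
 */
#if 0
static void example_stats_delta(struct adapter *adap, int port,
				struct port_stats *baseline)
{
	struct port_stats delta;

	t4_get_port_stats_offset(adap, port, &delta, baseline);
	/* delta now holds the counters accumulated since *baseline */
}
#endif
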
3843/**
3844 *	t4_get_port_stats - collect port statistics
3845 *	@adap: the adapter
3846 *	@idx: the port index
3847 *	@p: the stats structure to fill
3848 *
3849 *	Collect statistics related to the given port from HW.
3850 */
3851void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3852{
3853	u32 bgmap = get_mps_bg_map(adap, idx);
3854
3855#define GET_STAT(name) \
3856	t4_read_reg64(adap, \
3857	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3858	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3859#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3860
3861	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3862	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3863	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3864	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3865	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3866	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3867	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3868	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3869	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3870	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3871	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3872	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3873	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3874	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3875	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3876	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3877	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3878	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3879	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3880	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3881	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3882	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3883	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3884
3885	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3886	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3887	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3888	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3889	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3890	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3891	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3892	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3893	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3894	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3895	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3896	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3897	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3898	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3899	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3900	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3901	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3902	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3903	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3904	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3905	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3906	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3907	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3908	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3909	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3910	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3911	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3912
3913	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3914	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3915	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3916	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3917	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3918	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3919	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3920	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3921
3922#undef GET_STAT
3923#undef GET_STAT_COM
3924}
3925
3926/**
3927 *	t4_clr_port_stats - clear port statistics
3928 *	@adap: the adapter
3929 *	@idx: the port index
3930 *
3931 *	Clear HW statistics for the given port.
3932 */
3933void t4_clr_port_stats(struct adapter *adap, int idx)
3934{
3935	unsigned int i;
3936	u32 bgmap = get_mps_bg_map(adap, idx);
3937	u32 port_base_addr;
3938
3939	if (is_t4(adap))
3940		port_base_addr = PORT_BASE(idx);
3941	else
3942		port_base_addr = T5_PORT_BASE(idx);
3943
3944	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3945			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3946		t4_write_reg(adap, port_base_addr + i, 0);
3947	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3948			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3949		t4_write_reg(adap, port_base_addr + i, 0);
3950	for (i = 0; i < 4; i++)
3951		if (bgmap & (1 << i)) {
3952			t4_write_reg(adap,
3953				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3954			t4_write_reg(adap,
3955				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3956		}
3957}
3958
3959/**
3960 *	t4_get_lb_stats - collect loopback port statistics
3961 *	@adap: the adapter
3962 *	@idx: the loopback port index
3963 *	@p: the stats structure to fill
3964 *
3965 *	Return HW statistics for the given loopback port.
3966 */
3967void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3968{
3969	u32 bgmap = get_mps_bg_map(adap, idx);
3970
3971#define GET_STAT(name) \
3972	t4_read_reg64(adap, \
3973	(is_t4(adap) ? \
3974	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3975	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3976#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3977
3978	p->octets           = GET_STAT(BYTES);
3979	p->frames           = GET_STAT(FRAMES);
3980	p->bcast_frames     = GET_STAT(BCAST);
3981	p->mcast_frames     = GET_STAT(MCAST);
3982	p->ucast_frames     = GET_STAT(UCAST);
3983	p->error_frames     = GET_STAT(ERROR);
3984
3985	p->frames_64        = GET_STAT(64B);
3986	p->frames_65_127    = GET_STAT(65B_127B);
3987	p->frames_128_255   = GET_STAT(128B_255B);
3988	p->frames_256_511   = GET_STAT(256B_511B);
3989	p->frames_512_1023  = GET_STAT(512B_1023B);
3990	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3991	p->frames_1519_max  = GET_STAT(1519B_MAX);
3992	p->drop             = GET_STAT(DROP_FRAMES);
3993
3994	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3995	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3996	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3997	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3998	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3999	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
4000	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
4001	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
4002
4003#undef GET_STAT
4004#undef GET_STAT_COM
4005}
4006
4007/**
4008 *	t4_wol_magic_enable - enable/disable magic packet WoL
4009 *	@adap: the adapter
4010 *	@port: the physical port index
4011 *	@addr: MAC address expected in magic packets, %NULL to disable
4012 *
4013 *	Enables/disables magic packet wake-on-LAN for the selected port.
4014 */
4015void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
4016			 const u8 *addr)
4017{
4018	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
4019
4020	if (is_t4(adap)) {
4021		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
4022		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
4023		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4024	} else {
4025		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
4026		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
4027		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4028	}
4029
4030	if (addr) {
4031		t4_write_reg(adap, mag_id_reg_l,
4032			     (addr[2] << 24) | (addr[3] << 16) |
4033			     (addr[4] << 8) | addr[5]);
4034		t4_write_reg(adap, mag_id_reg_h,
4035			     (addr[0] << 8) | addr[1]);
4036	}
4037	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
4038			 V_MAGICEN(addr != NULL));
4039}
4040
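/*
 * Illustrative sketch only (compiled out): arm magic-packet WoL on port 0
 * for a hypothetical station address, then disarm it.
 */
#if 0
static void example_wol_magic(struct adapter *adap)
{
	static const u8 mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	t4_wol_magic_enable(adap, 0, mac);	/* arm */
	t4_wol_magic_enable(adap, 0, NULL);	/* disarm */
}
#endif
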
4041/**
4042 *	t4_wol_pat_enable - enable/disable pattern-based WoL
4043 *	@adap: the adapter
4044 *	@port: the physical port index
4045 *	@map: bitmap of which HW pattern filters to set
4046 *	@mask0: byte mask for bytes 0-63 of a packet
4047 *	@mask1: byte mask for bytes 64-127 of a packet
4048 *	@crc: Ethernet CRC for selected bytes
4049 *	@enable: enable/disable switch
4050 *
4051 *	Sets the pattern filters indicated in @map to mask out the bytes
4052 *	specified in @mask0/@mask1 in received packets and compare the CRC of
4053 *	the resulting packet against @crc.  If @enable is %true pattern-based
4054 *	WoL is enabled, otherwise disabled.
4055 */
4056int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
4057		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
4058{
4059	int i;
4060	u32 port_cfg_reg;
4061
4062	if (is_t4(adap))
4063		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
4064	else
4065		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
4066
4067	if (!enable) {
4068		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
4069		return 0;
4070	}
4071	if (map > 0xff)
4072		return -EINVAL;
4073
4074#define EPIO_REG(name) \
4075	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
4076	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
4077
4078	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
4079	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
4080	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
4081
4082	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
4083		if (!(map & 1))
4084			continue;
4085
4086		/* write byte masks */
4087		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
4088		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
4089		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4090		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4091			return -ETIMEDOUT;
4092
4093		/* write CRC */
4094		t4_write_reg(adap, EPIO_REG(DATA0), crc);
4095		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
4096		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
4097		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
4098			return -ETIMEDOUT;
4099	}
4100#undef EPIO_REG
4101
4102	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
4103	return 0;
4104}
4105
4106/**
4107 *	t4_mk_filtdelwr - create a delete filter WR
4108 *	@ftid: the filter ID
4109 *	@wr: the filter work request to populate
4110 *	@qid: ingress queue to receive the delete notification
4111 *
4112 *	Creates a filter work request to delete the supplied filter.  If @qid is
4113 *	negative the delete notification is suppressed.
4114 */
4115void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
4116{
4117	memset(wr, 0, sizeof(*wr));
4118	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
4119	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
4120	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
4121			      V_FW_FILTER_WR_NOREPLY(qid < 0));
4122	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
4123	if (qid >= 0)
4124		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
4125}
4126
4127#define INIT_CMD(var, cmd, rd_wr) do { \
4128	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
4129				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
4130	(var).retval_len16 = htonl(FW_LEN16(var)); \
4131} while (0)
4132
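/**
 *	t4_fwaddrspace_write - write to a location in the FW address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues an LDST FW command through the given mailbox to write a value
 *	into the firmware's address space.
 */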
4133int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
4134{
4135	struct fw_ldst_cmd c;
4136
4137	memset(&c, 0, sizeof(c));
4138	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4139		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
4140	c.cycles_to_len16 = htonl(FW_LEN16(c));
4141	c.u.addrval.addr = htonl(addr);
4142	c.u.addrval.val = htonl(val);
4143
4144	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4145}
4146
4147/**
4148 *	t4_mdio_rd - read a PHY register through MDIO
4149 *	@adap: the adapter
4150 *	@mbox: mailbox to use for the FW command
4151 *	@phy_addr: the PHY address
4152 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4153 *	@reg: the register to read
4154 *	@valp: where to store the value
4155 *
4156 *	Issues a FW command through the given mailbox to read a PHY register.
4157 */
4158int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4159	       unsigned int mmd, unsigned int reg, unsigned int *valp)
4160{
4161	int ret;
4162	struct fw_ldst_cmd c;
4163
4164	memset(&c, 0, sizeof(c));
4165	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4166		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4167	c.cycles_to_len16 = htonl(FW_LEN16(c));
4168	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4169				   V_FW_LDST_CMD_MMD(mmd));
4170	c.u.mdio.raddr = htons(reg);
4171
4172	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4173	if (ret == 0)
4174		*valp = ntohs(c.u.mdio.rval);
4175	return ret;
4176}
4177
4178/**
4179 *	t4_mdio_wr - write a PHY register through MDIO
4180 *	@adap: the adapter
4181 *	@mbox: mailbox to use for the FW command
4182 *	@phy_addr: the PHY address
4183 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
4184 *	@reg: the register to write
4185 *	@valp: value to write
4186 *
4187 *	Issues a FW command through the given mailbox to write a PHY register.
4188 */
4189int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4190	       unsigned int mmd, unsigned int reg, unsigned int val)
4191{
4192	struct fw_ldst_cmd c;
4193
4194	memset(&c, 0, sizeof(c));
4195	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4196		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4197	c.cycles_to_len16 = htonl(FW_LEN16(c));
4198	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4199				   V_FW_LDST_CMD_MMD(mmd));
4200	c.u.mdio.raddr = htons(reg);
4201	c.u.mdio.rval = htons(val);
4202
4203	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4204}
4205
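/*
 * Illustrative sketch only (compiled out): read-modify-write of a clause-45
 * PHY register over MDIO.  The PHY address (5), MMD (1), register (0), and
 * bit (0x0800) are all hypothetical.
 */
#if 0
static int example_mdio_rmw(struct adapter *adap)
{
	unsigned int v;
	int ret;

	ret = t4_mdio_rd(adap, adap->mbox, 5, 1, 0, &v);
	if (ret)
		return ret;
	return t4_mdio_wr(adap, adap->mbox, 5, 1, 0, v | 0x0800);
}
#endif
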
4206/**
4207 *	t4_i2c_rd - read I2C data from adapter
4208 *	@adap: the adapter
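 *	@mbox: mailbox to use for the FW command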
4209 *	@port: Port number if per-port device; <0 if not
4210 *	@devid: per-port device ID or absolute device ID
4211 *	@offset: byte offset into device I2C space
4212 *	@len: byte length of I2C space data
4213 *	@buf: buffer in which to return I2C data
4214 *
4215 *	Reads the I2C data from the indicated device and location.
4216 */
4217int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
4218	      int port, unsigned int devid,
4219	      unsigned int offset, unsigned int len,
4220	      u8 *buf)
4221{
4222	struct fw_ldst_cmd ldst;
4223	int ret;
4224
4225	if (port >= 4 ||
4226	    devid >= 256 ||
4227	    offset >= 256 ||
4228	    len > sizeof ldst.u.i2c.data)
4229		return -EINVAL;
4230
4231	memset(&ldst, 0, sizeof ldst);
4232	ldst.op_to_addrspace =
4233		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4234			    F_FW_CMD_REQUEST |
4235			    F_FW_CMD_READ |
4236			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4237	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4238	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4239	ldst.u.i2c.did = devid;
4240	ldst.u.i2c.boffset = offset;
4241	ldst.u.i2c.blen = len;
4242	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4243	if (!ret)
4244		memcpy(buf, ldst.u.i2c.data, len);
4245	return ret;
4246}
4247
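/*
 * Illustrative sketch only (compiled out): read the first 8 bytes of a
 * transceiver's ID EEPROM on port 0 at the conventional SFP+ I2C address
 * 0xa0.  The mailbox and addressing values are hypothetical.
 */
#if 0
static int example_sfp_id(struct adapter *adap, u8 id[8])
{
	return t4_i2c_rd(adap, adap->mbox, 0, 0xa0, 0, 8, id);
}
#endif
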
4248/**
4249 *	t4_i2c_wr - write I2C data to adapter
4250 *	@adap: the adapter
4251 *	@port: Port number if per-port device; <0 if not
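 *	@mbox: mailbox to use for the FW command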
4252 *	@devid: per-port device ID or absolute device ID
4253 *	@offset: byte offset into device I2C space
4254 *	@len: byte length of I2C space data
4255 *	@buf: buffer containing new I2C data
4256 *
4257 *	Writes the I2C data to the indicated device and location.
4258 */
4259int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
4260	      int port, unsigned int devid,
4261	      unsigned int offset, unsigned int len,
4262	      u8 *buf)
4263{
4264	struct fw_ldst_cmd ldst;
4265
4266	if (port >= 4 ||
4267	    devid >= 256 ||
4268	    offset >= 256 ||
4269	    len > sizeof ldst.u.i2c.data)
4270		return -EINVAL;
4271
4272	memset(&ldst, 0, sizeof ldst);
4273	ldst.op_to_addrspace =
4274		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4275			    F_FW_CMD_REQUEST |
4276			    F_FW_CMD_WRITE |
4277			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
4278	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
4279	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
4280	ldst.u.i2c.did = devid;
4281	ldst.u.i2c.boffset = offset;
4282	ldst.u.i2c.blen = len;
4283	memcpy(ldst.u.i2c.data, buf, len);
4284	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
4285}
4286
4287/**
4288 *	t4_sge_ctxt_flush - flush the SGE context cache
4289 *	@adap: the adapter
4290 *	@mbox: mailbox to use for the FW command
4291 *
4292 *	Issues a FW command through the given mailbox to flush the
4293 *	SGE context cache.
4294 */
4295int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4296{
4297	int ret;
4298	struct fw_ldst_cmd c;
4299
4300	memset(&c, 0, sizeof(c));
4301	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4302			F_FW_CMD_READ |
4303			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4304	c.cycles_to_len16 = htonl(FW_LEN16(c));
4305	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4306
4307	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4308	return ret;
4309}
4310
4311/**
4312 *	t4_sge_ctxt_rd - read an SGE context through FW
4313 *	@adap: the adapter
4314 *	@mbox: mailbox to use for the FW command
4315 *	@cid: the context id
4316 *	@ctype: the context type
4317 *	@data: where to store the context data
4318 *
4319 *	Issues a FW command through the given mailbox to read an SGE context.
4320 */
4321int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4322		   enum ctxt_type ctype, u32 *data)
4323{
4324	int ret;
4325	struct fw_ldst_cmd c;
4326
4327	if (ctype == CTXT_EGRESS)
4328		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4329	else if (ctype == CTXT_INGRESS)
4330		ret = FW_LDST_ADDRSPC_SGE_INGC;
4331	else if (ctype == CTXT_FLM)
4332		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4333	else
4334		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4335
4336	memset(&c, 0, sizeof(c));
4337	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4338				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4339	c.cycles_to_len16 = htonl(FW_LEN16(c));
4340	c.u.idctxt.physid = htonl(cid);
4341
4342	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4343	if (ret == 0) {
4344		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4345		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4346		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4347		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4348		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4349		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4350	}
4351	return ret;
4352}
4353
4354/**
4355 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4356 *	@adap: the adapter
4357 *	@cid: the context id
4358 *	@ctype: the context type
4359 *	@data: where to store the context data
4360 *
4361 *	Reads an SGE context directly, bypassing FW.  This is only for
4362 *	debugging when FW is unavailable.
4363 */
4364int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4365		      u32 *data)
4366{
4367	int i, ret;
4368
4369	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4370	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4371	if (!ret)
4372		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4373			*data++ = t4_read_reg(adap, i);
4374	return ret;
4375}
4376
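/*
 * Illustrative sketch only (compiled out): read the six words of an egress
 * queue context via the FW, falling back to the direct debug path if the
 * mailbox call fails.  The queue id is hypothetical.
 */
#if 0
static int example_read_egr_ctxt(struct adapter *adap, unsigned int qid)
{
	u32 data[6];
	int ret;

	ret = t4_sge_ctxt_rd(adap, adap->mbox, qid, CTXT_EGRESS, data);
	if (ret)
		ret = t4_sge_ctxt_rd_bd(adap, qid, CTXT_EGRESS, data);
	return ret;
}
#endif
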
4377/**
4378 *	t4_fw_hello - establish communication with FW
4379 *	@adap: the adapter
4380 *	@mbox: mailbox to use for the FW command
4381 *	@evt_mbox: mailbox to receive async FW events
4382 *	@master: specifies the caller's willingness to be the device master
4383 *	@state: returns the current device state (if non-NULL)
4384 *
4385 *	Issues a command to establish communication with FW.  Returns either
4386 *	an error (negative integer) or the mailbox of the Master PF.
4387 */
4388int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4389		enum dev_master master, enum dev_state *state)
4390{
4391	int ret;
4392	struct fw_hello_cmd c;
4393	u32 v;
4394	unsigned int master_mbox;
4395	int retries = FW_CMD_HELLO_RETRIES;
4396
4397retry:
4398	memset(&c, 0, sizeof(c));
4399	INIT_CMD(c, HELLO, WRITE);
4400	c.err_to_clearinit = htonl(
4401		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4402		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4403		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4404			M_FW_HELLO_CMD_MBMASTER) |
4405		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4406		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4407		F_FW_HELLO_CMD_CLEARINIT);
4408
4409	/*
4410	 * Issue the HELLO command to the firmware.  If it's not successful
4411	 * but indicates that we got a "busy" or "timeout" condition, retry
4412	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4413	 * retry limit, check to see if the firmware left us any error
4414	 * information and report that if so ...
4415	 */
4416	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4417	if (ret != FW_SUCCESS) {
4418		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4419			goto retry;
4420		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4421			t4_report_fw_error(adap);
4422		return ret;
4423	}
4424
4425	v = ntohl(c.err_to_clearinit);
4426	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4427	if (state) {
4428		if (v & F_FW_HELLO_CMD_ERR)
4429			*state = DEV_STATE_ERR;
4430		else if (v & F_FW_HELLO_CMD_INIT)
4431			*state = DEV_STATE_INIT;
4432		else
4433			*state = DEV_STATE_UNINIT;
4434	}
4435
4436	/*
4437	 * If we're not the Master PF then we need to wait around for the
4438	 * Master PF Driver to finish setting up the adapter.
4439	 *
4440	 * Note that we also do this wait if we're a non-Master-capable PF and
4441	 * there is no current Master PF; a Master PF may show up momentarily
4442	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4443	 * OS loads lots of different drivers rapidly at the same time).  In
4444	 * this case, the Master PF returned by the firmware will be
4445	 * M_PCIE_FW_MASTER so the test below will work ...
4446	 */
4447	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4448	    master_mbox != mbox) {
4449		int waiting = FW_CMD_HELLO_TIMEOUT;
4450
4451		/*
4452		 * Wait for the firmware to either indicate an error or
4453		 * initialized state.  If we see either of these we bail out
4454		 * and report the issue to the caller.  If we exhaust the
4455		 * "hello timeout" and we haven't exhausted our retries, try
4456		 * again.  Otherwise bail with a timeout error.
4457		 */
4458		for (;;) {
4459			u32 pcie_fw;
4460
4461			msleep(50);
4462			waiting -= 50;
4463
4464			/*
4465			 * If neither Error nor Initialized is indicated
4466			 * by the firmware, keep waiting till we exhaust our
4467			 * timeout ... and then retry if we haven't exhausted
4468			 * our retries ...
4469			 */
4470			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4471			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4472				if (waiting <= 0) {
4473					if (retries-- > 0)
4474						goto retry;
4475
4476					return -ETIMEDOUT;
4477				}
4478				continue;
4479			}
4480
4481			/*
4482			 * We either have an Error or Initialized condition;
4483			 * report errors preferentially.
4484			 */
4485			if (state) {
4486				if (pcie_fw & F_PCIE_FW_ERR)
4487					*state = DEV_STATE_ERR;
4488				else if (pcie_fw & F_PCIE_FW_INIT)
4489					*state = DEV_STATE_INIT;
4490			}
4491
4492			/*
4493			 * If we arrived before a Master PF was selected and
4494			 * one has since become valid, grab its identity
4495			 * for our caller.
4496			 */
4497			if (master_mbox == M_PCIE_FW_MASTER &&
4498			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4499				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4500			break;
4501		}
4502	}
4503
4504	return master_mbox;
4505}
4506
4507/**
4508 *	t4_fw_bye - end communication with FW
4509 *	@adap: the adapter
4510 *	@mbox: mailbox to use for the FW command
4511 *
4512 *	Issues a command to terminate communication with FW.
4513 */
4514int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4515{
4516	struct fw_bye_cmd c;
4517
4518	memset(&c, 0, sizeof(c));
4519	INIT_CMD(c, BYE, WRITE);
4520	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4521}
4522
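/*
 * Illustrative sketch only (compiled out): the usual attach-time handshake.
 * Say HELLO, learn the Master PF and device state, and say BYE at detach.
 * Using adap->mbox for both mailboxes is a hypothetical but typical choice.
 */
#if 0
static int example_hello_bye(struct adapter *adap)
{
	enum dev_state state;
	int master;

	master = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY,
	    &state);
	if (master < 0)
		return master;
	/* ... normal operation ... */
	return t4_fw_bye(adap, adap->mbox);
}
#endif
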
4523/**
4524 *	t4_fw_reset - issue a reset to FW
4525 *	@adap: the adapter
4526 *	@mbox: mailbox to use for the FW command
4527 *	@reset: specifies the type of reset to perform
4528 *
4529 *	Issues a reset command of the specified type to FW.
4530 */
4531int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4532{
4533	struct fw_reset_cmd c;
4534
4535	memset(&c, 0, sizeof(c));
4536	INIT_CMD(c, RESET, WRITE);
4537	c.val = htonl(reset);
4538	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4539}
4540
4541/**
4542 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4543 *	@adap: the adapter
4544 *	@mbox: mailbox to use for the FW RESET command (if desired)
4545 *	@force: force uP into RESET even if FW RESET command fails
4546 *
4547 *	Issues a RESET command to firmware (if desired) with a HALT indication
4548 *	and then puts the microprocessor into RESET state.  The RESET command
4549 *	will only be issued if a legitimate mailbox is provided (mbox <=
4550 *	M_PCIE_FW_MASTER).
4551 *
4552 *	This is generally used in order for the host to safely manipulate the
4553 *	adapter without fear of conflicting with whatever the firmware might
4554 *	be doing.  The only way out of this state is to RESTART the firmware
4555 *	...
4556 */
4557int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4558{
4559	int ret = 0;
4560
4561	/*
4562	 * If a legitimate mailbox is provided, issue a RESET command
4563	 * with a HALT indication.
4564	 */
4565	if (mbox <= M_PCIE_FW_MASTER) {
4566		struct fw_reset_cmd c;
4567
4568		memset(&c, 0, sizeof(c));
4569		INIT_CMD(c, RESET, WRITE);
4570		c.val = htonl(F_PIORST | F_PIORSTMODE);
4571		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4572		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4573	}
4574
4575	/*
4576	 * Normally we won't complete the operation if the firmware RESET
4577	 * command fails but if our caller insists we'll go ahead and put the
4578	 * uP into RESET.  This can be useful if the firmware is hung or even
4579	 * missing ...  We'll have to take the risk of putting the uP into
4580	 * RESET without the cooperation of firmware in that case.
4581	 *
4582	 * We also force the firmware's HALT flag to be on in case we bypassed
4583	 * the firmware RESET command above or we're dealing with old firmware
4584	 * which doesn't have the HALT capability.  This will serve as a flag
4585	 * for the incoming firmware to know that it's coming out of a HALT
4586	 * rather than a RESET ... if it's new enough to understand that ...
4587	 */
4588	if (ret == 0 || force) {
4589		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4590		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4591	}
4592
4593	/*
4594	 * And we always return the result of the firmware RESET command
4595	 * even when we force the uP into RESET ...
4596	 */
4597	return ret;
4598}
4599
4600/**
4601 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4602 *	@adap: the adapter
4603 *	@reset: if we want to do a RESET to restart things
4604 *
4605 *	Restart firmware previously halted by t4_fw_halt().  On successful
4606 *	return the previous PF Master remains as the new PF Master and there
4607 *	is no need to issue a new HELLO command, etc.
4608 *
4609 *	We do this in two ways:
4610 *
4611 *	 1. If we're dealing with newer firmware we'll simply want to take
4612 *	    the chip's microprocessor out of RESET.  This will cause the
4613 *	    firmware to start up from its start vector.  And then we'll loop
4614 *	    until the firmware indicates it's started again (PCIE_FW.HALT
4615 *	    reset to 0) or we timeout.
4616 *
4617 *	 2. If we're dealing with older firmware then we'll need to RESET
4618 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4619 *	    flag and automatically RESET itself on startup.
4620 */
4621int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4622{
4623	if (reset) {
4624		/*
4625		 * Since we're directing the RESET instead of the firmware
4626		 * doing it automatically, we need to clear the PCIE_FW.HALT
4627		 * bit.
4628		 */
4629		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4630
4631		/*
4632		 * If we've been given a valid mailbox, first try to get the
4633		 * firmware to do the RESET.  If that works, great and we can
4634		 * return success.  Otherwise, if we haven't been given a
4635		 * valid mailbox or the RESET command failed, fall back to
4636		 * hitting the chip with a hammer.
4637		 */
4638		if (mbox <= M_PCIE_FW_MASTER) {
4639			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4640			msleep(100);
4641			if (t4_fw_reset(adap, mbox,
4642					F_PIORST | F_PIORSTMODE) == 0)
4643				return 0;
4644		}
4645
4646		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4647		msleep(2000);
4648	} else {
4649		int ms;
4650
4651		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4652		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4653			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4654				return FW_SUCCESS;
4655			msleep(100);
4656			ms += 100;
4657		}
4658		return -ETIMEDOUT;
4659	}
4660	return 0;
4661}
4662
4663/**
4664 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4665 *	@adap: the adapter
4666 *	@mbox: mailbox to use for the FW RESET command (if desired)
4667 *	@fw_data: the firmware image to write
4668 *	@size: image size
4669 *	@force: force upgrade even if firmware doesn't cooperate
4670 *
4671 *	Perform all of the steps necessary for upgrading an adapter's
4672 *	firmware image.  Normally this requires the cooperation of the
4673 *	existing firmware in order to halt all existing activities
4674 *	but if an invalid mailbox token is passed in we skip that step
4675 *	(though we'll still put the adapter microprocessor into RESET in
4676 *	that case).
4677 *
4678 *	On successful return the new firmware will have been loaded and
4679 *	the adapter will have been fully RESET losing all previous setup
4680 *	state.  On unsuccessful return the adapter may be completely hosed ...
4681 *	positive errno indicates that the adapter is ~probably~ intact, a
4682 *	negative errno indicates that things are looking bad ...
4683 */
4684int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4685		  const u8 *fw_data, unsigned int size, int force)
4686{
4687	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4688	unsigned int bootstrap = ntohl(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
4689	int reset, ret;
4690
4691	if (!bootstrap) {
4692		ret = t4_fw_halt(adap, mbox, force);
4693		if (ret < 0 && !force)
4694			return ret;
4695	}
4696
4697	ret = t4_load_fw(adap, fw_data, size);
4698	if (ret < 0 || bootstrap)
4699		return ret;
4700
4701	/*
4702	 * Older versions of the firmware don't understand the new
4703	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4704	 * restart.  So for newly loaded older firmware we'll have to do the
4705	 * RESET for it so it starts up on a clean slate.  We can tell if
4706	 * the newly loaded firmware will handle this right by checking
4707	 * its header flags to see if it advertises the capability.
4708	 */
4709	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4710	return t4_fw_restart(adap, mbox, reset);
4711}
4712
4713/**
4714 *	t4_fw_initialize - ask FW to initialize the device
4715 *	@adap: the adapter
4716 *	@mbox: mailbox to use for the FW command
4717 *
4718 *	Issues a command to FW to partially initialize the device.  This
4719 *	performs initialization that generally doesn't depend on user input.
4720 */
4721int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4722{
4723	struct fw_initialize_cmd c;
4724
4725	memset(&c, 0, sizeof(c));
4726	INIT_CMD(c, INITIALIZE, WRITE);
4727	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4728}
4729
4730/**
4731 *	t4_query_params - query FW or device parameters
4732 *	@adap: the adapter
4733 *	@mbox: mailbox to use for the FW command
4734 *	@pf: the PF
4735 *	@vf: the VF
4736 *	@nparams: the number of parameters
4737 *	@params: the parameter names
4738 *	@val: the parameter values
4739 *
4740 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4741 *	queried at once.
4742 */
4743int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4744		    unsigned int vf, unsigned int nparams, const u32 *params,
4745		    u32 *val)
4746{
4747	int i, ret;
4748	struct fw_params_cmd c;
4749	__be32 *p = &c.param[0].mnem;
4750
4751	if (nparams > 7)
4752		return -EINVAL;
4753
4754	memset(&c, 0, sizeof(c));
4755	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4756			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4757			    V_FW_PARAMS_CMD_VFN(vf));
4758	c.retval_len16 = htonl(FW_LEN16(c));
4759
4760	for (i = 0; i < nparams; i++, p += 2, params++)
4761		*p = htonl(*params);
4762
4763	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4764	if (ret == 0)
4765		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4766			*val++ = ntohl(*p);
4767	return ret;
4768}
4769
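/*
 * Illustrative sketch only (compiled out): query a single device parameter,
 * the port vector.  The mnemonic is built by hand here; the driver proper
 * wraps this pattern in a FW_PARAM_DEV() macro.
 */
#if 0
static int example_query_portvec(struct adapter *adap, u32 *portvec)
{
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	return t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
	    portvec);
}
#endif
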
4770/**
4771 *	t4_set_params - sets FW or device parameters
4772 *	@adap: the adapter
4773 *	@mbox: mailbox to use for the FW command
4774 *	@pf: the PF
4775 *	@vf: the VF
4776 *	@nparams: the number of parameters
4777 *	@params: the parameter names
4778 *	@val: the parameter values
4779 *
4780 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4781 *	specified at once.
4782 */
4783int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4784		  unsigned int vf, unsigned int nparams, const u32 *params,
4785		  const u32 *val)
4786{
4787	struct fw_params_cmd c;
4788	__be32 *p = &c.param[0].mnem;
4789
4790	if (nparams > 7)
4791		return -EINVAL;
4792
4793	memset(&c, 0, sizeof(c));
4794	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4795			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4796			    V_FW_PARAMS_CMD_VFN(vf));
4797	c.retval_len16 = htonl(FW_LEN16(c));
4798
4799	while (nparams--) {
4800		*p++ = htonl(*params);
4801		params++;
4802		*p++ = htonl(*val);
4803		val++;
4804	}
4805
4806	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4807}
4808
4809/**
4810 *	t4_cfg_pfvf - configure PF/VF resource limits
4811 *	@adap: the adapter
4812 *	@mbox: mailbox to use for the FW command
4813 *	@pf: the PF being configured
4814 *	@vf: the VF being configured
4815 *	@txq: the max number of egress queues
4816 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4817 *	@rxqi: the max number of interrupt-capable ingress queues
4818 *	@rxq: the max number of interruptless ingress queues
4819 *	@tc: the PCI traffic class
4820 *	@vi: the max number of virtual interfaces
4821 *	@cmask: the channel access rights mask for the PF/VF
4822 *	@pmask: the port access rights mask for the PF/VF
4823 *	@nexact: the maximum number of exact MPS filters
4824 *	@rcaps: read capabilities
4825 *	@wxcaps: write/execute capabilities
4826 *
4827 *	Configures resource limits and capabilities for a physical or virtual
4828 *	function.
4829 */
4830int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4831		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4832		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4833		unsigned int vi, unsigned int cmask, unsigned int pmask,
4834		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4835{
4836	struct fw_pfvf_cmd c;
4837
4838	memset(&c, 0, sizeof(c));
4839	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4840			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4841			    V_FW_PFVF_CMD_VFN(vf));
4842	c.retval_len16 = htonl(FW_LEN16(c));
4843	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4844			       V_FW_PFVF_CMD_NIQ(rxq));
4845	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4846			      V_FW_PFVF_CMD_PMASK(pmask) |
4847			      V_FW_PFVF_CMD_NEQ(txq));
4848	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4849				V_FW_PFVF_CMD_NEXACTF(nexact));
4850	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4851				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4852				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4853	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4854}
4855
4856/**
4857 *	t4_alloc_vi_func - allocate a virtual interface
4858 *	@adap: the adapter
4859 *	@mbox: mailbox to use for the FW command
4860 *	@port: physical port associated with the VI
4861 *	@pf: the PF owning the VI
4862 *	@vf: the VF owning the VI
4863 *	@nmac: number of MAC addresses needed (1 to 5)
4864 *	@mac: the MAC addresses of the VI
4865 *	@rss_size: size of RSS table slice associated with this VI
4866 *	@portfunc: which Port Application Function MAC Address is desired
4867 *	@idstype: Intrusion Detection Type
4868 *
4869 *	Allocates a virtual interface for the given physical port.  If @mac is
4870 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4871 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4872 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
4873 *	Returns a negative error number or the non-negative VI id.
4874 */
4875int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4876		     unsigned int port, unsigned int pf, unsigned int vf,
4877		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4878		     unsigned int portfunc, unsigned int idstype)
4879{
4880	int ret;
4881	struct fw_vi_cmd c;
4882
4883	memset(&c, 0, sizeof(c));
4884	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4885			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4886			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4887	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4888	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4889			       V_FW_VI_CMD_FUNC(portfunc));
4890	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4891	c.nmac = nmac - 1;
4892
4893	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4894	if (ret)
4895		return ret;
4896
4897	if (mac) {
4898		memcpy(mac, c.mac, sizeof(c.mac));
4899		switch (nmac) {
4900		case 5:
4901			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* FALLTHROUGH */
4902		case 4:
4903			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* FALLTHROUGH */
4904		case 3:
4905			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* FALLTHROUGH */
4906		case 2:
4907			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4908		}
4909	}
4910	if (rss_size)
4911		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4912	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4913}
4914
4915/**
4916 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4917 *	@adap: the adapter
4918 *	@mbox: mailbox to use for the FW command
4919 *	@port: physical port associated with the VI
4920 *	@pf: the PF owning the VI
4921 *	@vf: the VF owning the VI
4922 *	@nmac: number of MAC addresses needed (1 to 5)
4923 *	@mac: the MAC addresses of the VI
4924 *	@rss_size: size of RSS table slice associated with this VI
4925 *
4926 *	Backwards-compatible convenience routine to allocate a Virtual
4927 *	Interface with an Ethernet Port Application Function and Intrusion
4928 *	Detection System disabled.
4929 */
4930int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4931		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4932		unsigned int *rss_size)
4933{
4934	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4935				FW_VI_FUNC_ETH, 0);
4936}
4937
4938/**
4939 *	t4_free_vi - free a virtual interface
4940 *	@adap: the adapter
4941 *	@mbox: mailbox to use for the FW command
4942 *	@pf: the PF owning the VI
4943 *	@vf: the VF owning the VI
4944 *	@viid: virtual interface identifier
4945 *
4946 *	Free a previously allocated virtual interface.
4947 */
4948int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4949	       unsigned int vf, unsigned int viid)
4950{
4951	struct fw_vi_cmd c;
4952
4953	memset(&c, 0, sizeof(c));
4954	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4955			    F_FW_CMD_REQUEST |
4956			    F_FW_CMD_EXEC |
4957			    V_FW_VI_CMD_PFN(pf) |
4958			    V_FW_VI_CMD_VFN(vf));
4959	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4960	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4961
4962	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4963}
4964
4965/**
4966 *	t4_set_rxmode - set Rx properties of a virtual interface
4967 *	@adap: the adapter
4968 *	@mbox: mailbox to use for the FW command
4969 *	@viid: the VI id
4970 *	@mtu: the new MTU or -1
4971 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4972 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4973 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4974 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
4975 *	@sleep_ok: if true we may sleep while awaiting command completion
4976 *
4977 *	Sets Rx properties of a virtual interface.
4978 */
4979int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4980		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4981		  bool sleep_ok)
4982{
4983	struct fw_vi_rxmode_cmd c;
4984
4985	/* convert to FW values */
4986	if (mtu < 0)
4987		mtu = M_FW_VI_RXMODE_CMD_MTU;
4988	if (promisc < 0)
4989		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4990	if (all_multi < 0)
4991		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4992	if (bcast < 0)
4993		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4994	if (vlanex < 0)
4995		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4996
4997	memset(&c, 0, sizeof(c));
4998	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4999			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
5000	c.retval_len16 = htonl(FW_LEN16(c));
5001	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
5002				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
5003				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
5004				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
5005				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
5006	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5007}
5008
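/*
 * Usage sketch (illustrative): the -1 "no change" convention lets a
 * caller update a single Rx property.  Here only promiscuous mode is
 * touched; MTU, all-multi, broadcast and VLAN extraction keep their
 * current settings.
 */
#if 0
static int
example_set_promisc(struct adapter *sc, unsigned int viid, int on)
{
	return (t4_set_rxmode(sc, sc->mbox, viid, -1, on ? 1 : 0, -1, -1,
	    -1, true));
}
#endif
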
5009/**
5010 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
5011 *	@adap: the adapter
5012 *	@mbox: mailbox to use for the FW command
5013 *	@viid: the VI id
5014 *	@free: if true any existing filters for this VI id are first removed
5015 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
5016 *	@addr: the MAC address(es)
5017 *	@idx: where to store the index of each allocated filter
5018 *	@hash: pointer to hash address filter bitmap
5019 *	@sleep_ok: call is allowed to sleep
5020 *
5021 *	Allocates an exact-match filter for each of the supplied addresses and
5022 *	sets it to the corresponding address.  If @idx is not %NULL it should
5023 *	have at least @naddr entries, each of which will be set to the index of
5024 *	the filter allocated for the corresponding MAC address.  If a filter
5025 *	could not be allocated for an address, its index is set to 0xffff.
5026 *	If @hash is not %NULL, addresses that fail to get an exact filter
5027 *	are hashed and or'ed into the hash filter bitmap pointed at by @hash.
5028 *
5029 *	Returns a negative error number or the number of filters allocated.
5030 */
5031int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
5032		      unsigned int viid, bool free, unsigned int naddr,
5033		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
5034{
5035	int offset, ret = 0;
5036	struct fw_vi_mac_cmd c;
5037	unsigned int nfilters = 0;
5038	unsigned int max_naddr = is_t4(adap) ?
5039				       NUM_MPS_CLS_SRAM_L_INSTANCES :
5040				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5041	unsigned int rem = naddr;
5042
5043	if (naddr > max_naddr)
5044		return -EINVAL;
5045
5046	for (offset = 0; offset < naddr; /**/) {
5047		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
5048					 ? rem
5049					 : ARRAY_SIZE(c.u.exact));
5050		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
5051						     u.exact[fw_naddr]), 16);
5052		struct fw_vi_mac_exact *p;
5053		int i;
5054
5055		memset(&c, 0, sizeof(c));
5056		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
5057				     F_FW_CMD_REQUEST |
5058				     F_FW_CMD_WRITE |
5059				     V_FW_CMD_EXEC(free) |
5060				     V_FW_VI_MAC_CMD_VIID(viid));
5061		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
5062					    V_FW_CMD_LEN16(len16));
5063
5064		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5065			p->valid_to_idx = htons(
5066				F_FW_VI_MAC_CMD_VALID |
5067				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
5068			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
5069		}
5070
5071		/*
5072		 * It's okay if we run out of space in our MAC address arena.
5073		 * Some of the addresses we submit may get stored so we need
5074		 * to run through the reply to see what the results were ...
5075		 */
5076		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
5077		if (ret && ret != -FW_ENOMEM)
5078			break;
5079
5080		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
5081			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5082
5083			if (idx)
5084				idx[offset+i] = (index >= max_naddr
5085						 ? 0xffff
5086						 : index);
5087			if (index < max_naddr)
5088				nfilters++;
5089			else if (hash)
5090				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
5091		}
5092
5093		free = false;
5094		offset += fw_naddr;
5095		rem -= fw_naddr;
5096	}
5097
5098	if (ret == 0 || ret == -FW_ENOMEM)
5099		ret = nfilters;
5100	return ret;
5101}
5102
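/*
 * Usage sketch (illustrative): program up to 7 unicast addresses and
 * fall back to the inexact-match hash for any address that did not get
 * an exact filter.  The helper and its argument layout are assumptions.
 */
#if 0
static int
example_add_addrs(struct adapter *sc, unsigned int viid, const u8 **addrs,
    unsigned int naddrs)
{
	u16 idx[7];		/* naddrs must be <= 7 per the API above */
	u64 hash = 0;
	int nfilters;

	nfilters = t4_alloc_mac_filt(sc, sc->mbox, viid, false, naddrs,
	    addrs, idx, &hash, true);
	if (nfilters < 0)
		return (nfilters);

	/* Addresses without an exact filter were folded into "hash". */
	if (hash != 0)
		return (t4_set_addr_hash(sc, sc->mbox, viid, false, hash,
		    true));
	return (0);
}
#endif
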
5103/**
5104 *	t4_change_mac - modifies the exact-match filter for a MAC address
5105 *	@adap: the adapter
5106 *	@mbox: mailbox to use for the FW command
5107 *	@viid: the VI id
5108 *	@idx: index of existing filter for old value of MAC address, or -1
5109 *	@addr: the new MAC address value
5110 *	@persist: whether a new MAC allocation should be persistent
5111 *	@add_smt: if true also add the address to the HW SMT
5112 *
5113 *	Modifies an exact-match filter and sets it to the new MAC address if
5114 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
5115 *	latter case the address is added persistently if @persist is %true.
5116 *
5117 *	Note that in general it is not possible to modify the value of a given
5118 *	filter so the generic way to modify an address filter is to free the one
5119 *	being used by the old address value and allocate a new filter for the
5120 *	new address value.
5121 *
5122 *	Returns a negative error number or the index of the filter with the new
5123 *	MAC value.  Note that this index may differ from @idx.
5124 */
5125int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
5126		  int idx, const u8 *addr, bool persist, bool add_smt)
5127{
5128	int ret, mode;
5129	struct fw_vi_mac_cmd c;
5130	struct fw_vi_mac_exact *p = c.u.exact;
5131	unsigned int max_mac_addr = is_t4(adap) ?
5132				    NUM_MPS_CLS_SRAM_L_INSTANCES :
5133				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5134
5135	if (idx < 0)                             /* new allocation */
5136		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
5137	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
5138
5139	memset(&c, 0, sizeof(c));
5140	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5141			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5142	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
5143	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
5144				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
5145				V_FW_VI_MAC_CMD_IDX(idx));
5146	memcpy(p->macaddr, addr, sizeof(p->macaddr));
5147
5148	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5149	if (ret == 0) {
5150		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
5151		if (ret >= max_mac_addr)
5152			ret = -ENOMEM;
5153	}
5154	return ret;
5155}
5156
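/*
 * Usage sketch (illustrative): replace a VI's unicast address, caching
 * the filter index across calls (-1 on the first call requests a fresh
 * allocation).  The caching scheme shown is an assumption.
 */
#if 0
static int
example_change_mac(struct adapter *sc, unsigned int viid, int *cached_idx,
    const u8 *new_mac)
{
	int ret;

	ret = t4_change_mac(sc, sc->mbox, viid, *cached_idx, new_mac, true,
	    true);
	if (ret < 0)
		return (ret);
	*cached_idx = ret;	/* index may differ from the one passed in */
	return (0);
}
#endif
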
5157/**
5158 *	t4_set_addr_hash - program the MAC inexact-match hash filter
5159 *	@adap: the adapter
5160 *	@mbox: mailbox to use for the FW command
5161 *	@viid: the VI id
5162 *	@ucast: whether the hash filter should also match unicast addresses
5163 *	@vec: the value to be written to the hash filter
5164 *	@sleep_ok: call is allowed to sleep
5165 *
5166 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
5167 */
5168int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
5169		     bool ucast, u64 vec, bool sleep_ok)
5170{
5171	struct fw_vi_mac_cmd c;
5172
5173	memset(&c, 0, sizeof(c));
5174	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
5175			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
5176	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
5177				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
5178				    V_FW_CMD_LEN16(1));
5179	c.u.hash.hashvec = cpu_to_be64(vec);
5180	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
5181}
5182
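/*
 * Usage sketch (illustrative): build the 64-bit hash vector from a list
 * of multicast addresses with hash_mac_addr() (the same helper
 * t4_alloc_mac_filt() uses above) and program it into the VI.
 */
#if 0
static int
example_set_mcast_hash(struct adapter *sc, unsigned int viid,
    const u8 **mcaddrs, unsigned int n)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		vec |= 1ULL << hash_mac_addr(mcaddrs[i]);
	return (t4_set_addr_hash(sc, sc->mbox, viid, false, vec, true));
}
#endif
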
5183/**
5184 *	t4_enable_vi - enable/disable a virtual interface
5185 *	@adap: the adapter
5186 *	@mbox: mailbox to use for the FW command
5187 *	@viid: the VI id
5188 *	@rx_en: 1=enable Rx, 0=disable Rx
5189 *	@tx_en: 1=enable Tx, 0=disable Tx
5190 *
5191 *	Enables/disables a virtual interface.
5192 */
5193int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
5194		 bool rx_en, bool tx_en)
5195{
5196	struct fw_vi_enable_cmd c;
5197
5198	memset(&c, 0, sizeof(c));
5199	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5200			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5201	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
5202			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
5203	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5204}
5205
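/*
 * Usage sketch (illustrative): a typical interface-up sequence programs
 * the Rx properties first and only then enables the VI for Rx and Tx.
 * The MTU and mode values are assumptions for the example.
 */
#if 0
static int
example_vi_up(struct adapter *sc, unsigned int viid)
{
	int ret;

	/* MTU 1500, not promiscuous, no all-multi, bcast + VLAN extraction */
	ret = t4_set_rxmode(sc, sc->mbox, viid, 1500, 0, 0, 1, 1, true);
	if (ret == 0)
		ret = t4_enable_vi(sc, sc->mbox, viid, true, true);
	return (ret);
}
#endif
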
5206/**
5207 *	t4_identify_port - identify a VI's port by blinking its LED
5208 *	@adap: the adapter
5209 *	@mbox: mailbox to use for the FW command
5210 *	@viid: the VI id
5211 *	@nblinks: how many times to blink LED at 2.5 Hz
5212 *
5213 *	Identifies a VI's port by blinking its LED.
5214 */
5215int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
5216		     unsigned int nblinks)
5217{
5218	struct fw_vi_enable_cmd c;
5219
5220	memset(&c, 0, sizeof(c));
5221	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
5222			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
5223	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
5224	c.blinkdur = htons(nblinks);
5225	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5226}
5227
5228/**
5229 *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
5230 *	@adap: the adapter
5231 *	@mbox: mailbox to use for the FW command
5232 *	@start: %true to enable the queues, %false to disable them
5233 *	@pf: the PF owning the queues
5234 *	@vf: the VF owning the queues
5235 *	@iqid: ingress queue id
5236 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5237 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5238 *
5239 *	Starts or stops an ingress queue and its associated FLs, if any.
5240 */
5241int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
5242		     unsigned int pf, unsigned int vf, unsigned int iqid,
5243		     unsigned int fl0id, unsigned int fl1id)
5244{
5245	struct fw_iq_cmd c;
5246
5247	memset(&c, 0, sizeof(c));
5248	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5249			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5250			    V_FW_IQ_CMD_VFN(vf));
5251	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
5252				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
5253	c.iqid = htons(iqid);
5254	c.fl0id = htons(fl0id);
5255	c.fl1id = htons(fl1id);
5256	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5257}
5258
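/*
 * Usage sketch (illustrative): quiesce an ingress queue that has no
 * free lists attached, using the documented 0xffff "no FL" sentinel for
 * both FL ids.  sc->pf and a VF of 0 are assumptions.
 */
#if 0
static int
example_stop_iq(struct adapter *sc, unsigned int iqid)
{
	return (t4_iq_start_stop(sc, sc->mbox, false, sc->pf, 0, iqid,
	    0xffff, 0xffff));
}
#endif
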
5259/**
5260 *	t4_iq_free - free an ingress queue and its FLs
5261 *	@adap: the adapter
5262 *	@mbox: mailbox to use for the FW command
5263 *	@pf: the PF owning the queues
5264 *	@vf: the VF owning the queues
5265 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
5266 *	@iqid: ingress queue id
5267 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
5268 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
5269 *
5270 *	Frees an ingress queue and its associated FLs, if any.
5271 */
5272int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5273	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
5274	       unsigned int fl0id, unsigned int fl1id)
5275{
5276	struct fw_iq_cmd c;
5277
5278	memset(&c, 0, sizeof(c));
5279	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5280			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5281			    V_FW_IQ_CMD_VFN(vf));
5282	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5283	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5284	c.iqid = htons(iqid);
5285	c.fl0id = htons(fl0id);
5286	c.fl1id = htons(fl1id);
5287	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5288}
5289
5290/**
5291 *	t4_eth_eq_free - free an Ethernet egress queue
5292 *	@adap: the adapter
5293 *	@mbox: mailbox to use for the FW command
5294 *	@pf: the PF owning the queue
5295 *	@vf: the VF owning the queue
5296 *	@eqid: egress queue id
5297 *
5298 *	Frees an Ethernet egress queue.
5299 */
5300int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5301		   unsigned int vf, unsigned int eqid)
5302{
5303	struct fw_eq_eth_cmd c;
5304
5305	memset(&c, 0, sizeof(c));
5306	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5307			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5308			    V_FW_EQ_ETH_CMD_VFN(vf));
5309	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5310	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5311	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5312}
5313
5314/**
5315 *	t4_ctrl_eq_free - free a control egress queue
5316 *	@adap: the adapter
5317 *	@mbox: mailbox to use for the FW command
5318 *	@pf: the PF owning the queue
5319 *	@vf: the VF owning the queue
5320 *	@eqid: egress queue id
5321 *
5322 *	Frees a control egress queue.
5323 */
5324int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5325		    unsigned int vf, unsigned int eqid)
5326{
5327	struct fw_eq_ctrl_cmd c;
5328
5329	memset(&c, 0, sizeof(c));
5330	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5331			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5332			    V_FW_EQ_CTRL_CMD_VFN(vf));
5333	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5334	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5335	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5336}
5337
5338/**
5339 *	t4_ofld_eq_free - free an offload egress queue
5340 *	@adap: the adapter
5341 *	@mbox: mailbox to use for the FW command
5342 *	@pf: the PF owning the queue
5343 *	@vf: the VF owning the queue
5344 *	@eqid: egress queue id
5345 *
5346 *	Frees an offload egress queue.
5347 */
5348int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5349		    unsigned int vf, unsigned int eqid)
5350{
5351	struct fw_eq_ofld_cmd c;
5352
5353	memset(&c, 0, sizeof(c));
5354	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5355			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5356			    V_FW_EQ_OFLD_CMD_VFN(vf));
5357	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5358	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5359	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5360}
5361
5362/**
5363 *	t4_handle_fw_rpl - process a FW reply message
5364 *	@adap: the adapter
5365 *	@rpl: start of the FW message
5366 *
5367 *	Processes a FW message, such as link state change messages.
5368 */
5369int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5370{
5371	u8 opcode = *(const u8 *)rpl;
5372	const struct fw_port_cmd *p = (const void *)rpl;
5373	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5374
5375	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5376		/* link/module state change message */
5377		int speed = 0, fc = 0, i;
5378		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5379		struct port_info *pi = NULL;
5380		struct link_config *lc;
5381		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5382		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5383		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5384
5385		if (stat & F_FW_PORT_CMD_RXPAUSE)
5386			fc |= PAUSE_RX;
5387		if (stat & F_FW_PORT_CMD_TXPAUSE)
5388			fc |= PAUSE_TX;
5389		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5390			speed = SPEED_100;
5391		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5392			speed = SPEED_1000;
5393		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5394			speed = SPEED_10000;
5395		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
5396			speed = SPEED_40000;
5397
5398		for_each_port(adap, i) {
5399			pi = adap2pinfo(adap, i);
5400			if (pi->tx_chan == chan)
5401				break;
5402		}
5403		lc = &pi->link_cfg;
5404
5405		if (link_ok != lc->link_ok || speed != lc->speed ||
5406		    fc != lc->fc) {                    /* something changed */
5407			int reason;
5408
5409			if (!link_ok && lc->link_ok)
5410				reason = G_FW_PORT_CMD_LINKDNRC(stat);
5411			else
5412				reason = -1;
5413
5414			lc->link_ok = link_ok;
5415			lc->speed = speed;
5416			lc->fc = fc;
5417			lc->supported = ntohs(p->u.info.pcap);
5418			t4_os_link_changed(adap, i, link_ok, reason);
5419		}
5420		if (mod != pi->mod_type) {
5421			pi->mod_type = mod;
5422			t4_os_portmod_changed(adap, i);
5423		}
5424	} else {
5425		CH_WARN_RATELIMIT(adap,
5426		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5427		return -EINVAL;
5428	}
5429	return 0;
5430}
5431
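/*
 * Dispatch sketch (illustrative): t4_handle_fw_rpl() expects a pointer
 * to the raw, 64-bit-aligned firmware message (a fw_port_cmd for link
 * events).  How the message is extracted from the enclosing work
 * request is OS glue and is assumed here.
 */
#if 0
static int
example_fw_msg_dispatch(struct adapter *sc, const void *msg)
{
	return (t4_handle_fw_rpl(sc, (const __be64 *)msg));
}
#endif
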
5432/**
5433 *	get_pci_mode - determine a card's PCI mode
5434 *	@adapter: the adapter
5435 *	@p: where to store the PCI settings
5436 *
5437 *	Determines a card's PCI mode and associated parameters, such as speed
5438 *	and width.
5439 */
5440static void __devinit get_pci_mode(struct adapter *adapter,
5441				   struct pci_params *p)
5442{
5443	u16 val;
5444	u32 pcie_cap;
5445
5446	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5447	if (pcie_cap) {
5448		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5449		p->speed = val & PCI_EXP_LNKSTA_CLS;
5450		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5451	}
5452}
5453
5454/**
5455 *	init_link_config - initialize a link's SW state
5456 *	@lc: structure holding the link state
5457 *	@caps: link capabilities
5458 *
5459 *	Initializes the SW state maintained for each link, including the link's
5460 *	capabilities and default speed/flow-control/autonegotiation settings.
5461 */
5462static void __devinit init_link_config(struct link_config *lc,
5463				       unsigned int caps)
5464{
5465	lc->supported = caps;
5466	lc->requested_speed = 0;
5467	lc->speed = 0;
5468	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5469	if (lc->supported & FW_PORT_CAP_ANEG) {
5470		lc->advertising = lc->supported & ADVERT_MASK;
5471		lc->autoneg = AUTONEG_ENABLE;
5472		lc->requested_fc |= PAUSE_AUTONEG;
5473	} else {
5474		lc->advertising = 0;
5475		lc->autoneg = AUTONEG_DISABLE;
5476	}
5477}
5478
5479static int __devinit get_flash_params(struct adapter *adapter)
5480{
5481	int ret;
5482	u32 info = 0;
5483
5484	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5485	if (!ret)
5486		ret = sf1_read(adapter, 3, 0, 1, &info);
5487	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5488	if (ret < 0)
5489		return ret;
5490
5491	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
5492		return -EINVAL;
5493	info >>= 16;                           /* log2 of size */
5494	if (info >= 0x14 && info < 0x18)
5495		adapter->params.sf_nsec = 1 << (info - 16);
5496	else if (info == 0x18)
5497		adapter->params.sf_nsec = 64;
5498	else
5499		return -EINVAL;
5500	adapter->params.sf_size = 1 << info;
5501	return 0;
5502}
5503
5504static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5505						  u8 range)
5506{
5507	u16 val;
5508	u32 pcie_cap;
5509
5510	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5511	if (pcie_cap) {
5512		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5513		val &= 0xfff0;
5514		val |= range;
5515		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5516	}
5517}
5518
5519/**
5520 *	t4_prep_adapter - prepare SW and HW for operation
5521 *	@adapter: the adapter
5522 *
5523 *	Initialize adapter SW state for the various HW modules and set
5524 *	initial values for some adapter tunables.
5527 */
5528int __devinit t4_prep_adapter(struct adapter *adapter)
5529{
5530	int ret;
5531	uint16_t device_id;
5532	uint32_t pl_rev;
5533
5534	get_pci_mode(adapter, &adapter->params.pci);
5535
5536	pl_rev = t4_read_reg(adapter, A_PL_REV);
5537	adapter->params.chipid = G_CHIPID(pl_rev);
5538	adapter->params.rev = G_REV(pl_rev);
5539	if (adapter->params.chipid == 0) {
5540		/* T4 did not have chipid in PL_REV (T5 onwards do) */
5541		adapter->params.chipid = CHELSIO_T4;
5542
5543		/* T4A1 chip is not supported */
5544		if (adapter->params.rev == 1) {
5545			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
5546			return -EINVAL;
5547		}
5548	}
5549	adapter->params.pci.vpd_cap_addr =
5550	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5551
5552	ret = get_flash_params(adapter);
5553	if (ret < 0)
5554		return ret;
5555
5556	ret = get_vpd_params(adapter, &adapter->params.vpd);
5557	if (ret < 0)
5558		return ret;
5559
5560	/* Cards with real ASICs have the chipid in the PCIe device id */
5561	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
5562	if (device_id >> 12 == adapter->params.chipid)
5563		adapter->params.cim_la_size = CIMLA_SIZE;
5564	else {
5565		/* FPGA */
5566		adapter->params.fpga = 1;
5567		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5568	}
5569
5570	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5571
5572	/*
5573	 * Default port and clock for debugging in case we can't reach FW.
5574	 */
5575	adapter->params.nports = 1;
5576	adapter->params.portvec = 1;
5577	adapter->params.vpd.cclk = 50000;
5578
5579	/* Set pci completion timeout value to 4 seconds. */
5580	set_pcie_completion_timeout(adapter, 0xd);
5581	return 0;
5582}
5583
5584/**
5585 *	t4_init_tp_params - initialize adap->params.tp
5586 *	@adap: the adapter
5587 *
5588 *	Initialize various fields of the adapter's TP Parameters structure.
5589 */
5590int __devinit t4_init_tp_params(struct adapter *adap)
5591{
5592	int chan;
5593	u32 v;
5594
5595	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5596	adap->params.tp.tre = G_TIMERRESOLUTION(v);
5597	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5598
5599	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5600	for (chan = 0; chan < NCHAN; chan++)
5601		adap->params.tp.tx_modq[chan] = chan;
5602
5603	/*
5604	 * Cache the adapter's Compressed Filter Mode and global Ingress
5605	 * Configuration.
5606	 */
5607	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5608			 &adap->params.tp.vlan_pri_map, 1,
5609			 A_TP_VLAN_PRI_MAP);
5610	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5611			 &adap->params.tp.ingress_config, 1,
5612			 A_TP_INGRESS_CONFIG);
5613
5614	/*
5615	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5616	 * shift positions of several elements of the Compressed Filter Tuple
5617	 * for this adapter which we need frequently ...
5618	 */
5619	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5620	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5621	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5622	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
5623
5624	/*
5625	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5626 *	represents the presence of an Outer VLAN instead of a VNIC ID.
5627	 */
5628	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
5629		adap->params.tp.vnic_shift = -1;
5630
5631	return 0;
5632}
5633
5634/**
5635 *	t4_filter_field_shift - calculate filter field shift
5636 *	@adap: the adapter
5637 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5638 *
5639 *	Return the shift position of a filter field within the Compressed
5640 *	Filter Tuple.  The filter field is specified via its selection bit
5641 *	within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
5642 */
5643int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
5644{
5645	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
5646	unsigned int sel;
5647	int field_shift;
5648
5649	if ((filter_mode & filter_sel) == 0)
5650		return -1;
5651
5652	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5653		switch (filter_mode & sel) {
5654		case F_FCOE:          field_shift += W_FT_FCOE;          break;
5655		case F_PORT:          field_shift += W_FT_PORT;          break;
5656		case F_VNIC_ID:       field_shift += W_FT_VNIC_ID;       break;
5657		case F_VLAN:          field_shift += W_FT_VLAN;          break;
5658		case F_TOS:           field_shift += W_FT_TOS;           break;
5659		case F_PROTOCOL:      field_shift += W_FT_PROTOCOL;      break;
5660		case F_ETHERTYPE:     field_shift += W_FT_ETHERTYPE;     break;
5661		case F_MACMATCH:      field_shift += W_FT_MACMATCH;      break;
5662		case F_MPSHITTYPE:    field_shift += W_FT_MPSHITTYPE;    break;
5663		case F_FRAGMENTATION: field_shift += W_FT_FRAGMENTATION; break;
5664		}
5665	}
5666	return field_shift;
5667}
5668
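/*
 * Worked example (illustrative): with a filter mode of
 * F_PORT | F_VLAN | F_PROTOCOL, fields are packed from bit 0 in
 * TP_VLAN_PRI_MAP order, so port_shift = 0, vlan_shift = W_FT_PORT and
 * protocol_shift = W_FT_PORT + W_FT_VLAN.  A caller can then position a
 * field value within the compressed tuple using the cached shift:
 */
#if 0
static uint64_t
example_vlan_tuple(struct adapter *sc, uint16_t vid)
{
	int shift = sc->params.tp.vlan_shift;

	if (shift < 0)
		return (0);	/* VLAN is not part of the filter mode */
	return ((uint64_t)vid << shift);
}
#endif
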
5669int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5670{
5671	u8 addr[6];
5672	int ret, i, j;
5673	struct fw_port_cmd c;
5674	unsigned int rss_size;
5675	adapter_t *adap = p->adapter;
5676
5677	memset(&c, 0, sizeof(c));
5678
5679	for (i = 0, j = -1; i <= p->port_id; i++) {
5680		do {
5681			j++;
5682		} while ((adap->params.portvec & (1 << j)) == 0);
5683	}
5684
5685	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5686			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5687			       V_FW_PORT_CMD_PORTID(j));
5688	c.action_to_len16 = htonl(
5689		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5690		FW_LEN16(c));
5691	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5692	if (ret)
5693		return ret;
5694
5695	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5696	if (ret < 0)
5697		return ret;
5698
5699	p->viid = ret;
5700	p->tx_chan = j;
5701	p->rx_chan_map = get_mps_bg_map(adap, j);
5702	p->lport = j;
5703	p->rss_size = rss_size;
5704	t4_os_set_hw_addr(adap, p->port_id, addr);
5705
5706	ret = ntohl(c.u.info.lstatus_to_modtype);
5707	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5708		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5709	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5710	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5711
5712	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5713
5714	return 0;
5715}
5716
5717int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
5718{
5719	struct fw_sched_cmd cmd;
5720
5721	memset(&cmd, 0, sizeof(cmd));
5722	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5723				      F_FW_CMD_REQUEST |
5724				      F_FW_CMD_WRITE);
5725	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5726
5727	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
5728	cmd.u.config.type = type;
5729	cmd.u.config.minmaxen = minmaxen;
5730
5731	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5732			       NULL, 1);
5733}
5734
5735int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
5736		    int rateunit, int ratemode, int channel, int cl,
5737		    int minrate, int maxrate, int weight, int pktsize)
5738{
5739	struct fw_sched_cmd cmd;
5740
5741	memset(&cmd, 0, sizeof(cmd));
5742	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
5743				      F_FW_CMD_REQUEST |
5744				      F_FW_CMD_WRITE);
5745	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5746
5747	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
5748	cmd.u.params.type = type;
5749	cmd.u.params.level = level;
5750	cmd.u.params.mode = mode;
5751	cmd.u.params.ch = channel;
5752	cmd.u.params.cl = cl;
5753	cmd.u.params.unit = rateunit;
5754	cmd.u.params.rate = ratemode;
5755	cmd.u.params.min = cpu_to_be32(minrate);
5756	cmd.u.params.max = cpu_to_be32(maxrate);
5757	cmd.u.params.weight = cpu_to_be16(weight);
5758	cmd.u.params.pktsize = cpu_to_be16(pktsize);
5759
5760	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
5761			       NULL, 1);
5762}
5763
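/*
 * Usage sketch (illustrative): configure a class-level rate limit.  The
 * FW_SCHED_* selector names are from the firmware interface header and
 * the specific values (absolute bit rate, 10 Gb/s expressed in Kb/s,
 * zero weight/pktsize) are assumptions for the example.
 */
#if 0
static int
example_rate_limit_class(struct adapter *sc, int channel, int cl)
{
	return (t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
	    FW_SCHED_PARAMS_LEVEL_CL_RL, FW_SCHED_PARAMS_MODE_CLASS,
	    FW_SCHED_PARAMS_UNIT_BITRATE, FW_SCHED_PARAMS_RATE_ABS,
	    channel, cl, 0, 10000000, 0, 0));
}
#endif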