/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgbe/common/t4_hw.c 247355 2013-02-26 21:25:17Z np $");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
		        int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
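
/*
 * Illustrative sketch: poll until the hardware clears a BIST start bit,
 * checking up to 10 times with 1 usec between checks, the way the MC/EDC
 * backdoor readers below do through the t4_wait_op_done() wrapper:
 *
 *	ret = t4_wait_op_done_val(adap, A_MC_BIST_CMD, F_START_BIST, 0,
 *				  10, 1, NULL);
 *	if (ret)	// still busy after 10 attempts: -EAGAIN
 *		return ret;
 */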

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
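
/*
 * Illustrative sketch (V_FIELD/M_FIELD stand in for any field macros from
 * t4_regs.h; they are placeholders, not real names): update one field of a
 * register while preserving the other bits:
 *
 *	t4_set_reg_field(adap, addr, V_FIELD(M_FIELD), V_FIELD(3));
 *
 * @mask selects the bits to change and @val supplies their new contents.
 */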

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: index of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
		     V_REGISTER(reg));
	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
}
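
/*
 * Illustrative sketch: read the 32-bit vendor/device ID word at config
 * space offset 0 through the backdoor, immune to hypervisor trapping of
 * ordinary config cycles:
 *
 *	u32 id = t4_hw_pci_read_cfg4(adap, 0);	// vendor ID in bits 15:0
 */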

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (!(pcie_fw & F_PCIE_FW_ERR))
		CH_ERR(adap, "Firmware error report called with no error\n");
	else
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
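
/*
 * Illustrative sketch of a typical caller (this is essentially what
 * t4_restart_aneg() below does): build the command in big-endian form and
 * hand it to the t4_wr_mbox() wrapper, which lands here:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
 *			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);	// 0 or -errno
 */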

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64 - 1);
	end = (addr + len + 64 - 1) & ~(64 - 1);
	offset = (addr - start) / sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if (mtype == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
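
/*
 * Illustrative sketch: read 128 bytes starting at byte 0x104 of EDC0.
 * The routine rounds the window to the covering 64-byte lines
 * [0x100, 0x1c0) internally and copies out only the requested span:
 *
 *	__be32 buf[32];
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, 0x104, sizeof(buf), buf);
 *
 * addr, len and buf must all be 32-bit aligned; multi-byte integers in
 * the returned bytes are in firmware (big-endian) order.
 */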

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            512
#define VPD_INFO_FLD_HDR_SIZE	3

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
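
/*
 * Worked example (illustrative, assuming the usual 32K EEPROMSIZE): with
 * @fn = 2 and @sz = 2K we get A = 4K, ES = 32K, and:
 *
 *	physical 0x0000 -> virtual 0x7c00	([0..1K) -> [31K..32K))
 *	physical 0x0400 -> virtual 0x7000	([1K..1K+A) -> [ES-A..ES))
 *	physical 0x1400 -> virtual 0x0000	([1K+A..ES) -> [0..ES-A-1K))
 */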

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}
		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}
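
/*
 * VPD-R information fields carry a 3-byte header (VPD_INFO_FLD_HDR_SIZE):
 * two keyword bytes and a length byte, followed by the value.  An SN field
 * might look like (illustrative bytes):
 *
 *	'S' 'N' 0x08 'P' 'T' '0' '8' '1' '5' '0' '0'
 *
 * get_vpd_keyword_val(v, "SN") then returns the offset of the first value
 * byte, and the length byte sits at (offset - VPD_INFO_FLD_HDR_SIZE + 2),
 * which is exactly how get_vpd_params() below picks up the serial number
 * length.
 */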

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
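
/*
 * Illustrative sketch (not a routine in this file): sf1_write() and
 * sf1_read() are chained into complete SPI transactions by asserting
 * @cont on every step but the last.  Reading the 3-byte flash ID could
 * look like:
 *
 *	ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);	// send opcode, chain
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &id);	// ID bytes, end op
 *	t4_write_reg(adapter, A_SF_OP, 0);		// unlock SF
 */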

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what's on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	addr = t4_flash_cfg_addr(adap);
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FLASH_FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FLASH_FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
	    FLASH_FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}
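
/*
 * Note on the checksum scheme above (illustrative): FW images are built so
 * that the 32-bit big-endian words of the whole image, checksum word
 * included, sum to 0xffffffff mod 2^32.  For a hypothetical two-word image
 * the checksum word would be 0xffffffff - word0, and corrupting any word
 * makes the total miss 0xffffffff.
 */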

/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 chunks of 512B = 512KB */
	VENDOR_ID = 0x1425, /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 *	modify_device_id - Modifies the device ID of the Boot BIOS image
 *	@device_id: the device ID to write.
 *	@boot_data: the boot image to modify.
 *
 *	Write the supplied device ID to the boot BIOS image.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {
			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;
		}

		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}
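
/*
 * Example of the checksum fixup above (illustrative): the legacy image
 * must keep the byte sum of the whole image at 0 mod 256.  If the bytes
 * of a 512-byte image sum to 0x1234 with cksum set to 0, the new checksum
 * is (u8)-0x34 = 0xcc, since 0x1234 + 0xcc ends in 0x00.
 */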

/*
 *	t4_load_boot - download boot flash
 *	@adapter: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data
 *	@size: image size
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header,
 *	followed by the boot image itself.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = boot_addr * 1024;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * Number of sectors spanned
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16 *)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32 *)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = (device_id & 0xff) | 0x4000;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16 *)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Write the first page last, from the saved start of the image */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < CIM_NUM_OBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
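
/*
 * Illustrative sketch: dump IBQ 3 in full.  Each IBQ entry is four 32-bit
 * words, so the whole queue is CIM_IBQ_SIZE * 4 words:
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int n = t4_read_cim_ibq(adap, 3, buf, ARRAY_SIZE(buf));
 *
 *	if (n < 0)
 *		return n;	// -EINVAL or a debug-read timeout
 */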

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)                    /* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
1748
1749void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1750{
1751	unsigned int i, j;
1752
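	/*
	 * The output is interleaved across the 8 LA banks: sample j of
	 * bank i lands at la_buf[8 * j + i].
	 */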
1753	for (i = 0; i < 8; i++) {
1754		u32 *p = la_buf + i;
1755
1756		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1757		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1758		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1759		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1760			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1761	}
1762}
1763
1764#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1765		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1766
1767/**
1768 *	t4_link_start - apply link configuration to MAC/PHY
1769 *	@adap: the adapter
1770 *	@mbox: mbox to use for the FW command
 *	@port: the port id
1771 *	@lc: the requested link configuration
1772 *
1773 *	Set up a port's MAC and PHY according to a desired link configuration.
1774 *	- If the PHY can auto-negotiate first decide what to advertise, then
1775 *	  enable/disable auto-negotiation as desired, and reset.
1776 *	- If the PHY does not auto-negotiate just reset it.
1777 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1778 *	  otherwise do it later based on the outcome of auto-negotiation.
1779 */
1780int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1781		  struct link_config *lc)
1782{
1783	struct fw_port_cmd c;
1784	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1785
1786	lc->link_ok = 0;
1787	if (lc->requested_fc & PAUSE_RX)
1788		fc |= FW_PORT_CAP_FC_RX;
1789	if (lc->requested_fc & PAUSE_TX)
1790		fc |= FW_PORT_CAP_FC_TX;
1791
1792	memset(&c, 0, sizeof(c));
1793	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1794			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1795	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1796				  FW_LEN16(c));
1797
1798	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1799		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1800		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1801	} else if (lc->autoneg == AUTONEG_DISABLE) {
1802		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1803		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1804	} else
1805		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1806
1807	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1808}
1809
1810/**
1811 *	t4_restart_aneg - restart autonegotiation
1812 *	@adap: the adapter
1813 *	@mbox: mbox to use for the FW command
1814 *	@port: the port id
1815 *
1816 *	Restarts autonegotiation for the selected port.
1817 */
1818int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1819{
1820	struct fw_port_cmd c;
1821
1822	memset(&c, 0, sizeof(c));
1823	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1824			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1825	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1826				  FW_LEN16(c));
1827	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1828	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1829}
1830
1831struct intr_info {
1832	unsigned int mask;       /* bits to check in interrupt status */
1833	const char *msg;         /* message to print or NULL */
1834	short stat_idx;          /* stat counter to increment or -1 */
1835	unsigned short fatal;    /* whether the condition reported is fatal */
1836};
1837
1838/**
1839 *	t4_handle_intr_status - table driven interrupt handler
1840 *	@adapter: the adapter that generated the interrupt
1841 *	@reg: the interrupt status register to process
1842 *	@acts: table of interrupt actions
1843 *
1844 *	A table driven interrupt handler that applies a set of masks to an
1845 *	interrupt status word and performs the corresponding actions if the
1846 *	interrupts described by the mask have occurred.  The actions include
1847 *	optionally emitting a warning or alert message.  The table is terminated
1848 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1849 *	conditions.
1850 */
1851static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1852				 const struct intr_info *acts)
1853{
1854	int fatal = 0;
1855	unsigned int mask = 0;
1856	unsigned int status = t4_read_reg(adapter, reg);
1857
1858	for ( ; acts->mask; ++acts) {
1859		if (!(status & acts->mask))
1860			continue;
1861		if (acts->fatal) {
1862			fatal++;
1863			CH_ALERT(adapter, "%s (0x%x)\n",
1864				 acts->msg, status & acts->mask);
1865		} else if (acts->msg)
1866			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1867					  acts->msg, status & acts->mask);
1868		mask |= acts->mask;
1869	}
1870	status &= mask;
1871	if (status)                           /* clear processed interrupts */
1872		t4_write_reg(adapter, reg, status);
1873	return fatal;
1874}
1875
1876/*
1877 * Interrupt handler for the PCIE module.
1878 */
1879static void pcie_intr_handler(struct adapter *adapter)
1880{
1881	static struct intr_info sysbus_intr_info[] = {
1882		{ F_RNPP, "RXNP array parity error", -1, 1 },
1883		{ F_RPCP, "RXPC array parity error", -1, 1 },
1884		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1885		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1886		{ F_RFTP, "RXFT array parity error", -1, 1 },
1887		{ 0 }
1888	};
1889	static struct intr_info pcie_port_intr_info[] = {
1890		{ F_TPCP, "TXPC array parity error", -1, 1 },
1891		{ F_TNPP, "TXNP array parity error", -1, 1 },
1892		{ F_TFTP, "TXFT array parity error", -1, 1 },
1893		{ F_TCAP, "TXCA array parity error", -1, 1 },
1894		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1895		{ F_RCAP, "RXCA array parity error", -1, 1 },
1896		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1897		{ F_RDPE, "Rx data parity error", -1, 1 },
1898		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1899		{ 0 }
1900	};
1901	static struct intr_info pcie_intr_info[] = {
1902		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1903		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1904		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1905		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1906		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1907		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1908		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1909		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1910		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1911		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1912		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1913		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1914		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1915		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1916		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1917		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1918		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1919		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1920		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1921		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1922		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
1923		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1924		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1925		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1926		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1927		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1928		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1929		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
1930		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
1931		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1932		  0 },
1933		{ 0 }
1934	};
1935
1936	int fat;
1937
1938	fat = t4_handle_intr_status(adapter,
1939				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1940				    sysbus_intr_info) +
1941	      t4_handle_intr_status(adapter,
1942				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1943				    pcie_port_intr_info) +
1944	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1945	if (fat)
1946		t4_fatal_err(adapter);
1947}
1948
1949/*
1950 * TP interrupt handler.
1951 */
1952static void tp_intr_handler(struct adapter *adapter)
1953{
1954	static struct intr_info tp_intr_info[] = {
1955		{ 0x3fffffff, "TP parity error", -1, 1 },
1956		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1957		{ 0 }
1958	};
1959
1960	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1961		t4_fatal_err(adapter);
1962}
1963
1964/*
1965 * SGE interrupt handler.
1966 */
1967static void sge_intr_handler(struct adapter *adapter)
1968{
1969	u64 v;
1970	u32 err;
1971
1972	static struct intr_info sge_intr_info[] = {
1973		{ F_ERR_CPL_EXCEED_IQE_SIZE,
1974		  "SGE received CPL exceeding IQE size", -1, 1 },
1975		{ F_ERR_INVALID_CIDX_INC,
1976		  "SGE GTS CIDX increment too large", -1, 0 },
1977		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1978		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1979		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1980		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1981		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1982		  0 },
1983		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1984		  0 },
1985		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1986		  0 },
1987		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1988		  0 },
1989		{ F_ERR_ING_CTXT_PRIO,
1990		  "SGE too many priority ingress contexts", -1, 0 },
1991		{ F_ERR_EGR_CTXT_PRIO,
1992		  "SGE too many priority egress contexts", -1, 0 },
1993		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1994		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1995		{ 0 }
1996	};
1997
1998	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1999	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
2000	if (v) {
2001		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
2002			 (unsigned long long)v);
2003		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
2004		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
2005	}
2006
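	/*
	 * Fold the fatal-condition count from INT_CAUSE3 into v so that a
	 * fatal SGE error also triggers t4_fatal_err() below.
	 */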
2007	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
2008
2009	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
2010	if (err & F_ERROR_QID_VALID) {
2011		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
2012		if (err & F_UNCAPTURED_ERROR)
2013			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
2014		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
2015			     F_UNCAPTURED_ERROR);
2016	}
2017
2018	if (v != 0)
2019		t4_fatal_err(adapter);
2020}
2021
2022#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
2023		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
2024#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
2025		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
2026
2027/*
2028 * CIM interrupt handler.
2029 */
2030static void cim_intr_handler(struct adapter *adapter)
2031{
2032	static struct intr_info cim_intr_info[] = {
2033		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
2034		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
2035		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
2036		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
2037		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
2038		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
2039		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
2040		{ 0 }
2041	};
2042	static struct intr_info cim_upintr_info[] = {
2043		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
2044		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
2045		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
2046		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
2047		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
2048		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
2049		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
2050		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
2051		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
2052		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
2053		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
2054		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
2055		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
2056		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
2057		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
2058		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
2059		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
2060		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
2061		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
2062		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
2063		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
2064		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
2065		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
2066		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
2067		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
2068		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
2069		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
2070		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
2071		{ 0 }
2072	};
2073	int fat;
2074
2075	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
2076		t4_report_fw_error(adapter);
2077
2078	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
2079				    cim_intr_info) +
2080	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
2081				    cim_upintr_info);
2082	if (fat)
2083		t4_fatal_err(adapter);
2084}
2085
2086/*
2087 * ULP RX interrupt handler.
2088 */
2089static void ulprx_intr_handler(struct adapter *adapter)
2090{
2091	static struct intr_info ulprx_intr_info[] = {
2092		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
2093		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
2094		{ 0x7fffff, "ULPRX parity error", -1, 1 },
2095		{ 0 }
2096	};
2097
2098	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
2099		t4_fatal_err(adapter);
2100}
2101
2102/*
2103 * ULP TX interrupt handler.
2104 */
2105static void ulptx_intr_handler(struct adapter *adapter)
2106{
2107	static struct intr_info ulptx_intr_info[] = {
2108		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
2109		  0 },
2110		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
2111		  0 },
2112		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
2113		  0 },
2114		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
2115		  0 },
2116		{ 0xfffffff, "ULPTX parity error", -1, 1 },
2117		{ 0 }
2118	};
2119
2120	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
2121		t4_fatal_err(adapter);
2122}
2123
2124/*
2125 * PM TX interrupt handler.
2126 */
2127static void pmtx_intr_handler(struct adapter *adapter)
2128{
2129	static struct intr_info pmtx_intr_info[] = {
2130		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
2131		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
2132		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
2133		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2134		{ 0xffffff0, "PMTX framing error", -1, 1 },
2135		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
2136		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
2137		  1 },
2138		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
2139		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
2140		{ 0 }
2141	};
2142
2143	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
2144		t4_fatal_err(adapter);
2145}
2146
2147/*
2148 * PM RX interrupt handler.
2149 */
2150static void pmrx_intr_handler(struct adapter *adapter)
2151{
2152	static struct intr_info pmrx_intr_info[] = {
2153		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2154		{ 0x3ffff0, "PMRX framing error", -1, 1 },
2155		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
2156		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2157		  1 },
2158		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2159		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2160		{ 0 }
2161	};
2162
2163	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2164		t4_fatal_err(adapter);
2165}
2166
2167/*
2168 * CPL switch interrupt handler.
2169 */
2170static void cplsw_intr_handler(struct adapter *adapter)
2171{
2172	static struct intr_info cplsw_intr_info[] = {
2173		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2174		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2175		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2176		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2177		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2178		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2179		{ 0 }
2180	};
2181
2182	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2183		t4_fatal_err(adapter);
2184}
2185
2186/*
2187 * LE interrupt handler.
2188 */
2189static void le_intr_handler(struct adapter *adap)
2190{
2191	static struct intr_info le_intr_info[] = {
2192		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2193		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2194		{ F_PARITYERR, "LE parity error", -1, 1 },
2195		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2196		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2197		{ 0 }
2198	};
2199
2200	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2201		t4_fatal_err(adap);
2202}
2203
2204/*
2205 * MPS interrupt handler.
2206 */
2207static void mps_intr_handler(struct adapter *adapter)
2208{
2209	static struct intr_info mps_rx_intr_info[] = {
2210		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2211		{ 0 }
2212	};
2213	static struct intr_info mps_tx_intr_info[] = {
2214		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2215		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2216		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2217		  -1, 1 },
2218		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2219		  -1, 1 },
2220		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2221		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2222		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2223		{ 0 }
2224	};
2225	static struct intr_info mps_trc_intr_info[] = {
2226		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2227		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2228		  1 },
2229		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2230		{ 0 }
2231	};
2232	static struct intr_info mps_stat_sram_intr_info[] = {
2233		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2234		{ 0 }
2235	};
2236	static struct intr_info mps_stat_tx_intr_info[] = {
2237		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2238		{ 0 }
2239	};
2240	static struct intr_info mps_stat_rx_intr_info[] = {
2241		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2242		{ 0 }
2243	};
2244	static struct intr_info mps_cls_intr_info[] = {
2245		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2246		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2247		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2248		{ 0 }
2249	};
2250
2251	int fat;
2252
2253	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2254				    mps_rx_intr_info) +
2255	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2256				    mps_tx_intr_info) +
2257	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2258				    mps_trc_intr_info) +
2259	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2260				    mps_stat_sram_intr_info) +
2261	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2262				    mps_stat_tx_intr_info) +
2263	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2264				    mps_stat_rx_intr_info) +
2265	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2266				    mps_cls_intr_info);
2267
2268	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2269	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
2270	if (fat)
2271		t4_fatal_err(adapter);
2272}
2273
2274#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2275
2276/*
2277 * EDC/MC interrupt handler.
2278 */
2279static void mem_intr_handler(struct adapter *adapter, int idx)
2280{
2281	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2282
2283	unsigned int addr, cnt_addr, v;
2284
2285	if (idx <= MEM_EDC1) {
2286		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2287		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2288	} else {
2289		addr = A_MC_INT_CAUSE;
2290		cnt_addr = A_MC_ECC_STATUS;
2291	}
2292
2293	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2294	if (v & F_PERR_INT_CAUSE)
2295		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2296	if (v & F_ECC_CE_INT_CAUSE) {
2297		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2298
2299		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2300		CH_WARN_RATELIMIT(adapter,
2301				  "%u %s correctable ECC data error%s\n",
2302				  cnt, name[idx], cnt > 1 ? "s" : "");
2303	}
2304	if (v & F_ECC_UE_INT_CAUSE)
2305		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2306			 name[idx]);
2307
2308	t4_write_reg(adapter, addr, v);
2309	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2310		t4_fatal_err(adapter);
2311}
2312
2313/*
2314 * MA interrupt handler.
2315 */
2316static void ma_intr_handler(struct adapter *adapter)
2317{
2318	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2319
2320	if (status & F_MEM_PERR_INT_CAUSE)
2321		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
2322			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2323	if (status & F_MEM_WRAP_INT_CAUSE) {
2324		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
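		/*
		 * The wrapped address is evidently reported in 16-byte
		 * units, hence the << 4 when printing it.
		 */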
2325		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2326			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
2327			 G_MEM_WRAP_ADDRESS(v) << 4);
2328	}
2329	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2330	t4_fatal_err(adapter);
2331}
2332
2333/*
2334 * SMB interrupt handler.
2335 */
2336static void smb_intr_handler(struct adapter *adap)
2337{
2338	static struct intr_info smb_intr_info[] = {
2339		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2340		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2341		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2342		{ 0 }
2343	};
2344
2345	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2346		t4_fatal_err(adap);
2347}
2348
2349/*
2350 * NC-SI interrupt handler.
2351 */
2352static void ncsi_intr_handler(struct adapter *adap)
2353{
2354	static struct intr_info ncsi_intr_info[] = {
2355		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2356		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2357		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2358		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2359		{ 0 }
2360	};
2361
2362	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2363		t4_fatal_err(adap);
2364}
2365
2366/*
2367 * XGMAC interrupt handler.
2368 */
2369static void xgmac_intr_handler(struct adapter *adap, int port)
2370{
2371	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2372
2373	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2374	if (!v)
2375		return;
2376
2377	if (v & F_TXFIFO_PRTY_ERR)
2378		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2379	if (v & F_RXFIFO_PRTY_ERR)
2380		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2381	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2382	t4_fatal_err(adap);
2383}
2384
2385/*
2386 * PL interrupt handler.
2387 */
2388static void pl_intr_handler(struct adapter *adap)
2389{
2390	static struct intr_info pl_intr_info[] = {
2391		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2392		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2393		{ 0 }
2394	};
2395
2396	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2397		t4_fatal_err(adap);
2398}
2399
2400#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2401#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2402		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2403		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2404
2405/**
2406 *	t4_slow_intr_handler - control path interrupt handler
2407 *	@adapter: the adapter
2408 *
2409 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2410 *	The designation 'slow' is because it involves register reads, while
2411 *	data interrupts typically don't involve any MMIOs.
2412 */
2413int t4_slow_intr_handler(struct adapter *adapter)
2414{
2415	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2416
2417	if (!(cause & GLBL_INTR_MASK))
2418		return 0;
2419	if (cause & F_CIM)
2420		cim_intr_handler(adapter);
2421	if (cause & F_MPS)
2422		mps_intr_handler(adapter);
2423	if (cause & F_NCSI)
2424		ncsi_intr_handler(adapter);
2425	if (cause & F_PL)
2426		pl_intr_handler(adapter);
2427	if (cause & F_SMB)
2428		smb_intr_handler(adapter);
2429	if (cause & F_XGMAC0)
2430		xgmac_intr_handler(adapter, 0);
2431	if (cause & F_XGMAC1)
2432		xgmac_intr_handler(adapter, 1);
2433	if (cause & F_XGMAC_KR0)
2434		xgmac_intr_handler(adapter, 2);
2435	if (cause & F_XGMAC_KR1)
2436		xgmac_intr_handler(adapter, 3);
2437	if (cause & F_PCIE)
2438		pcie_intr_handler(adapter);
2439	if (cause & F_MC)
2440		mem_intr_handler(adapter, MEM_MC);
2441	if (cause & F_EDC0)
2442		mem_intr_handler(adapter, MEM_EDC0);
2443	if (cause & F_EDC1)
2444		mem_intr_handler(adapter, MEM_EDC1);
2445	if (cause & F_LE)
2446		le_intr_handler(adapter);
2447	if (cause & F_TP)
2448		tp_intr_handler(adapter);
2449	if (cause & F_MA)
2450		ma_intr_handler(adapter);
2451	if (cause & F_PM_TX)
2452		pmtx_intr_handler(adapter);
2453	if (cause & F_PM_RX)
2454		pmrx_intr_handler(adapter);
2455	if (cause & F_ULP_RX)
2456		ulprx_intr_handler(adapter);
2457	if (cause & F_CPL_SWITCH)
2458		cplsw_intr_handler(adapter);
2459	if (cause & F_SGE)
2460		sge_intr_handler(adapter);
2461	if (cause & F_ULP_TX)
2462		ulptx_intr_handler(adapter);
2463
2464	/* Clear the interrupts just processed for which we are the master. */
2465	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2466	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2467	return 1;
2468}
2469
2470/**
2471 *	t4_intr_enable - enable interrupts
2472 *	@adapter: the adapter whose interrupts should be enabled
2473 *
2474 *	Enable PF-specific interrupts for the calling function and the top-level
2475 *	interrupt concentrator for global interrupts.  Interrupts are already
2476 *	enabled at each module, here we just enable the roots of the interrupt
2477 *	hierarchies.
2478 *
2479 *	Note: this function should be called only when the driver manages
2480 *	non PF-specific interrupts from the various HW modules.  Only one PCI
2481 *	function at a time should be doing this.
2482 */
2483void t4_intr_enable(struct adapter *adapter)
2484{
2485	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2486
2487	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2488		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2489		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2490		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2491		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2492		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2493		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2494		     F_EGRESS_SIZE_ERR);
2495	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2496	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2497}
2498
2499/**
2500 *	t4_intr_disable - disable interrupts
2501 *	@adapter: the adapter whose interrupts should be disabled
2502 *
2503 *	Disable interrupts.  We only disable the top-level interrupt
2504 *	concentrators.  The caller must be a PCI function managing global
2505 *	interrupts.
2506 */
2507void t4_intr_disable(struct adapter *adapter)
2508{
2509	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2510
2511	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2512	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2513}
2514
2515/**
2516 *	t4_intr_clear - clear all interrupts
2517 *	@adapter: the adapter whose interrupts should be cleared
2518 *
2519 *	Clears all interrupts.  The caller must be a PCI function managing
2520 *	global interrupts.
2521 */
2522void t4_intr_clear(struct adapter *adapter)
2523{
2524	static const unsigned int cause_reg[] = {
2525		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2526		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2527		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2528		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2529		A_MC_INT_CAUSE,
2530		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2531		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2532		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2533		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2534		A_TP_INT_CAUSE,
2535		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2536		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2537		A_MPS_RX_PERR_INT_CAUSE,
2538		A_CPL_INTR_CAUSE,
2539		MYPF_REG(A_PL_PF_INT_CAUSE),
2540		A_PL_PL_INT_CAUSE,
2541		A_LE_DB_INT_CAUSE,
2542	};
2543
2544	unsigned int i;
2545
2546	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2547		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2548
2549	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2550	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2551}
2552
2553/**
2554 *	hash_mac_addr - return the hash value of a MAC address
2555 *	@addr: the 48-bit Ethernet MAC address
2556 *
2557 *	Hashes a MAC address according to the hash function used by HW inexact
2558 *	(hash) address matching.
2559 */
2560static int hash_mac_addr(const u8 *addr)
2561{
2562	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2563	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2564	a ^= b;
2565	a ^= (a >> 12);
2566	a ^= (a >> 6);
2567	return a & 0x3f;
2568}
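/*
 * Worked example (illustrative, not from the original source): for the MAC
 * address 00:07:43:00:00:01, a = 0x000743 and b = 0x000001, so a ^ b = 0x742.
 * 0x742 >> 12 is 0, and 0x742 ^ (0x742 >> 6) = 0x742 ^ 0x1d = 0x75f; masking
 * with 0x3f selects hash bucket 0x1f.
 */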
2569
2570/**
2571 *	t4_config_rss_range - configure a portion of the RSS mapping table
2572 *	@adapter: the adapter
2573 *	@mbox: mbox to use for the FW command
2574 *	@viid: virtual interface whose RSS subtable is to be written
2575 *	@start: start entry in the table to write
2576 *	@n: how many table entries to write
2577 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2578 *	@nrspq: number of values in @rspq
2579 *
2580 *	Programs the selected part of the VI's RSS mapping table with the
2581 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2582 *	until the full table range is populated.
2583 *
2584 *	The caller must ensure the values in @rspq are in the range allowed for
2585 *	@viid.
2586 */
2587int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2588			int start, int n, const u16 *rspq, unsigned int nrspq)
2589{
2590	int ret;
2591	const u16 *rsp = rspq;
2592	const u16 *rsp_end = rspq + nrspq;
2593	struct fw_rss_ind_tbl_cmd cmd;
2594
2595	memset(&cmd, 0, sizeof(cmd));
2596	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2597			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2598			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2599	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2600
2602	/*
2603	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2604	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2605	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2606	 * reserved.
2607	 */
2608	while (n > 0) {
2609		int nq = min(n, 32);
2610		int nq_packed = 0;
2611		__be32 *qp = &cmd.iq0_to_iq2;
2612
2613		/*
2614		 * Set up the firmware RSS command header to send the next
2615		 * "nq" Ingress Queue IDs to the firmware.
2616		 */
2617		cmd.niqid = htons(nq);
2618		cmd.startidx = htons(start);
2619
2620		/*
2621		 * "nq" more done for the start of the next loop.
2622		 */
2623		start += nq;
2624		n -= nq;
2625
2626		/*
2627		 * While there are still Ingress Queue IDs to stuff into the
2628		 * current firmware RSS command, retrieve them from the
2629		 * Ingress Queue ID array and insert them into the command.
2630		 */
2631		while (nq > 0) {
2632			/*
2633			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2634			 * around the Ingress Queue ID array if necessary) and
2635			 * insert them into the firmware RSS command at the
2636			 * current 3-tuple position within the command.
2637			 */
2638			u16 qbuf[3];
2639			u16 *qbp = qbuf;
2640			int nqbuf = min(3, nq);
2641
2642			nq -= nqbuf;
2643			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2644			while (nqbuf && nq_packed < 32) {
2645				nqbuf--;
2646				nq_packed++;
2647				*qbp++ = *rsp++;
2648				if (rsp >= rsp_end)
2649					rsp = rspq;
2650			}
2651			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2652					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2653					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2654		}
2655
2656		/*
2657		 * Send this portion of the RSS table update to the firmware;
2658		 * bail out on any errors.
2659		 */
2660		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2661		if (ret)
2662			return ret;
2663	}
2664
2665	return 0;
2666}
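/*
 * Usage sketch (illustrative; the queue IDs are hypothetical): to spread a
 * VI's 64-entry indirection table across four ingress queues, a caller
 * might do
 *
 *	u16 rspq[4] = { iq0, iq1, iq2, iq3 };
 *	ret = t4_config_rss_range(adap, mbox, viid, 0, 64, rspq, 4);
 *
 * Because @nrspq < @n, the four IDs are replayed until all 64 slots are
 * filled.
 */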
2667
2668/**
2669 *	t4_config_glbl_rss - configure the global RSS mode
2670 *	@adapter: the adapter
2671 *	@mbox: mbox to use for the FW command
2672 *	@mode: global RSS mode
2673 *	@flags: mode-specific flags
2674 *
2675 *	Sets the global RSS mode.
2676 */
2677int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2678		       unsigned int flags)
2679{
2680	struct fw_rss_glb_config_cmd c;
2681
2682	memset(&c, 0, sizeof(c));
2683	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2684			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2685	c.retval_len16 = htonl(FW_LEN16(c));
2686	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2687		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2688	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2689		c.u.basicvirtual.mode_pkd =
2690			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2691		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2692	} else
2693		return -EINVAL;
2694	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2695}
2696
2697/**
2698 *	t4_config_vi_rss - configure per VI RSS settings
2699 *	@adapter: the adapter
2700 *	@mbox: mbox to use for the FW command
2701 *	@viid: the VI id
2702 *	@flags: RSS flags
2703 *	@defq: id of the default RSS queue for the VI.
2704 *
2705 *	Configures VI-specific RSS properties.
2706 */
2707int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2708		     unsigned int flags, unsigned int defq)
2709{
2710	struct fw_rss_vi_config_cmd c;
2711
2712	memset(&c, 0, sizeof(c));
2713	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2714			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2715			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2716	c.retval_len16 = htonl(FW_LEN16(c));
2717	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2718					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2719	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2720}
2721
2722/* Read an RSS table row */
2723static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2724{
2725	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2726	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2727				   5, 0, val);
2728}
2729
2730/**
2731 *	t4_read_rss - read the contents of the RSS mapping table
2732 *	@adapter: the adapter
2733 *	@map: holds the contents of the RSS mapping table
2734 *
2735 *	Reads the contents of the RSS hash->queue mapping table.
2736 */
2737int t4_read_rss(struct adapter *adapter, u16 *map)
2738{
2739	u32 val;
2740	int i, ret;
2741
2742	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2743		ret = rd_rss_row(adapter, i, &val);
2744		if (ret)
2745			return ret;
2746		*map++ = G_LKPTBLQUEUE0(val);
2747		*map++ = G_LKPTBLQUEUE1(val);
2748	}
2749	return 0;
2750}
2751
2752/**
2753 *	t4_read_rss_key - read the global RSS key
2754 *	@adap: the adapter
2755 *	@key: 10-entry array holding the 320-bit RSS key
2756 *
2757 *	Reads the global 320-bit RSS key.
2758 */
2759void t4_read_rss_key(struct adapter *adap, u32 *key)
2760{
2761	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2762			 A_TP_RSS_SECRET_KEY0);
2763}
2764
2765/**
2766 *	t4_write_rss_key - program one of the RSS keys
2767 *	@adap: the adapter
2768 *	@key: 10-entry array holding the 320-bit RSS key
2769 *	@idx: which RSS key to write
2770 *
2771 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2772 *	0..15 the corresponding entry in the RSS key table is written,
2773 *	otherwise the global RSS key is written.
2774 */
2775void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2776{
2777	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2778			  A_TP_RSS_SECRET_KEY0);
2779	if (idx >= 0 && idx < 16)
2780		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2781			     V_KEYWRADDR(idx) | F_KEYWREN);
2782}
2783
2784/**
2785 *	t4_read_rss_pf_config - read PF RSS Configuration Table
2786 *	@adapter: the adapter
2787 *	@index: the entry in the PF RSS table to read
2788 *	@valp: where to store the returned value
2789 *
2790 *	Reads the PF RSS Configuration Table at the specified index and returns
2791 *	the value found there.
2792 */
2793void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2794{
2795	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2796			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2797}
2798
2799/**
2800 *	t4_write_rss_pf_config - write PF RSS Configuration Table
2801 *	@adapter: the adapter
2802 *	@index: the entry in the PF RSS table to write
2803 *	@val: the value to store
2804 *
2805 *	Writes the PF RSS Configuration Table at the specified index with the
2806 *	specified value.
2807 */
2808void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2809{
2810	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2811			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2812}
2813
2814/**
2815 *	t4_read_rss_vf_config - read VF RSS Configuration Table
2816 *	@adapter: the adapter
2817 *	@index: the entry in the VF RSS table to read
2818 *	@vfl: where to store the returned VFL
2819 *	@vfh: where to store the returned VFH
2820 *
2821 *	Reads the VF RSS Configuration Table at the specified index and returns
2822 *	the (VFL, VFH) values found there.
2823 */
2824void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2825			   u32 *vfl, u32 *vfh)
2826{
2827	u32 vrt;
2828
2829	/*
2830	 * Request that the index'th VF Table values be read into VFL/VFH.
2831	 */
2832	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2833	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2834	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2835	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2836
2837	/*
2838	 * Grab the VFL/VFH values ...
2839	 */
2840	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2841			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2842	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2843			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2844}
2845
2846/**
2847 *	t4_write_rss_vf_config - write VF RSS Configuration Table
2849 *	@adapter: the adapter
2850 *	@index: the entry in the VF RSS table to write
2851 *	@vfl: the VFL to store
2852 *	@vfh: the VFH to store
2853 *
2854 *	Writes the VF RSS Configuration Table at the specified index with the
2855 *	specified (VFL, VFH) values.
2856 */
2857void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2858			    u32 vfl, u32 vfh)
2859{
2860	u32 vrt;
2861
2862	/*
2863	 * Load up VFL/VFH with the values to be written ...
2864	 */
2865	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2866			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
2867	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2868			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
2869
2870	/*
2871	 * Write the VFL/VFH into the VF Table at the index'th location.
2872	 */
2873	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2874	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2875	vrt |= V_VFWRADDR(index) | F_VFWREN;
2876	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2877}
2878
2879/**
2880 *	t4_read_rss_pf_map - read PF RSS Map
2881 *	@adapter: the adapter
2882 *
2883 *	Reads the PF RSS Map register and returns its value.
2884 */
2885u32 t4_read_rss_pf_map(struct adapter *adapter)
2886{
2887	u32 pfmap;
2888
2889	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2890			 &pfmap, 1, A_TP_RSS_PF_MAP);
2891	return pfmap;
2892}
2893
2894/**
2895 *	t4_write_rss_pf_map - write PF RSS Map
2896 *	@adapter: the adapter
2897 *	@pfmap: PF RSS Map value
2898 *
2899 *	Writes the specified value to the PF RSS Map register.
2900 */
2901void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2902{
2903	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2904			  &pfmap, 1, A_TP_RSS_PF_MAP);
2905}
2906
2907/**
2908 *	t4_read_rss_pf_mask - read PF RSS Mask
2909 *	@adapter: the adapter
2910 *
2911 *	Reads the PF RSS Mask register and returns its value.
2912 */
2913u32 t4_read_rss_pf_mask(struct adapter *adapter)
2914{
2915	u32 pfmask;
2916
2917	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2918			 &pfmask, 1, A_TP_RSS_PF_MSK);
2919	return pfmask;
2920}
2921
2922/**
2923 *	t4_write_rss_pf_mask - write PF RSS Mask
2924 *	@adapter: the adapter
2925 *	@pfmask: PF RSS Mask value
2926 *
2927 *	Writes the specified value to the PF RSS Mask register.
2928 */
2929void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2930{
2931	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2932			  &pfmask, 1, A_TP_RSS_PF_MSK);
2933}
2934
2935/**
2936 *	t4_set_filter_mode - configure the optional components of filter tuples
2937 *	@adap: the adapter
2938 *	@mode_map: a bitmap selecting which optional filter components to enable
2939 *
2940 *	Sets the filter mode by selecting the optional components to enable
2941 *	in filter tuples.  Returns 0 on success and a negative error if the
2942 *	requested mode needs more bits than are available for optional
2943 *	components.
2944 */
2945int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2946{
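	/*
	 * Widths (in bits) of the optional filter tuple fields, in
	 * S_FCOE..S_FRAGMENTATION order: FCoE, port, VNIC id, VLAN, TOS,
	 * protocol, Ethertype, MAC-match index, MPS hit type, fragmentation.
	 */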
2947	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2948
2949	int i, nbits = 0;
2950
2951	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2952		if (mode_map & (1 << i))
2953			nbits += width[i];
2954	if (nbits > FILTER_OPT_LEN)
2955		return -EINVAL;
2956	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2957			  A_TP_VLAN_PRI_MAP);
2958	return 0;
2959}
2960
2961/**
2962 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2963 *	@adap: the adapter
2964 *	@v4: holds the TCP/IP counter values
2965 *	@v6: holds the TCP/IPv6 counter values
2966 *
2967 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2968 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2969 */
2970void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2971			 struct tp_tcp_stats *v6)
2972{
2973	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2974
2975#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2976#define STAT(x)     val[STAT_IDX(x)]
2977#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2978
2979	if (v4) {
2980		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2981				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2982		v4->tcpOutRsts = STAT(OUT_RST);
2983		v4->tcpInSegs  = STAT64(IN_SEG);
2984		v4->tcpOutSegs = STAT64(OUT_SEG);
2985		v4->tcpRetransSegs = STAT64(RXT_SEG);
2986	}
2987	if (v6) {
2988		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2989				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2990		v6->tcpOutRsts = STAT(OUT_RST);
2991		v6->tcpInSegs  = STAT64(IN_SEG);
2992		v6->tcpOutSegs = STAT64(OUT_SEG);
2993		v6->tcpRetransSegs = STAT64(RXT_SEG);
2994	}
2995#undef STAT64
2996#undef STAT
2997#undef STAT_IDX
2998}
2999
3000/**
3001 *	t4_tp_get_err_stats - read TP's error MIB counters
3002 *	@adap: the adapter
3003 *	@st: holds the counter values
3004 *
3005 *	Returns the values of TP's error counters.
3006 */
3007void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
3008{
3009	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
3010			 12, A_TP_MIB_MAC_IN_ERR_0);
3011	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
3012			 8, A_TP_MIB_TNL_CNG_DROP_0);
3013	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
3014			 4, A_TP_MIB_TNL_DROP_0);
3015	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
3016			 4, A_TP_MIB_OFD_VLN_DROP_0);
3017	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
3018			 4, A_TP_MIB_TCP_V6IN_ERR_0);
3019	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
3020			 2, A_TP_MIB_OFD_ARP_DROP);
3021}
3022
3023/**
3024 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
3025 *	@adap: the adapter
3026 *	@st: holds the counter values
3027 *
3028 *	Returns the values of TP's proxy counters.
3029 */
3030void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
3031{
3032	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
3033			 4, A_TP_MIB_TNL_LPBK_0);
3034}
3035
3036/**
3037 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
3038 *	@adap: the adapter
3039 *	@st: holds the counter values
3040 *
3041 *	Returns the values of TP's CPL counters.
3042 */
3043void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
3044{
3045	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
3046			 8, A_TP_MIB_CPL_IN_REQ_0);
3047}
3048
3049/**
3050 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
3051 *	@adap: the adapter
3052 *	@st: holds the counter values
3053 *
3054 *	Returns the values of TP's RDMA counters.
3055 */
3056void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
3057{
3058	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
3059			 2, A_TP_MIB_RQE_DFR_MOD);
3060}
3061
3062/**
3063 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
3064 *	@adap: the adapter
3065 *	@idx: the port index
3066 *	@st: holds the counter values
3067 *
3068 *	Returns the values of TP's FCoE counters for the selected port.
3069 */
3070void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
3071		       struct tp_fcoe_stats *st)
3072{
3073	u32 val[2];
3074
3075	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
3076			 1, A_TP_MIB_FCOE_DDP_0 + idx);
3077	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
3078			 1, A_TP_MIB_FCOE_DROP_0 + idx);
3079	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
3080			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
3081	st->octetsDDP = ((u64)val[0] << 32) | val[1];
3082}
3083
3084/**
3085 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
3086 *	@adap: the adapter
3087 *	@st: holds the counter values
3088 *
3089 *	Returns the values of TP's counters for non-TCP directly-placed packets.
3090 */
3091void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
3092{
3093	u32 val[4];
3094
3095	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
3096			 A_TP_MIB_USM_PKTS);
3097	st->frames = val[0];
3098	st->drops = val[1];
3099	st->octets = ((u64)val[2] << 32) | val[3];
3100}
3101
3102/**
3103 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3104 *	@adap: the adapter
3105 *	@mtus: where to store the MTU values
3106 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3107 *
3108 *	Reads the HW path MTU table.
3109 */
3110void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3111{
3112	u32 v;
3113	int i;
3114
3115	for (i = 0; i < NMTUS; ++i) {
3116		t4_write_reg(adap, A_TP_MTU_TABLE,
3117			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
3118		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3119		mtus[i] = G_MTUVALUE(v);
3120		if (mtu_log)
3121			mtu_log[i] = G_MTUWIDTH(v);
3122	}
3123}
3124
3125/**
3126 *	t4_read_cong_tbl - reads the congestion control table
3127 *	@adap: the adapter
3128 *	@incr: where to store the additive increment values
3129 *
3130 *	Reads the additive increments programmed into the HW congestion
3131 *	control table.
3132 */
3133void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3134{
3135	unsigned int mtu, w;
3136
3137	for (mtu = 0; mtu < NMTUS; ++mtu)
3138		for (w = 0; w < NCCTRL_WIN; ++w) {
3139			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3140				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
3141			incr[mtu][w] = (u16)t4_read_reg(adap,
3142						A_TP_CCTRL_TABLE) & 0x1fff;
3143		}
3144}
3145
3146/**
3147 *	t4_read_pace_tbl - read the pace table
3148 *	@adap: the adapter
3149 *	@pace_vals: holds the returned values
3150 *
3151 *	Returns the values of TP's pace table in microseconds.
3152 */
3153void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3154{
3155	unsigned int i, v;
3156
3157	for (i = 0; i < NTX_SCHED; i++) {
3158		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3159		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3160		pace_vals[i] = dack_ticks_to_usec(adap, v);
3161	}
3162}
3163
3164/**
3165 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3166 *	@adap: the adapter
3167 *	@addr: the indirect TP register address
3168 *	@mask: specifies the field within the register to modify
3169 *	@val: new value for the field
3170 *
3171 *	Sets a field of an indirect TP register to the given value.
3172 */
3173void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3174			    unsigned int mask, unsigned int val)
3175{
3176	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3177	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3178	t4_write_reg(adap, A_TP_PIO_DATA, val);
3179}
3180
3181/**
3182 *	init_cong_ctrl - initialize congestion control parameters
3183 *	@a: the alpha values for congestion control
3184 *	@b: the beta values for congestion control
3185 *
3186 *	Initialize the congestion control parameters.
3187 */
3188static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3189{
3190	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3191	a[9] = 2;
3192	a[10] = 3;
3193	a[11] = 4;
3194	a[12] = 5;
3195	a[13] = 6;
3196	a[14] = 7;
3197	a[15] = 8;
3198	a[16] = 9;
3199	a[17] = 10;
3200	a[18] = 14;
3201	a[19] = 17;
3202	a[20] = 21;
3203	a[21] = 25;
3204	a[22] = 30;
3205	a[23] = 35;
3206	a[24] = 45;
3207	a[25] = 60;
3208	a[26] = 80;
3209	a[27] = 100;
3210	a[28] = 200;
3211	a[29] = 300;
3212	a[30] = 400;
3213	a[31] = 500;
3214
3215	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3216	b[9] = b[10] = 1;
3217	b[11] = b[12] = 2;
3218	b[13] = b[14] = b[15] = b[16] = 3;
3219	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3220	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3221	b[28] = b[29] = 6;
3222	b[30] = b[31] = 7;
3223}
3224
3225/* The minimum additive increment value for the congestion control table */
3226#define CC_MIN_INCR 2U
3227
3228/**
3229 *	t4_load_mtus - write the MTU and congestion control HW tables
3230 *	@adap: the adapter
3231 *	@mtus: the values for the MTU table
3232 *	@alpha: the values for the congestion control alpha parameter
3233 *	@beta: the values for the congestion control beta parameter
3234 *
3235 *	Write the HW MTU table with the supplied MTUs and the high-speed
3236 *	congestion control table with the supplied alpha, beta, and MTUs.
3237 *	We write the two tables together because the additive increments
3238 *	depend on the MTUs.
3239 */
3240void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3241		  const unsigned short *alpha, const unsigned short *beta)
3242{
3243	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3244		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3245		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3246		28672, 40960, 57344, 81920, 114688, 163840, 229376
3247	};
3248
3249	unsigned int i, w;
3250
3251	for (i = 0; i < NMTUS; ++i) {
3252		unsigned int mtu = mtus[i];
3253		unsigned int log2 = fls(mtu);
3254
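		/*
		 * fls() returns 1 + floor(log2(mtu)).  The test below keeps
		 * that width only when mtu >= 1.5 * 2^(fls(mtu) - 1), i.e.
		 * when the bit just below the MSB is set; otherwise the
		 * width is rounded down.
		 */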
3255		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3256			log2--;
3257		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3258			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3259
3260		for (w = 0; w < NCCTRL_WIN; ++w) {
3261			unsigned int inc;
3262
3263			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3264				  CC_MIN_INCR);
3265
3266			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3267				     (w << 16) | (beta[w] << 13) | inc);
3268		}
3269	}
3270}
3271
3272/**
3273 *	t4_set_pace_tbl - set the pace table
3274 *	@adap: the adapter
3275 *	@pace_vals: the pace values in microseconds
3276 *	@start: index of the first entry in the HW pace table to set
3277 *	@n: how many entries to set
3278 *
3279 *	Sets (a subset of the) HW pace table.
3280 */
3281int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3282		     unsigned int start, unsigned int n)
3283{
3284	unsigned int vals[NTX_SCHED], i;
3285	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
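	/*
	 * dack_ticks_to_usec(adap, 1000) is the number of usecs in 1000
	 * DACK ticks, which is numerically the tick period in nsecs.
	 */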
3286
3287	if (n > NTX_SCHED)
3288		return -ERANGE;
3289
3290	/* convert values from us to dack ticks, rounding to closest value */
3291	for (i = 0; i < n; i++, pace_vals++) {
3292		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3293		if (vals[i] > 0x7ff)
3294			return -ERANGE;
3295		if (*pace_vals && vals[i] == 0)
3296			return -ERANGE;
3297	}
3298	for (i = 0; i < n; i++, start++)
3299		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3300	return 0;
3301}
3302
3303/**
3304 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3305 *	@adap: the adapter
3306 *	@kbps: target rate in Kbps
3307 *	@sched: the scheduler index
3308 *
3309 *	Configure a Tx HW scheduler for the target rate.
3310 */
3311int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3312{
3313	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3314	unsigned int clk = adap->params.vpd.cclk * 1000;
3315	unsigned int selected_cpt = 0, selected_bpt = 0;
3316
3317	if (kbps > 0) {
3318		kbps *= 125;     /* -> bytes */
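		/*
		 * The scheduler releases "bpt" bytes every "cpt" core clock
		 * ticks.  Scan the cycles-per-tick settings and keep the
		 * (cpt, bpt) pair whose resulting rate is closest to the
		 * requested one.
		 */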
3319		for (cpt = 1; cpt <= 255; cpt++) {
3320			tps = clk / cpt;
3321			bpt = (kbps + tps / 2) / tps;
3322			if (bpt > 0 && bpt <= 255) {
3323				v = bpt * tps;
3324				delta = v >= kbps ? v - kbps : kbps - v;
3325				if (delta < mindelta) {
3326					mindelta = delta;
3327					selected_cpt = cpt;
3328					selected_bpt = bpt;
3329				}
3330			} else if (selected_cpt)
3331				break;
3332		}
3333		if (!selected_cpt)
3334			return -EINVAL;
3335	}
3336	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3337		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3338	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3339	if (sched & 1)
3340		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3341	else
3342		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3343	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3344	return 0;
3345}
3346
3347/**
3348 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3349 *	@adap: the adapter
3350 *	@sched: the scheduler index
3351 *	@ipg: the interpacket delay in tenths of nanoseconds
3352 *
3353 *	Set the interpacket delay for a HW packet rate scheduler.
3354 */
3355int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3356{
3357	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3358
3359	/* convert ipg to nearest number of core clocks */
3360	ipg *= core_ticks_per_usec(adap);
3361	ipg = (ipg + 5000) / 10000;
3362	if (ipg > M_TXTIMERSEPQ0)
3363		return -EINVAL;
3364
3365	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3366	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3367	if (sched & 1)
3368		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3369	else
3370		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3371	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3372	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3373	return 0;
3374}
3375
3376/**
3377 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3378 *	@adap: the adapter
3379 *	@sched: the scheduler index
3380 *	@kbps: the byte rate in Kbps
3381 *	@ipg: the interpacket delay in tenths of nanoseconds
3382 *
3383 *	Return the current configuration of a HW Tx scheduler.
3384 */
3385void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3386		     unsigned int *ipg)
3387{
3388	unsigned int v, addr, bpt, cpt;
3389
3390	if (kbps) {
3391		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3392		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3393		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3394		if (sched & 1)
3395			v >>= 16;	/* odd schedulers live in the upper half-word */
3396		bpt = (v >> 8) & 0xff;
3397		cpt = v & 0xff;
3398		if (!cpt)
3399			*kbps = 0;        /* scheduler disabled */
3400		else {
3401			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3402			*kbps = (v * bpt) / 125;
3403		}
3404	}
3405	if (ipg) {
3406		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3407		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3408		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3409		if (sched & 1)
3410			v >>= 16;
3411		v &= 0xffff;
3412		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3413	}
3414}
3415
3416/*
3417 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3418 * clocks.  The formula is
3419 *
3420 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3421 *
3422 * which is equivalent to
3423 *
3424 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
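 *
 * where ClkFreq_ms is the core clock in kHz, i.e. clocks per
 * millisecond (adap->params.vpd.cclk is stored in kHz).  For example,
 * bytes256 = 16 with a 250 MHz core clock (cclk = 250000) gives
 * 62.5 * 16 * 250000 = 250,000,000 bytes/s.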
3425 */
3426static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3427{
3428	u64 v = bytes256 * adap->params.vpd.cclk;
3429
3430	return v * 62 + v / 2;	/* v * 62.5, using integer arithmetic */
3431}
3432
3433/**
3434 *	t4_get_chan_txrate - get the current per channel Tx rates
3435 *	@adap: the adapter
3436 *	@nic_rate: rates for NIC traffic
3437 *	@ofld_rate: rates for offloaded traffic
3438 *
3439 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3440 *	for each channel.
3441 */
3442void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3443{
3444	u32 v;
3445
3446	v = t4_read_reg(adap, A_TP_TX_TRATE);
3447	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3448	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3449	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3450	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3451
3452	v = t4_read_reg(adap, A_TP_TX_ORATE);
3453	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3454	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3455	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3456	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3457}
3458
3459/**
3460 *	t4_set_trace_filter - configure one of the tracing filters
3461 *	@adap: the adapter
3462 *	@tp: the desired trace filter parameters
3463 *	@idx: which filter to configure
3464 *	@enable: whether to enable or disable the filter
3465 *
3466 *	Configures one of the tracing filters available in HW.  If @enable is
3467 *	%0 @tp is not examined and may be %NULL.  The user is responsible for
3468 *	setting the single/multiple trace mode by writing to the A_MPS_TRC_CFG
3469 *	register with the "cxgbtool iface reg reg_addr=val" command.  See
3470 *	t4_sniffer/docs/readme.txt for a complete description of how to set
3471 *	up tracing on T4.
3472 */
3473int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3474			int enable)
3475{
3476	int i, ofst = idx * 4;
3477	u32 data_reg, mask_reg, cfg;
3478	u32 multitrc = F_TRCMULTIFILTER;
3479
3480	if (!enable) {
3481		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3482		return 0;
3483	}
3484
3485	/*
3486	 * TODO - After T4 data book is updated, specify the exact
3487	 * section below.
3488	 *
3489	 * See T4 data book - MPS section for a complete description
3490	 * of the below if..else handling of A_MPS_TRC_CFG register
3491	 * value.
3492	 */
3493	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3494	if (cfg & F_TRCMULTIFILTER) {
3495		/*
3496		 * If multiple tracers are enabled, the maximum capture size
3497		 * is 2.5KB (the FIFO size of a single channel) minus 2 flits
3498		 * for the CPL_TRACE_PKT header.
3499		 */
3500		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
3501			return -EINVAL;
3502	}
3503	else {
3504		/*
3505		 * If multiple tracers are disabled, a maximum packet capture
3506		 * size of 9600 bytes is recommended to avoid deadlocks.  In
3507		 * this mode only trace0 can be enabled and running.
3508		 */
3509		multitrc = 0;
3510		if (tp->snap_len > 9600 || idx)
3511			return -EINVAL;
3512	}
3513
3514	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3515	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
3516		return -EINVAL;
3517
3518	/* stop the tracer we'll be changing */
3519	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3520
3521	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3522	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3523	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3524
3525	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3526		t4_write_reg(adap, data_reg, tp->data[i]);
3527		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3528	}
3529	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3530		     V_TFCAPTUREMAX(tp->snap_len) |
3531		     V_TFMINPKTSIZE(tp->min_len));
3532	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3533		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3534		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3535
3536	return 0;
3537}
3538
3539/**
3540 *	t4_get_trace_filter - query one of the tracing filters
3541 *	@adap: the adapter
3542 *	@tp: the current trace filter parameters
3543 *	@idx: which trace filter to query
3544 *	@enabled: non-zero if the filter is enabled
3545 *
3546 *	Returns the current settings of one of the HW tracing filters.
3547 */
3548void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3549			 int *enabled)
3550{
3551	u32 ctla, ctlb;
3552	int i, ofst = idx * 4;
3553	u32 data_reg, mask_reg;
3554
3555	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3556	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3557
3558	*enabled = !!(ctla & F_TFEN);
3559	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3560	tp->min_len = G_TFMINPKTSIZE(ctlb);
3561	tp->skip_ofst = G_TFOFFSET(ctla);
3562	tp->skip_len = G_TFLENGTH(ctla);
3563	tp->invert = !!(ctla & F_TFINVERTMATCH);
3564	tp->port = G_TFPORT(ctla);
3565
3566	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3567	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3568	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3569
3570	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3571		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3572		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3573	}
3574}
3575
3576/**
3577 *	t4_pmtx_get_stats - returns the HW stats from PMTX
3578 *	@adap: the adapter
3579 *	@cnt: where to store the count statistics
3580 *	@cycles: where to store the cycle statistics
3581 *
3582 *	Returns performance statistics from PMTX.
3583 */
3584void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3585{
3586	int i;
3587
3588	for (i = 0; i < PM_NSTATS; i++) {
3589		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3590		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3591		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3592	}
3593}
3594
3595/**
3596 *	t4_pmrx_get_stats - returns the HW stats from PMRX
3597 *	@adap: the adapter
3598 *	@cnt: where to store the count statistics
3599 *	@cycles: where to store the cycle statistics
3600 *
3601 *	Returns performance statistics from PMRX.
3602 */
3603void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3604{
3605	int i;
3606
3607	for (i = 0; i < PM_NSTATS; i++) {
3608		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3609		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3610		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3611	}
3612}
3613
3614/**
3615 *	get_mps_bg_map - return the buffer groups associated with a port
3616 *	@adap: the adapter
3617 *	@idx: the port index
3618 *
3619 *	Returns a bitmap indicating which MPS buffer groups are associated
3620 *	with the given port.  Bit i is set if buffer group i is used by the
3621 *	port.
3622 */
3623static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3624{
3625	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3626
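	/*
	 * NUMPORTS: 0 means a single port owning all 4 buffer groups,
	 * 1 means two ports with 2 buffer groups each, and anything
	 * else means one buffer group per port.
	 */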
3627	if (n == 0)
3628		return idx == 0 ? 0xf : 0;
3629	if (n == 1)
3630		return idx < 2 ? (3 << (2 * idx)) : 0;
3631	return 1 << idx;
3632}
3633
3634/**
3635 *	t4_get_port_stats_offset - collect port stats relative to a
3636 *				   previous snapshot
3637 *	@adap: the adapter
3638 *	@idx: the port index
3639 *	@stats: current stats to fill
3640 *	@offset: previous stats snapshot
3641 */
3642void t4_get_port_stats_offset(struct adapter *adap, int idx,
3643		struct port_stats *stats,
3644		struct port_stats *offset)
3645{
3646	u64 *s, *o;
3647	int i;
3648
3649	t4_get_port_stats(adap, idx, stats);
3650	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3651	     i < (sizeof(struct port_stats) / sizeof(u64));
3652	     i++, s++, o++)
3653		*s -= *o;
3654}
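
/*
 * Usage sketch (illustrative only): take a snapshot first, then read
 * the traffic accumulated since that snapshot.
 *
 *	struct port_stats snap, delta;
 *
 *	t4_get_port_stats(adap, idx, &snap);
 *	...
 *	t4_get_port_stats_offset(adap, idx, &delta, &snap);
 */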
3655
3656/**
3657 *	t4_get_port_stats - collect port statistics
3658 *	@adap: the adapter
3659 *	@idx: the port index
3660 *	@p: the stats structure to fill
3661 *
3662 *	Collect statistics related to the given port from HW.
3663 */
3664void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3665{
3666	u32 bgmap = get_mps_bg_map(adap, idx);
3667
3668#define GET_STAT(name) \
3669	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3670#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3671
3672	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3673	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3674	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3675	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3676	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3677	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3678	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3679	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3680	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3681	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3682	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3683	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3684	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3685	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3686	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3687	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3688	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3689	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3690	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3691	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3692	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3693	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3694	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3695
3696	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3697	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3698	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3699	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3700	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3701	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3702	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3703	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3704	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3705	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3706	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3707	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3708	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3709	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3710	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3711	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3712	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3713	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3714	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3715	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3716	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3717	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3718	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3719	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3720	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3721	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3722	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3723
3724	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3725	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3726	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3727	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3728	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3729	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3730	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3731	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3732
3733#undef GET_STAT
3734#undef GET_STAT_COM
3735}
3736
3737/**
3738 *	t4_clr_port_stats - clear port statistics
3739 *	@adap: the adapter
3740 *	@idx: the port index
3741 *
3742 *	Clear HW statistics for the given port.
3743 */
3744void t4_clr_port_stats(struct adapter *adap, int idx)
3745{
3746	unsigned int i;
3747	u32 bgmap = get_mps_bg_map(adap, idx);
3748
3749	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3750	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3751		t4_write_reg(adap, PORT_REG(idx, i), 0);
3752	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3753	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3754		t4_write_reg(adap, PORT_REG(idx, i), 0);
3755	for (i = 0; i < 4; i++)
3756		if (bgmap & (1 << i)) {
3757			t4_write_reg(adap,
3758				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3759			t4_write_reg(adap,
3760				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3761		}
3762}
3763
3764/**
3765 *	t4_get_lb_stats - collect loopback port statistics
3766 *	@adap: the adapter
3767 *	@idx: the loopback port index
3768 *	@p: the stats structure to fill
3769 *
3770 *	Return HW statistics for the given loopback port.
3771 */
3772void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3773{
3774	u32 bgmap = get_mps_bg_map(adap, idx);
3775
3776#define GET_STAT(name) \
3777	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3778#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3779
3780	p->octets           = GET_STAT(BYTES);
3781	p->frames           = GET_STAT(FRAMES);
3782	p->bcast_frames     = GET_STAT(BCAST);
3783	p->mcast_frames     = GET_STAT(MCAST);
3784	p->ucast_frames     = GET_STAT(UCAST);
3785	p->error_frames     = GET_STAT(ERROR);
3786
3787	p->frames_64        = GET_STAT(64B);
3788	p->frames_65_127    = GET_STAT(65B_127B);
3789	p->frames_128_255   = GET_STAT(128B_255B);
3790	p->frames_256_511   = GET_STAT(256B_511B);
3791	p->frames_512_1023  = GET_STAT(512B_1023B);
3792	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3793	p->frames_1519_max  = GET_STAT(1519B_MAX);
3794	p->drop             = t4_read_reg(adap, PORT_REG(idx,
3795					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3796
3797	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3798	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3799	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3800	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3801	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3802	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3803	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3804	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3805
3806#undef GET_STAT
3807#undef GET_STAT_COM
3808}
3809
3810/**
3811 *	t4_wol_magic_enable - enable/disable magic packet WoL
3812 *	@adap: the adapter
3813 *	@port: the physical port index
3814 *	@addr: MAC address expected in magic packets, %NULL to disable
3815 *
3816 *	Enables/disables magic packet wake-on-LAN for the selected port.
3817 */
3818void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3819			 const u8 *addr)
3820{
3821	if (addr) {
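		/* addr[0] is the MSB: bytes 2-5 go in MACID_LO, 0-1 in HI. */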
3822		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3823			     (addr[2] << 24) | (addr[3] << 16) |
3824			     (addr[4] << 8) | addr[5]);
3825		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3826			     (addr[0] << 8) | addr[1]);
3827	}
3828	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3829			 V_MAGICEN(addr != NULL));
3830}
3831
3832/**
3833 *	t4_wol_pat_enable - enable/disable pattern-based WoL
3834 *	@adap: the adapter
3835 *	@port: the physical port index
3836 *	@map: bitmap of which HW pattern filters to set
3837 *	@mask0: byte mask for bytes 0-63 of a packet
3838 *	@mask1: byte mask for bytes 64-127 of a packet
3839 *	@crc: Ethernet CRC for selected bytes
3840 *	@enable: enable/disable switch
3841 *
3842 *	Sets the pattern filters indicated in @map to mask out the bytes
3843 *	specified in @mask0/@mask1 in received packets and compare the CRC of
3844 *	the resulting packet against @crc.  If @enable is %true pattern-based
3845 *	WoL is enabled, otherwise disabled.
3846 */
3847int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3848		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3849{
3850	int i;
3851
3852	if (!enable) {
3853		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3854				 F_PATEN, 0);
3855		return 0;
3856	}
3857	if (map > 0xff)
3858		return -EINVAL;
3859
3860#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3861
3862	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3863	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3864	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3865
3866	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3867		if (!(map & 1))
3868			continue;
3869
3870		/* write byte masks */
3871		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3872		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3873		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3874		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3875			return -ETIMEDOUT;
3876
3877		/* write CRC */
3878		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3879		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3880		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3881		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3882			return -ETIMEDOUT;
3883	}
3884#undef EPIO_REG
3885
3886	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3887	return 0;
3888}
3889
3890/**
3891 *	t4_mk_filtdelwr - create a delete filter WR
3892 *	@ftid: the filter ID
3893 *	@wr: the filter work request to populate
3894 *	@qid: ingress queue to receive the delete notification
3895 *
3896 *	Creates a filter work request to delete the supplied filter.  If @qid is
3897 *	negative the delete notification is suppressed.
3898 */
3899void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3900{
3901	memset(wr, 0, sizeof(*wr));
3902	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3903	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3904	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3905			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3906	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3907	if (qid >= 0)
3908		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3909}
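
/*
 * Usage sketch (illustrative only): build a WR that deletes filter
 * `ftid` and directs the delete notification to ingress queue `qid`,
 * then hand the WR to the usual work-request transmit path (not shown).
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(ftid, &wr, qid);
 */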
3910
3911#define INIT_CMD(var, cmd, rd_wr) do { \
3912	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3913				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3914	(var).retval_len16 = htonl(FW_LEN16(var)); \
3915} while (0)
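
/*
 * For example, INIT_CMD(c, BYE, WRITE) fills in c.op_to_write for a
 * FW_BYE_CMD write request and sets c.retval_len16 from the command
 * size; see t4_fw_bye() below.
 */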
3916
3917int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
3918{
3919	struct fw_ldst_cmd c;
3920
3921	memset(&c, 0, sizeof(c));
3922	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3923		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
3924	c.cycles_to_len16 = htonl(FW_LEN16(c));
3925	c.u.addrval.addr = htonl(addr);
3926	c.u.addrval.val = htonl(val);
3927
3928	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3929}
3930
3931/**
3932 *	t4_i2c_rd - read a byte from an i2c addressable device
3933 *	@adap: the adapter
3934 *	@mbox: mailbox to use for the FW command
3935 *	@port_id: the port id
3936 *	@dev_addr: the i2c device address
3937 *	@offset: the byte offset to read from
3938 *	@valp: where to store the value
3939 */
3940int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
3941	       u8 dev_addr, u8 offset, u8 *valp)
3942{
3943	int ret;
3944	struct fw_ldst_cmd c;
3945
3946	memset(&c, 0, sizeof(c));
3947	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3948		F_FW_CMD_READ |
3949		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
3950	c.cycles_to_len16 = htonl(FW_LEN16(c));
3951	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
3952	c.u.i2c_deprecated.base = dev_addr;
3953	c.u.i2c_deprecated.boffset = offset;
3954
3955	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3956	if (ret == 0)
3957		*valp = c.u.i2c_deprecated.data;
3958	return ret;
3959}
3960
3961/**
3962 *	t4_mdio_rd - read a PHY register through MDIO
3963 *	@adap: the adapter
3964 *	@mbox: mailbox to use for the FW command
3965 *	@phy_addr: the PHY address
3966 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3967 *	@reg: the register to read
3968 *	@valp: where to store the value
3969 *
3970 *	Issues a FW command through the given mailbox to read a PHY register.
3971 */
3972int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3973	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3974{
3975	int ret;
3976	struct fw_ldst_cmd c;
3977
3978	memset(&c, 0, sizeof(c));
3979	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3980		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3981	c.cycles_to_len16 = htonl(FW_LEN16(c));
3982	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3983				   V_FW_LDST_CMD_MMD(mmd));
3984	c.u.mdio.raddr = htons(reg);
3985
3986	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3987	if (ret == 0)
3988		*valp = ntohs(c.u.mdio.rval);
3989	return ret;
3990}
3991
3992/**
3993 *	t4_mdio_wr - write a PHY register through MDIO
3994 *	@adap: the adapter
3995 *	@mbox: mailbox to use for the FW command
3996 *	@phy_addr: the PHY address
3997 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3998 *	@reg: the register to write
3999 *	@valp: value to write
4000 *
4001 *	Issues a FW command through the given mailbox to write a PHY register.
4002 */
4003int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
4004	       unsigned int mmd, unsigned int reg, unsigned int val)
4005{
4006	struct fw_ldst_cmd c;
4007
4008	memset(&c, 0, sizeof(c));
4009	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4010		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
4011	c.cycles_to_len16 = htonl(FW_LEN16(c));
4012	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
4013				   V_FW_LDST_CMD_MMD(mmd));
4014	c.u.mdio.raddr = htons(reg);
4015	c.u.mdio.rval = htons(val);
4016
4017	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4018}
4019
4020/**
4021 *	t4_sge_ctxt_flush - flush the SGE context cache
4022 *	@adap: the adapter
4023 *	@mbox: mailbox to use for the FW command
4024 *
4025 *	Issues a FW command through the given mailbox to flush the
4026 *	SGE context cache.
4027 */
4028int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
4029{
4030	int ret;
4031	struct fw_ldst_cmd c;
4032
4033	memset(&c, 0, sizeof(c));
4034	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4035			F_FW_CMD_READ |
4036			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
4037	c.cycles_to_len16 = htonl(FW_LEN16(c));
4038	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
4039
4040	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4041	return ret;
4042}
4043
4044/**
4045 *	t4_sge_ctxt_rd - read an SGE context through FW
4046 *	@adap: the adapter
4047 *	@mbox: mailbox to use for the FW command
4048 *	@cid: the context id
4049 *	@ctype: the context type
4050 *	@data: where to store the context data
4051 *
4052 *	Issues a FW command through the given mailbox to read an SGE context.
4053 */
4054int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
4055		   enum ctxt_type ctype, u32 *data)
4056{
4057	int ret;
4058	struct fw_ldst_cmd c;
4059
4060	if (ctype == CTXT_EGRESS)
4061		ret = FW_LDST_ADDRSPC_SGE_EGRC;
4062	else if (ctype == CTXT_INGRESS)
4063		ret = FW_LDST_ADDRSPC_SGE_INGC;
4064	else if (ctype == CTXT_FLM)
4065		ret = FW_LDST_ADDRSPC_SGE_FLMC;
4066	else
4067		ret = FW_LDST_ADDRSPC_SGE_CONMC;
4068
4069	memset(&c, 0, sizeof(c));
4070	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
4071				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
4072	c.cycles_to_len16 = htonl(FW_LEN16(c));
4073	c.u.idctxt.physid = htonl(cid);
4074
4075	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4076	if (ret == 0) {
4077		data[0] = ntohl(c.u.idctxt.ctxt_data0);
4078		data[1] = ntohl(c.u.idctxt.ctxt_data1);
4079		data[2] = ntohl(c.u.idctxt.ctxt_data2);
4080		data[3] = ntohl(c.u.idctxt.ctxt_data3);
4081		data[4] = ntohl(c.u.idctxt.ctxt_data4);
4082		data[5] = ntohl(c.u.idctxt.ctxt_data5);
4083	}
4084	return ret;
4085}
4086
4087/**
4088 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
4089 *	@adap: the adapter
4090 *	@cid: the context id
4091 *	@ctype: the context type
4092 *	@data: where to store the context data
4093 *
4094 *	Reads an SGE context directly, bypassing FW.  This is only for
4095 *	debugging when FW is unavailable.
4096 */
4097int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
4098		      u32 *data)
4099{
4100	int i, ret;
4101
4102	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
4103	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
4104	if (!ret)
4105		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
4106			*data++ = t4_read_reg(adap, i);
4107	return ret;
4108}
4109
4110/**
4111 *	t4_fw_hello - establish communication with FW
4112 *	@adap: the adapter
4113 *	@mbox: mailbox to use for the FW command
4114 *	@evt_mbox: mailbox to receive async FW events
4115 *	@master: specifies the caller's willingness to be the device master
4116 *	@state: returns the current device state (if non-NULL)
4117 *
4118 *	Issues a command to establish communication with FW.  Returns either
4119 *	an error (negative integer) or the mailbox of the Master PF.
4120 */
4121int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
4122		enum dev_master master, enum dev_state *state)
4123{
4124	int ret;
4125	struct fw_hello_cmd c;
4126	u32 v;
4127	unsigned int master_mbox;
4128	int retries = FW_CMD_HELLO_RETRIES;
4129
4130retry:
4131	memset(&c, 0, sizeof(c));
4132	INIT_CMD(c, HELLO, WRITE);
4133	c.err_to_clearinit = htonl(
4134		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4135		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4136		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4137			M_FW_HELLO_CMD_MBMASTER) |
4138		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4139		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4140		F_FW_HELLO_CMD_CLEARINIT);
4141
4142	/*
4143	 * Issue the HELLO command to the firmware.  If it's not successful
4144	 * but indicates that we got a "busy" or "timeout" condition, retry
4145	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4146	 * retry limit, check to see if the firmware left us any error
4147	 * information and report that if so ...
4148	 */
4149	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4150	if (ret != FW_SUCCESS) {
4151		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4152			goto retry;
4153		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4154			t4_report_fw_error(adap);
4155		return ret;
4156	}
4157
4158	v = ntohl(c.err_to_clearinit);
4159	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4160	if (state) {
4161		if (v & F_FW_HELLO_CMD_ERR)
4162			*state = DEV_STATE_ERR;
4163		else if (v & F_FW_HELLO_CMD_INIT)
4164			*state = DEV_STATE_INIT;
4165		else
4166			*state = DEV_STATE_UNINIT;
4167	}
4168
4169	/*
4170	 * If we're not the Master PF then we need to wait around for the
4171	 * Master PF Driver to finish setting up the adapter.
4172	 *
4173	 * Note that we also do this wait if we're a non-Master-capable PF and
4174	 * there is no current Master PF; a Master PF may show up momentarily
4175	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4176	 * OS loads lots of different drivers rapidly at the same time).  In
4177	 * this case, the Master PF returned by the firmware will be
4178	 * M_PCIE_FW_MASTER so the test below will work ...
4179	 */
4180	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4181	    master_mbox != mbox) {
4182		int waiting = FW_CMD_HELLO_TIMEOUT;
4183
4184		/*
4185		 * Wait for the firmware to either indicate an error or
4186		 * initialized state.  If we see either of these we bail out
4187		 * and report the issue to the caller.  If we exhaust the
4188		 * "hello timeout" and we haven't exhausted our retries, try
4189		 * again.  Otherwise bail with a timeout error.
4190		 */
4191		for (;;) {
4192			u32 pcie_fw;
4193
4194			msleep(50);
4195			waiting -= 50;
4196
4197			/*
4198			 * If neither Error nor Initialized is indicated
4199			 * by the firmware, keep waiting till we exhaust our
4200			 * timeout ... and then retry if we haven't exhausted
4201			 * our retries ...
4202			 */
4203			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4204			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4205				if (waiting <= 0) {
4206					if (retries-- > 0)
4207						goto retry;
4208
4209					return -ETIMEDOUT;
4210				}
4211				continue;
4212			}
4213
4214			/*
4215			 * We either have an Error or Initialized condition;
4216			 * report errors preferentially.
4217			 */
4218			if (state) {
4219				if (pcie_fw & F_PCIE_FW_ERR)
4220					*state = DEV_STATE_ERR;
4221				else if (pcie_fw & F_PCIE_FW_INIT)
4222					*state = DEV_STATE_INIT;
4223			}
4224
4225			/*
4226			 * If we arrived before a Master PF was selected and
4227			 * there's now a valid Master PF, grab its identity
4228			 * for our caller.
4229			 */
4230			if (master_mbox == M_PCIE_FW_MASTER &&
4231			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4232				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4233			break;
4234		}
4235	}
4236
4237	return master_mbox;
4238}
4239
4240/**
4241 *	t4_fw_bye - end communication with FW
4242 *	@adap: the adapter
4243 *	@mbox: mailbox to use for the FW command
4244 *
4245 *	Issues a command to terminate communication with FW.
4246 */
4247int t4_fw_bye(struct adapter *adap, unsigned int mbox)
4248{
4249	struct fw_bye_cmd c;
4250
4251	memset(&c, 0, sizeof(c));
4252	INIT_CMD(c, BYE, WRITE);
4253	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4254}
4255
4256/**
4257 *	t4_fw_reset - issue a reset to FW
4258 *	@adap: the adapter
4259 *	@mbox: mailbox to use for the FW command
4260 *	@reset: specifies the type of reset to perform
4261 *
4262 *	Issues a reset command of the specified type to FW.
4263 */
4264int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4265{
4266	struct fw_reset_cmd c;
4267
4268	memset(&c, 0, sizeof(c));
4269	INIT_CMD(c, RESET, WRITE);
4270	c.val = htonl(reset);
4271	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4272}
4273
4274/**
4275 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
4276 *	@adap: the adapter
4277 *	@mbox: mailbox to use for the FW RESET command (if desired)
4278 *	@force: force uP into RESET even if FW RESET command fails
4279 *
4280 *	Issues a RESET command to firmware (if desired) with a HALT indication
4281 *	and then puts the microprocessor into RESET state.  The RESET command
4282 *	will only be issued if a legitimate mailbox is provided (mbox <=
4283 *	M_PCIE_FW_MASTER).
4284 *
4285 *	This is generally used in order for the host to safely manipulate the
4286 *	adapter without fear of conflicting with whatever the firmware might
4287 *	be doing.  The only way out of this state is to RESTART the firmware
4288 *	...
4289 */
4290int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
4291{
4292	int ret = 0;
4293
4294	/*
4295	 * If a legitimate mailbox is provided, issue a RESET command
4296	 * with a HALT indication.
4297	 */
4298	if (mbox <= M_PCIE_FW_MASTER) {
4299		struct fw_reset_cmd c;
4300
4301		memset(&c, 0, sizeof(c));
4302		INIT_CMD(c, RESET, WRITE);
4303		c.val = htonl(F_PIORST | F_PIORSTMODE);
4304		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
4305		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4306	}
4307
4308	/*
4309	 * Normally we won't complete the operation if the firmware RESET
4310	 * command fails but if our caller insists we'll go ahead and put the
4311	 * uP into RESET.  This can be useful if the firmware is hung or even
4312	 * missing ...  We'll have to take the risk of putting the uP into
4313	 * RESET without the cooperation of firmware in that case.
4314	 *
4315	 * We also force the firmware's HALT flag to be on in case we bypassed
4316	 * the firmware RESET command above or we're dealing with old firmware
4317	 * which doesn't have the HALT capability.  This will serve as a flag
4318	 * for the incoming firmware to know that it's coming out of a HALT
4319	 * rather than a RESET ... if it's new enough to understand that ...
4320	 */
4321	if (ret == 0 || force) {
4322		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
4323		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
4324	}
4325
4326	/*
4327	 * And we always return the result of the firmware RESET command
4328	 * even when we force the uP into RESET ...
4329	 */
4330	return ret;
4331}
4332
4333/**
4334 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
4335 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
4336 *	@reset: if we want to do a RESET to restart things
4337 *
4338 *	Restart firmware previously halted by t4_fw_halt().  On successful
4339 *	return the previous PF Master remains as the new PF Master and there
4340 *	is no need to issue a new HELLO command, etc.
4341 *
4342 *	We do this in two ways:
4343 *
4344 *	 1. If we're dealing with newer firmware we'll simply want to take
4345 *	    the chip's microprocessor out of RESET.  This will cause the
4346 *	    firmware to start up from its start vector.  And then we'll loop
4347 *	    until the firmware indicates it's started again (PCIE_FW.HALT
4348 *	    reset to 0) or we timeout.
4349 *
4350 *	 2. If we're dealing with older firmware then we'll need to RESET
4351 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
4352 *	    flag and automatically RESET itself on startup.
4353 */
4354int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
4355{
4356	if (reset) {
4357		/*
4358		 * Since we're directing the RESET instead of the firmware
4359		 * doing it automatically, we need to clear the PCIE_FW.HALT
4360		 * bit.
4361		 */
4362		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
4363
4364		/*
4365		 * If we've been given a valid mailbox, first try to get the
4366		 * firmware to do the RESET.  If that works, great and we can
4367		 * return success.  Otherwise, if we haven't been given a
4368		 * valid mailbox or the RESET command failed, fall back to
4369		 * hitting the chip with a hammer.
4370		 */
4371		if (mbox <= M_PCIE_FW_MASTER) {
4372			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4373			msleep(100);
4374			if (t4_fw_reset(adap, mbox,
4375					F_PIORST | F_PIORSTMODE) == 0)
4376				return 0;
4377		}
4378
4379		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
4380		msleep(2000);
4381	} else {
4382		int ms;
4383
4384		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
4385		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
4386			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
4387				return FW_SUCCESS;
4388			msleep(100);
4389			ms += 100;
4390		}
4391		return -ETIMEDOUT;
4392	}
4393	return 0;
4394}
4395
4396/**
4397 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
4398 *	@adap: the adapter
4399 *	@mbox: mailbox to use for the FW RESET command (if desired)
4400 *	@fw_data: the firmware image to write
4401 *	@size: image size
4402 *	@force: force upgrade even if firmware doesn't cooperate
4403 *
4404 *	Perform all of the steps necessary for upgrading an adapter's
4405 *	firmware image.  Normally this requires the cooperation of the
4406 *	existing firmware in order to halt all existing activities
4407 *	but if an invalid mailbox token is passed in we skip that step
4408 *	(though we'll still put the adapter microprocessor into RESET in
4409 *	that case).
4410 *
4411 *	On successful return the new firmware will have been loaded and
4412 *	the adapter will have been fully RESET losing all previous setup
4413 *	state.  On unsuccessful return the adapter may be completely hosed ...
4414 *	positive errno indicates that the adapter is ~probably~ intact, a
4415 *	negative errno indicates that things are looking bad ...
4416 */
4417int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
4418		  const u8 *fw_data, unsigned int size, int force)
4419{
4420	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
4421	int reset, ret;
4422
4423	ret = t4_fw_halt(adap, mbox, force);
4424	if (ret < 0 && !force)
4425		return ret;
4426
4427	ret = t4_load_fw(adap, fw_data, size);
4428	if (ret < 0)
4429		return ret;
4430
4431	/*
4432	 * Older versions of the firmware don't understand the new
4433	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
4434	 * restart.  So for newly loaded older firmware we'll have to do the
4435	 * RESET for it so it starts up on a clean slate.  We can tell if
4436	 * the newly loaded firmware will handle this right by checking
4437	 * its header flags to see if it advertises the capability.
4438	 */
4439	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
4440	return t4_fw_restart(adap, mbox, reset);
4441}
4442
4443/**
4444 *	t4_fw_initialize - ask FW to initialize the device
4445 *	@adap: the adapter
4446 *	@mbox: mailbox to use for the FW command
4447 *
4448 *	Issues a command to FW to partially initialize the device.  This
4449 *	performs initialization that generally doesn't depend on user input.
4450 */
4451int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4452{
4453	struct fw_initialize_cmd c;
4454
4455	memset(&c, 0, sizeof(c));
4456	INIT_CMD(c, INITIALIZE, WRITE);
4457	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4458}
4459
4460/**
4461 *	t4_query_params - query FW or device parameters
4462 *	@adap: the adapter
4463 *	@mbox: mailbox to use for the FW command
4464 *	@pf: the PF
4465 *	@vf: the VF
4466 *	@nparams: the number of parameters
4467 *	@params: the parameter names
4468 *	@val: the parameter values
4469 *
4470 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4471 *	queried at once.
4472 */
4473int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4474		    unsigned int vf, unsigned int nparams, const u32 *params,
4475		    u32 *val)
4476{
4477	int i, ret;
4478	struct fw_params_cmd c;
4479	__be32 *p = &c.param[0].mnem;
4480
4481	if (nparams > 7)
4482		return -EINVAL;
4483
4484	memset(&c, 0, sizeof(c));
4485	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4486			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4487			    V_FW_PARAMS_CMD_VFN(vf));
4488	c.retval_len16 = htonl(FW_LEN16(c));
4489
4490	for (i = 0; i < nparams; i++, p += 2)
4491		*p = htonl(*params++);
4492
4493	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4494	if (ret == 0)
4495		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4496			*val++ = ntohl(*p);
4497	return ret;
4498}
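
/*
 * Usage sketch (illustrative only; the encoded parameter word is built
 * from the FW_PARAMS_* mnemonics in t4fw_interface.h):
 *
 *	u32 param = ...;	(encoded parameter name)
 *	u32 val;
 *	int ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 */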
4499
4500/**
4501 *	t4_set_params - sets FW or device parameters
4502 *	@adap: the adapter
4503 *	@mbox: mailbox to use for the FW command
4504 *	@pf: the PF
4505 *	@vf: the VF
4506 *	@nparams: the number of parameters
4507 *	@params: the parameter names
4508 *	@val: the parameter values
4509 *
4510 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4511 *	specified at once.
4512 */
4513int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4514		  unsigned int vf, unsigned int nparams, const u32 *params,
4515		  const u32 *val)
4516{
4517	struct fw_params_cmd c;
4518	__be32 *p = &c.param[0].mnem;
4519
4520	if (nparams > 7)
4521		return -EINVAL;
4522
4523	memset(&c, 0, sizeof(c));
4524	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4525			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4526			    V_FW_PARAMS_CMD_VFN(vf));
4527	c.retval_len16 = htonl(FW_LEN16(c));
4528
4529	while (nparams--) {
4530		*p++ = htonl(*params++);
4531		*p++ = htonl(*val++);
4532	}
4533
4534	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4535}
4536
4537/**
4538 *	t4_cfg_pfvf - configure PF/VF resource limits
4539 *	@adap: the adapter
4540 *	@mbox: mailbox to use for the FW command
4541 *	@pf: the PF being configured
4542 *	@vf: the VF being configured
4543 *	@txq: the max number of egress queues
4544 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4545 *	@rxqi: the max number of interrupt-capable ingress queues
4546 *	@rxq: the max number of interruptless ingress queues
4547 *	@tc: the PCI traffic class
4548 *	@vi: the max number of virtual interfaces
4549 *	@cmask: the channel access rights mask for the PF/VF
4550 *	@pmask: the port access rights mask for the PF/VF
4551 *	@nexact: the maximum number of exact MPS filters
4552 *	@rcaps: read capabilities
4553 *	@wxcaps: write/execute capabilities
4554 *
4555 *	Configures resource limits and capabilities for a physical or virtual
4556 *	function.
4557 */
4558int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4559		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4560		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4561		unsigned int vi, unsigned int cmask, unsigned int pmask,
4562		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4563{
4564	struct fw_pfvf_cmd c;
4565
4566	memset(&c, 0, sizeof(c));
4567	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4568			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4569			    V_FW_PFVF_CMD_VFN(vf));
4570	c.retval_len16 = htonl(FW_LEN16(c));
4571	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4572			       V_FW_PFVF_CMD_NIQ(rxq));
4573	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4574			      V_FW_PFVF_CMD_PMASK(pmask) |
4575			      V_FW_PFVF_CMD_NEQ(txq));
4576	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4577				V_FW_PFVF_CMD_NEXACTF(nexact));
4578	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4579				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4580				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4581	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4582}
4583
4584/**
4585 *	t4_alloc_vi_func - allocate a virtual interface
4586 *	@adap: the adapter
4587 *	@mbox: mailbox to use for the FW command
4588 *	@port: physical port associated with the VI
4589 *	@pf: the PF owning the VI
4590 *	@vf: the VF owning the VI
4591 *	@nmac: number of MAC addresses needed (1 to 5)
4592 *	@mac: the MAC addresses of the VI
4593 *	@rss_size: size of RSS table slice associated with this VI
4594 *	@portfunc: which Port Application Function MAC Address is desired
4595 *	@idstype: Intrusion Detection Type
4596 *
4597 *	Allocates a virtual interface for the given physical port.  If @mac is
4598 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4599 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
4600 *	stored consecutively so the space needed is @nmac * 6 bytes.
4601 *	Returns a negative error number or the non-negative VI id.
4602 */
4603int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4604		     unsigned int port, unsigned int pf, unsigned int vf,
4605		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4606		     unsigned int portfunc, unsigned int idstype)
4607{
4608	int ret;
4609	struct fw_vi_cmd c;
4610
4611	memset(&c, 0, sizeof(c));
4612	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4613			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4614			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4615	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4616	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4617			       V_FW_VI_CMD_FUNC(portfunc));
4618	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4619	c.nmac = nmac - 1;
4620
4621	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4622	if (ret)
4623		return ret;
4624
4625	if (mac) {
4626		memcpy(mac, c.mac, sizeof(c.mac));
4627		switch (nmac) {
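		/* Each case deliberately falls through to copy the rest. */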
4628		case 5:
4629			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4630		case 4:
4631			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4632		case 3:
4633			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4634		case 2:
4635			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
4636		}
4637	}
4638	if (rss_size)
4639		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
4640	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
4641}
4642
4643/**
4644 *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4645 *	@adap: the adapter
4646 *	@mbox: mailbox to use for the FW command
4647 *	@port: physical port associated with the VI
4648 *	@pf: the PF owning the VI
4649 *	@vf: the VF owning the VI
4650 *	@nmac: number of MAC addresses needed (1 to 5)
4651 *	@mac: the MAC addresses of the VI
4652 *	@rss_size: size of RSS table slice associated with this VI
4653 *
4654 *	Backwards-compatible convenience routine to allocate a Virtual
4655 *	Interface with an Ethernet Port Application Function and the
4656 *	Intrusion Detection System disabled.
4657 */
4658int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4659		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4660		unsigned int *rss_size)
4661{
4662	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4663				FW_VI_FUNC_ETH, 0);
4664}
4665
4666/**
4667 *	t4_free_vi - free a virtual interface
4668 *	@adap: the adapter
4669 *	@mbox: mailbox to use for the FW command
4670 *	@pf: the PF owning the VI
4671 *	@vf: the VF owning the VI
4672 *	@viid: virtual interface identifier
4673 *
4674 *	Free a previously allocated virtual interface.
4675 */
4676int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4677	       unsigned int vf, unsigned int viid)
4678{
4679	struct fw_vi_cmd c;
4680
4681	memset(&c, 0, sizeof(c));
4682	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4683			    F_FW_CMD_REQUEST |
4684			    F_FW_CMD_EXEC |
4685			    V_FW_VI_CMD_PFN(pf) |
4686			    V_FW_VI_CMD_VFN(vf));
4687	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4688	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4689
4690	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4691}
4692
4693/**
4694 *	t4_set_rxmode - set Rx properties of a virtual interface
4695 *	@adap: the adapter
4696 *	@mbox: mailbox to use for the FW command
4697 *	@viid: the VI id
4698 *	@mtu: the new MTU or -1
4699 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4700 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4701 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4702 *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4703 *	@sleep_ok: if true we may sleep while awaiting command completion
4704 *
4705 *	Sets Rx properties of a virtual interface.
4706 */
4707int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4708		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4709		  bool sleep_ok)
4710{
4711	struct fw_vi_rxmode_cmd c;
4712
4713	/* convert to FW values */
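	/* -1 becomes the field's all-ones value, which FW reads as "no change". */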
4714	if (mtu < 0)
4715		mtu = M_FW_VI_RXMODE_CMD_MTU;
4716	if (promisc < 0)
4717		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4718	if (all_multi < 0)
4719		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4720	if (bcast < 0)
4721		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4722	if (vlanex < 0)
4723		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4724
4725	memset(&c, 0, sizeof(c));
4726	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4727			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4728	c.retval_len16 = htonl(FW_LEN16(c));
4729	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4730				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4731				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4732				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4733				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4734	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4735}
4736
4737/**
4738 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4739 *	@adap: the adapter
4740 *	@mbox: mailbox to use for the FW command
4741 *	@viid: the VI id
4742 *	@free: if true any existing filters for this VI id are first removed
4743 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4744 *	@addr: the MAC address(es)
4745 *	@idx: where to store the index of each allocated filter
4746 *	@hash: pointer to hash address filter bitmap
4747 *	@sleep_ok: call is allowed to sleep
4748 *
4749 *	Allocates an exact-match filter for each of the supplied addresses and
4750 *	sets it to the corresponding address.  If @idx is not %NULL it should
4751 *	have at least @naddr entries, each of which will be set to the index of
4752 *	the filter allocated for the corresponding MAC address.  If a filter
4753 *	could not be allocated for an address its index is set to 0xffff.
4754 *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4755 *	are hashed and the hash filter bitmap pointed to by @hash is updated.
4756 *
4757 *	Returns a negative error number or the number of filters allocated.
4758 */
4759int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4760		      unsigned int viid, bool free, unsigned int naddr,
4761		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4762{
4763	int offset, ret = 0;
4764	struct fw_vi_mac_cmd c;
4765	unsigned int nfilters = 0;
4766	unsigned int rem = naddr;
4767
4768	if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
4769		return -EINVAL;
4770
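	/*
	 * Submit the addresses in chunks of up to ARRAY_SIZE(c.u.exact)
	 * entries (the number of exact-match slots in one FW_VI_MAC_CMD).
	 */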
4771	for (offset = 0; offset < naddr ; /**/) {
4772		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4773					 ? rem
4774					 : ARRAY_SIZE(c.u.exact));
4775		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4776						     u.exact[fw_naddr]), 16);
4777		struct fw_vi_mac_exact *p;
4778		int i;
4779
4780		memset(&c, 0, sizeof(c));
4781		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4782				     F_FW_CMD_REQUEST |
4783				     F_FW_CMD_WRITE |
4784				     V_FW_CMD_EXEC(free) |
4785				     V_FW_VI_MAC_CMD_VIID(viid));
4786		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4787					    V_FW_CMD_LEN16(len16));
4788
4789		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4790			p->valid_to_idx = htons(
4791				F_FW_VI_MAC_CMD_VALID |
4792				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4793			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4794		}
4795
4796		/*
4797		 * It's okay if we run out of space in our MAC address arena.
4798		 * Some of the addresses we submit may get stored so we need
4799		 * to run through the reply to see what the results were ...
4800		 */
4801		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4802		if (ret && ret != -FW_ENOMEM)
4803			break;
4804
4805		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4806			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4807
4808			if (idx)
4809				idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
4810						 ? 0xffff
4811						 : index);
4812			if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
4813				nfilters++;
4814			else if (hash)
4815				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4816		}
4817
4818		free = false;
4819		offset += fw_naddr;
4820		rem -= fw_naddr;
4821	}
4822
4823	if (ret == 0 || ret == -FW_ENOMEM)
4824		ret = nfilters;
4825	return ret;
4826}
4827
4828/**
4829 *	t4_change_mac - modifies the exact-match filter for a MAC address
4830 *	@adap: the adapter
4831 *	@mbox: mailbox to use for the FW command
4832 *	@viid: the VI id
4833 *	@idx: index of existing filter for old value of MAC address, or -1
4834 *	@addr: the new MAC address value
4835 *	@persist: whether a new MAC allocation should be persistent
4836 *	@add_smt: if true also add the address to the HW SMT
4837 *
4838 *	Modifies an exact-match filter and sets it to the new MAC address if
4839 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4840 *	latter case the address is added persistently if @persist is %true.
4841 *
4842 *	Note that in general it is not possible to modify the value of a given
4843 *	filter, so the generic way to modify an address filter is to free the one
4844 *	being used by the old address value and allocate a new filter for the
4845 *	new address value.
4846 *
4847 *	Returns a negative error number or the index of the filter with the new
4848 *	MAC value.  Note that this index may differ from @idx.
4849 */
4850int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4851		  int idx, const u8 *addr, bool persist, bool add_smt)
4852{
4853	int ret, mode;
4854	struct fw_vi_mac_cmd c;
4855	struct fw_vi_mac_exact *p = c.u.exact;
4856
4857	if (idx < 0)                             /* new allocation */
4858		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4859	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4860
4861	memset(&c, 0, sizeof(c));
4862	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4863			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4864	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4865	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4866				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4867				V_FW_VI_MAC_CMD_IDX(idx));
4868	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4869
4870	ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4871	if (ret == 0) {
4872		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4873		if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
4874			ret = -ENOMEM;
4875	}
4876	return ret;
4877}
4878
4879/**
4880 *	t4_set_addr_hash - program the MAC inexact-match hash filter
4881 *	@adap: the adapter
4882 *	@mbox: mailbox to use for the FW command
4883 *	@viid: the VI id
4884 *	@ucast: whether the hash filter should also match unicast addresses
4885 *	@vec: the value to be written to the hash filter
4886 *	@sleep_ok: call is allowed to sleep
4887 *
4888 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4889 */
4890int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4891		     bool ucast, u64 vec, bool sleep_ok)
4892{
4893	struct fw_vi_mac_cmd c;
4894
4895	memset(&c, 0, sizeof(c));
4896	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4897			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4898	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4899				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4900				    V_FW_CMD_LEN16(1));
4901	c.u.hash.hashvec = cpu_to_be64(vec);
4902	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4903}
4904
4905/**
4906 *	t4_enable_vi - enable/disable a virtual interface
4907 *	@adap: the adapter
4908 *	@mbox: mailbox to use for the FW command
4909 *	@viid: the VI id
4910 *	@rx_en: 1=enable Rx, 0=disable Rx
4911 *	@tx_en: 1=enable Tx, 0=disable Tx
4912 *
4913 *	Enables/disables a virtual interface.
4914 */
4915int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4916		 bool rx_en, bool tx_en)
4917{
4918	struct fw_vi_enable_cmd c;
4919
4920	memset(&c, 0, sizeof(c));
4921	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4922			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4923	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4924			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4925	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4926}
4927
4928/**
4929 *	t4_identify_port - identify a VI's port by blinking its LED
4930 *	@adap: the adapter
4931 *	@mbox: mailbox to use for the FW command
4932 *	@viid: the VI id
4933 *	@nblinks: how many times to blink LED at 2.5 Hz
4934 *
4935 *	Identifies a VI's port by blinking its LED.
4936 */
4937int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4938		     unsigned int nblinks)
4939{
4940	struct fw_vi_enable_cmd c;
4941
4942	memset(&c, 0, sizeof(c));
4943	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4944			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4945	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4946	c.blinkdur = htons(nblinks);
4947	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4948}
4949
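/*
 * Usage sketch (editor's addition): ten blinks at the fixed 2.5 Hz rate keep
 * the LED blinking for about 4 seconds:
 *
 *	ret = t4_identify_port(sc, sc->mbox, pi->viid, 10);
 */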
4950/**
4951 *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4952 *	@adap: the adapter
4953 *	@mbox: mailbox to use for the FW command
4954 *	@start: %true to enable the queues, %false to disable them
4955 *	@pf: the PF owning the queues
4956 *	@vf: the VF owning the queues
4957 *	@iqid: ingress queue id
4958 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4959 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4960 *
4961 *	Starts or stops an ingress queue and its associated FLs, if any.
4962 */
4963int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4964		     unsigned int pf, unsigned int vf, unsigned int iqid,
4965		     unsigned int fl0id, unsigned int fl1id)
4966{
4967	struct fw_iq_cmd c;
4968
4969	memset(&c, 0, sizeof(c));
4970	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4971			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4972			    V_FW_IQ_CMD_VFN(vf));
4973	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4974				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4975	c.iqid = htons(iqid);
4976	c.fl0id = htons(fl0id);
4977	c.fl1id = htons(fl1id);
4978	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4979}
4980
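/*
 * Usage sketch (editor's addition): stop an Rx queue that has a single free
 * list attached; 0xffff marks the absent FL1 ("iq" and "fl" are hypothetical
 * software state holding the firmware-assigned queue ids):
 *
 *	ret = t4_iq_start_stop(sc, sc->mbox, false, sc->pf, 0,
 *	    iq->cntxt_id, fl->cntxt_id, 0xffff);
 */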
4981/**
4982 *	t4_iq_free - free an ingress queue and its FLs
4983 *	@adap: the adapter
4984 *	@mbox: mailbox to use for the FW command
4985 *	@pf: the PF owning the queues
4986 *	@vf: the VF owning the queues
4987 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4988 *	@iqid: ingress queue id
4989 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4990 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4991 *
4992 *	Frees an ingress queue and its associated FLs, if any.
4993 */
4994int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4995	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4996	       unsigned int fl0id, unsigned int fl1id)
4997{
4998	struct fw_iq_cmd c;
4999
5000	memset(&c, 0, sizeof(c));
5001	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
5002			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
5003			    V_FW_IQ_CMD_VFN(vf));
5004	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
5005	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
5006	c.iqid = htons(iqid);
5007	c.fl0id = htons(fl0id);
5008	c.fl1id = htons(fl1id);
5009	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5010}
5011
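/*
 * Usage sketch (editor's addition): release the same queue pair once it has
 * been stopped and drained, using the type named in the comment above:
 *
 *	ret = t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *	    iq->cntxt_id, fl->cntxt_id, 0xffff);
 */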
5012/**
5013 *	t4_eth_eq_free - free an Ethernet egress queue
5014 *	@adap: the adapter
5015 *	@mbox: mailbox to use for the FW command
5016 *	@pf: the PF owning the queue
5017 *	@vf: the VF owning the queue
5018 *	@eqid: egress queue id
5019 *
5020 *	Frees an Ethernet egress queue.
5021 */
5022int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5023		   unsigned int vf, unsigned int eqid)
5024{
5025	struct fw_eq_eth_cmd c;
5026
5027	memset(&c, 0, sizeof(c));
5028	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
5029			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
5030			    V_FW_EQ_ETH_CMD_VFN(vf));
5031	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
5032	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
5033	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5034}
5035
5036/**
5037 *	t4_ctrl_eq_free - free a control egress queue
5038 *	@adap: the adapter
5039 *	@mbox: mailbox to use for the FW command
5040 *	@pf: the PF owning the queue
5041 *	@vf: the VF owning the queue
5042 *	@eqid: egress queue id
5043 *
5044 *	Frees a control egress queue.
5045 */
5046int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5047		    unsigned int vf, unsigned int eqid)
5048{
5049	struct fw_eq_ctrl_cmd c;
5050
5051	memset(&c, 0, sizeof(c));
5052	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
5053			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
5054			    V_FW_EQ_CTRL_CMD_VFN(vf));
5055	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
5056	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
5057	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5058}
5059
5060/**
5061 *	t4_ofld_eq_free - free an offload egress queue
5062 *	@adap: the adapter
5063 *	@mbox: mailbox to use for the FW command
5064 *	@pf: the PF owning the queue
5065 *	@vf: the VF owning the queue
5066 *	@eqid: egress queue id
5067 *
5068 *	Frees an offload egress queue.
5069 */
5070int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
5071		    unsigned int vf, unsigned int eqid)
5072{
5073	struct fw_eq_ofld_cmd c;
5074
5075	memset(&c, 0, sizeof(c));
5076	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
5077			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
5078			    V_FW_EQ_OFLD_CMD_VFN(vf));
5079	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
5080	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
5081	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5082}
5083
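/*
 * Usage sketch (editor's addition): the three eq-free helpers are identical
 * except for the firmware command they build; e.g. an Ethernet Tx queue is
 * released with ("txq" is hypothetical driver state):
 *
 *	ret = t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, txq->eq.cntxt_id);
 */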
5084/**
5085 *	t4_handle_fw_rpl - process a FW reply message
5086 *	@adap: the adapter
5087 *	@rpl: start of the FW message
5088 *
5089 *	Processes a FW message, such as link state change messages.
5090 */
5091int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
5092{
5093	u8 opcode = *(const u8 *)rpl;
5094	const struct fw_port_cmd *p = (const void *)rpl;
5095	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
5096
5097	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
5098		/* link/module state change message */
5099		int speed = 0, fc = 0, i;
5100		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
5101		struct port_info *pi = NULL;
5102		struct link_config *lc;
5103		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
5104		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
5105		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
5106
5107		if (stat & F_FW_PORT_CMD_RXPAUSE)
5108			fc |= PAUSE_RX;
5109		if (stat & F_FW_PORT_CMD_TXPAUSE)
5110			fc |= PAUSE_TX;
5111		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
5112			speed = SPEED_100;
5113		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
5114			speed = SPEED_1000;
5115		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
5116			speed = SPEED_10000;
5117
5118		for_each_port(adap, i) {
5119			pi = adap2pinfo(adap, i);
5120			if (pi->tx_chan == chan)
5121				break;
5122		}
5123		lc = &pi->link_cfg;
5124
5125		if (link_ok != lc->link_ok || speed != lc->speed ||
5126		    fc != lc->fc) {                    /* something changed */
5127			lc->link_ok = link_ok;
5128			lc->speed = speed;
5129			lc->fc = fc;
5130			t4_os_link_changed(adap, i, link_ok);
5131		}
5132		if (mod != pi->mod_type) {
5133			pi->mod_type = mod;
5134			t4_os_portmod_changed(adap, i);
5135		}
5136	} else {
5137		CH_WARN_RATELIMIT(adap,
5138		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
5139		return -EINVAL;
5140	}
5141	return 0;
5142}
5143
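/*
 * Usage sketch (editor's addition): a driver typically feeds this routine
 * from its firmware event-queue handler when an async message arrives,
 * roughly (structure and type names sketched, not verbatim):
 *
 *	const struct cpl_fw6_msg *cpl = ...;
 *
 *	if (cpl->type == FW6_TYPE_CMD_RPL)
 *		t4_handle_fw_rpl(sc, &cpl->data[0]);
 */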
5144/**
5145 *	get_pci_mode - determine a card's PCI mode
5146 *	@adapter: the adapter
5147 *	@p: where to store the PCI settings
5148 *
5149 *	Determines a card's PCI mode and associated parameters, such as speed
5150 *	and width.
5151 */
5152static void __devinit get_pci_mode(struct adapter *adapter,
5153				   struct pci_params *p)
5154{
5155	u16 val;
5156	u32 pcie_cap;
5157
5158	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5159	if (pcie_cap) {
5160		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
5161		p->speed = val & PCI_EXP_LNKSTA_CLS;
5162		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
5163	}
5164}
5165
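/*
 * Editor's note: PCI_EXP_LNKSTA_CLS encodes the current link speed (1 =
 * 2.5 GT/s, 2 = 5 GT/s) and bits 9:4 the negotiated width, so a x8 Gen2
 * link reads back as p->speed == 2, p->width == 8.
 */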
5166/**
5167 *	init_link_config - initialize a link's SW state
5168 *	@lc: structure holding the link state
5169 *	@caps: link capabilities
5170 *
5171 *	Initializes the SW state maintained for each link, including the link's
5172 *	capabilities and default speed/flow-control/autonegotiation settings.
5173 */
5174static void __devinit init_link_config(struct link_config *lc,
5175				       unsigned int caps)
5176{
5177	lc->supported = caps;
5178	lc->requested_speed = 0;
5179	lc->speed = 0;
5180	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
5181	if (lc->supported & FW_PORT_CAP_ANEG) {
5182		lc->advertising = lc->supported & ADVERT_MASK;
5183		lc->autoneg = AUTONEG_ENABLE;
5184		lc->requested_fc |= PAUSE_AUTONEG;
5185	} else {
5186		lc->advertising = 0;
5187		lc->autoneg = AUTONEG_DISABLE;
5188	}
5189}
5190
5191static int __devinit wait_dev_ready(struct adapter *adap)
5192{
5193	u32 whoami;
5194
5195	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5196
5197	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
5198		return 0;
5199
5200	msleep(500);
5201	whoami = t4_read_reg(adap, A_PL_WHOAMI);
5202	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
5203		? 0 : -EIO);
5204}
5205
5206static int __devinit get_flash_params(struct adapter *adapter)
5207{
5208	int ret;
5209	u32 info = 0;
5210
5211	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
5212	if (!ret)
5213		ret = sf1_read(adapter, 3, 0, 1, &info);
5214	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
5215	if (ret < 0)
5216		return ret;
5217
5218	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
5219		return -EINVAL;
5220	info >>= 16;                           /* log2 of size */
5221	if (info >= 0x14 && info < 0x18)
5222		adapter->params.sf_nsec = 1 << (info - 16);
5223	else if (info == 0x18)
5224		adapter->params.sf_nsec = 64;
5225	else
5226		return -EINVAL;
5227	adapter->params.sf_size = 1 << info;
5228	return 0;
5229}
5230
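/*
 * Editor's note (worked example): a 4 MB part returns density code 0x16 as
 * the third ID byte, so after "info >>= 16" the code sets sf_size =
 * 1 << 0x16 = 4 MB and sf_nsec = 1 << (0x16 - 16) = 64 sectors of 64 KB.
 */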
5231static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
5232						  u8 range)
5233{
5234	u16 val;
5235	u32 pcie_cap;
5236
5237	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
5238	if (pcie_cap) {
5239		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
5240		val &= 0xfff0;
5241		val |= range;
5242		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
5243	}
5244}
5245
5246/**
5247 *	t4_prep_adapter - prepare SW and HW for operation
5248 *	@adapter: the adapter
5249 *
5250 *	Initialize adapter SW state for the various HW modules, set initial
5251 *	values for some adapter tunables, determine the flash and VPD
5252 *	parameters, and set the PCIe completion timeout.  T4 rev 1 (T4A1)
5253 *	parts are rejected as no longer supported.
5254 */
5255int __devinit t4_prep_adapter(struct adapter *adapter)
5256{
5257	int ret;
5258
5259	ret = wait_dev_ready(adapter);
5260	if (ret < 0)
5261		return ret;
5262
5263	get_pci_mode(adapter, &adapter->params.pci);
5264
5265	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
5266	/* T4A1 chip is no longer supported */
5267	if (adapter->params.rev == 1) {
5268		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
5269		return -EINVAL;
5270	}
5271	adapter->params.pci.vpd_cap_addr =
5272		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5273
5274	ret = get_flash_params(adapter);
5275	if (ret < 0)
5276		return ret;
5277
5278	ret = get_vpd_params(adapter, &adapter->params.vpd);
5279	if (ret < 0)
5280		return ret;
5281
5282	if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
5283		/* FPGA */
5284		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
5285	} else {
5286		/* ASIC */
5287		adapter->params.cim_la_size = CIMLA_SIZE;
5288	}
5289
5290	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5291
5292	/*
5293	 * Default port and clock for debugging in case we can't reach FW.
5294	 */
5295	adapter->params.nports = 1;
5296	adapter->params.portvec = 1;
5297	adapter->params.vpd.cclk = 50000;
5298
5299	/* Set the PCIe completion timeout to the 4s-13s range (encoding 0xd). */
5300	set_pcie_completion_timeout(adapter, 0xd);
5301	return 0;
5302}
5303
5304int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
5305{
5306	u8 addr[6];
5307	int ret, i, j;
5308	struct fw_port_cmd c;
5309	unsigned int rss_size;
5310	adapter_t *adap = p->adapter;
5311
5312	memset(&c, 0, sizeof(c));
5313
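	/*
	 * Editor's note: this loop finds the (port_id + 1)-th set bit in
	 * portvec, i.e. the physical port backing this logical port.
	 */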
5314	for (i = 0, j = -1; i <= p->port_id; i++) {
5315		do {
5316			j++;
5317		} while ((adap->params.portvec & (1 << j)) == 0);
5318	}
5319
5320	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
5321			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
5322			       V_FW_PORT_CMD_PORTID(j));
5323	c.action_to_len16 = htonl(
5324		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
5325		FW_LEN16(c));
5326	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5327	if (ret)
5328		return ret;
5329
5330	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5331	if (ret < 0)
5332		return ret;
5333
5334	p->viid = ret;
5335	p->tx_chan = j;
5336	p->lport = j;
5337	p->rss_size = rss_size;
5338	t4_os_set_hw_addr(adap, p->port_id, addr);
5339
5340	ret = ntohl(c.u.info.lstatus_to_modtype);
5341	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
5342		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
5343	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
5344	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
5345
5346	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
5347
5348	return 0;
5349}
5350